author    Mark Brown <broonie@linaro.org>  2013-10-07 06:46:56 -0400
committer Mark Brown <broonie@linaro.org>  2013-10-07 06:46:56 -0400
commit    9f9e4266a66b6f9dcde305e85035615c06bdb7f7 (patch)
tree      c6f867c7a1bf94a3b4f13b19a9ce442c7aec9e79
parent    bf551413038f74343ec4d1413c3610e2362d0aeb (diff)
parent    249ce1387b7739dbea2ac1a697e4bf1e37ec06b7 (diff)
Merge remote-tracking branch 'asoc/topic/dapm' into asoc-twl6040
-rw-r--r--CREDITS3
-rw-r--r--Documentation/arm64/tagged-pointers.txt14
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/block/cmdline-partition.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (renamed from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt)8
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt17
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt)0
-rw-r--r--Documentation/filesystems/vfs.txt14
-rw-r--r--Documentation/kernel-parameters.txt10
-rw-r--r--Documentation/networking/bonding.txt6
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt4
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt6
-rw-r--r--MAINTAINERS40
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arc/include/asm/spinlock.h9
-rw-r--r--arch/arc/include/asm/uaccess.h4
-rw-r--r--arch/arc/kernel/signal.c25
-rw-r--r--arch/arc/kernel/time.c7
-rw-r--r--arch/arc/kernel/unaligned.c6
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/dts/Makefile3
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi262
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts256
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts17
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn102.dts49
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi11
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi6
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi12
-rw-r--r--arch/arm/boot/dts/imx27.dtsi6
-rw-r--r--arch/arm/boot/dts/imx51.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-pinfunc.h4
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi14
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi46
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts39
-rw-r--r--arch/arm/boot/dts/omap5.dtsi7
-rw-r--r--arch/arm/boot/dts/prima2.dtsi27
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi6
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi9
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi6
-rw-r--r--arch/arm/common/edma.c38
-rw-r--r--arch/arm/configs/multi_v7_defconfig3
-rw-r--r--arch/arm/crypto/aes-armv4.S6
-rw-r--r--arch/arm/include/asm/uaccess.h7
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-header.S8
-rw-r--r--arch/arm/kvm/reset.c6
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_reset.S8
-rw-r--r--arch/arm/mach-at91/at91x40_time.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h4
-rw-r--r--arch/arm/mach-imx/clk-fixup-mux.c1
-rw-r--r--arch/arm/mach-imx/clk-imx27.c2
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c4
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c9
-rw-r--r--arch/arm/mach-imx/system.c11
-rw-r--r--arch/arm/mach-integrator/pci_v3.h7
-rw-r--r--arch/arm/mach-mvebu/coherency.c8
-rw-r--r--arch/arm/mach-mvebu/pmsu.c1
-rw-r--r--arch/arm/mach-mvebu/system-controller.c1
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c2
-rw-r--r--arch/arm/mach-omap2/gpmc.c4
-rw-r--r--arch/arm/mach-omap2/mux34xx.c2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c2
-rw-r--r--arch/arm/mach-sa1100/collie.c2
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-lager.c27
-rw-r--r--arch/arm/mach-shmobile/clock-r8a73a4.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c2
-rw-r--r--arch/arm/mach-u300/Kconfig10
-rw-r--r--arch/arm/mach-ux500/cache-l2x0.c1
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c11
-rw-r--r--arch/arm64/Kconfig.debug7
-rw-r--r--arch/arm64/configs/defconfig5
-rw-r--r--arch/arm64/include/asm/hwcap.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h10
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/process.c21
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/mm/fault.c2
-rw-r--r--arch/arm64/mm/tlb.S2
-rw-r--r--arch/avr32/include/asm/Kbuild16
-rw-r--r--arch/avr32/include/asm/cputime.h6
-rw-r--r--arch/avr32/include/asm/delay.h1
-rw-r--r--arch/avr32/include/asm/device.h7
-rw-r--r--arch/avr32/include/asm/div64.h6
-rw-r--r--arch/avr32/include/asm/emergency-restart.h6
-rw-r--r--arch/avr32/include/asm/futex.h6
-rw-r--r--arch/avr32/include/asm/irq_regs.h1
-rw-r--r--arch/avr32/include/asm/local.h6
-rw-r--r--arch/avr32/include/asm/local64.h1
-rw-r--r--arch/avr32/include/asm/percpu.h6
-rw-r--r--arch/avr32/include/asm/scatterlist.h6
-rw-r--r--arch/avr32/include/asm/sections.h6
-rw-r--r--arch/avr32/include/asm/topology.h6
-rw-r--r--arch/avr32/include/asm/xor.h6
-rw-r--r--arch/avr32/kernel/process.c2
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/mips/Makefile3
-rw-r--r--arch/mips/alchemy/board-mtx1.c2
-rw-r--r--arch/mips/alchemy/common/usb.c3
-rw-r--r--arch/mips/bcm63xx/cpu.c4
l---------arch/mips/boot/dts/include/dt-bindings2
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c1
-rw-r--r--arch/mips/dec/prom/init.c1
-rw-r--r--arch/mips/include/asm/cpu-features.h8
-rw-r--r--arch/mips/include/asm/cpu-info.h1
-rw-r--r--arch/mips/include/asm/cpu-type.h203
-rw-r--r--arch/mips/include/asm/cpu.h38
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h4
-rw-r--r--arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h7
-rw-r--r--arch/mips/include/asm/pci.h12
-rw-r--r--arch/mips/include/asm/timex.h33
-rw-r--r--arch/mips/include/asm/vga.h3
-rw-r--r--arch/mips/kernel/cpu-probe.c58
-rw-r--r--arch/mips/kernel/idle.c3
-rw-r--r--arch/mips/kernel/time.c1
-rw-r--r--arch/mips/kernel/traps.c3
-rw-r--r--arch/mips/mm/c-octeon.c6
-rw-r--r--arch/mips/mm/c-r4k.c50
-rw-r--r--arch/mips/mm/dma-default.c13
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/sc-mips.c3
-rw-r--r--arch/mips/mm/tlb-r4k.c1
-rw-r--r--arch/mips/mm/tlbex.c1
-rw-r--r--arch/mips/mti-malta/malta-time.c5
-rw-r--r--arch/mips/mti-sead3/sead3-time.c3
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c3
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/pci/pci-bcm1480.c1
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c3
-rw-r--r--arch/mips/sibyte/sb1250/setup.c3
-rw-r--r--arch/mips/sni/setup.c3
-rw-r--r--arch/openrisc/include/asm/prom.h44
-rw-r--r--arch/parisc/mm/fault.c5
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/epapr-wrapper.c9
-rw-r--r--arch/powerpc/boot/epapr.c4
-rw-r--r--arch/powerpc/boot/of.c16
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c100
-rw-r--r--arch/powerpc/kernel/misc_32.S25
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c21
-rw-r--r--arch/powerpc/kernel/sysfs.c18
-rw-r--r--arch/powerpc/kernel/tm.S95
-rw-r--r--arch/powerpc/kernel/vio.c12
-rw-r--r--arch/powerpc/lib/checksum_64.S58
-rw-r--r--arch/powerpc/lib/sstep.c3
-rw-r--r--arch/powerpc/mm/init_64.c4
-rw-r--r--arch/powerpc/mm/mem.c9
-rw-r--r--arch/powerpc/perf/power8-pmu.c5
-rw-r--r--arch/powerpc/platforms/pseries/smp.c26
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/mutex.h2
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/spinlock.h5
-rw-r--r--arch/score/Kconfig4
-rw-r--r--arch/score/Makefile4
-rw-r--r--arch/score/include/asm/checksum.h93
-rw-r--r--arch/score/include/asm/io.h1
-rw-r--r--arch/score/include/asm/pgalloc.h2
-rw-r--r--arch/score/kernel/entry.S4
-rw-r--r--arch/score/kernel/process.c4
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/include/asm/floppy_64.h2
-rw-r--r--arch/sparc/kernel/Makefile3
-rw-r--r--arch/sparc/kernel/ds.c5
-rw-r--r--arch/sparc/kernel/ldc.c4
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/gxio/iorpc_mpipe.c90
-rw-r--r--arch/tile/gxio/iorpc_mpipe_info.c15
-rw-r--r--arch/tile/gxio/iorpc_trio.c28
-rw-r--r--arch/tile/gxio/iorpc_usb_host.c8
-rw-r--r--arch/tile/gxio/usb_host.c8
-rw-r--r--arch/tile/include/arch/mpipe.h24
-rw-r--r--arch/tile/include/arch/mpipe_constants.h6
-rw-r--r--arch/tile/include/arch/mpipe_shm.h54
-rw-r--r--arch/tile/include/arch/trio_constants.h10
-rw-r--r--arch/tile/include/asm/page.h5
-rw-r--r--arch/tile/include/asm/pgtable_32.h12
-rw-r--r--arch/tile/include/asm/pgtable_64.h4
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe.h52
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe_info.h12
-rw-r--r--arch/tile/include/gxio/iorpc_trio.h28
-rw-r--r--arch/tile/include/gxio/iorpc_usb_host.h8
-rw-r--r--arch/tile/include/gxio/usb_host.h8
-rw-r--r--arch/tile/kernel/compat.c2
-rw-r--r--arch/tile/kernel/futex_64.S55
-rw-r--r--arch/tile/kernel/setup.c3
-rw-r--r--arch/tile/kernel/unaligned.c4
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/tile/mm/init.c4
-rw-r--r--arch/tile/mm/pgtable.c3
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/include/asm/xen/page.h31
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c10
-rw-r--r--arch/x86/kernel/entry_64.S15
-rw-r--r--arch/x86/kernel/microcode_amd.c1
-rw-r--r--arch/x86/kernel/reboot.c18
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c4
-rw-r--r--arch/x86/kvm/emulate.c14
-rw-r--r--arch/x86/kvm/paging_tmpl.h20
-rw-r--r--arch/x86/kvm/vmx.c15
-rw-r--r--arch/x86/pci/mmconfig-shared.c7
-rw-r--r--arch/x86/platform/efi/efi.c11
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/spinlock.c26
-rw-r--r--block/Kconfig9
-rw-r--r--block/Makefile2
-rw-r--r--block/blk-cgroup.c25
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-exec.c4
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--block/deadline-iosched.c2
-rw-r--r--block/elevator.c2
-rw-r--r--block/genhd.c3
-rw-r--r--block/partitions/Kconfig4
-rw-r--r--block/partitions/cmdline.c8
-rw-r--r--drivers/acpi/acpi_ipmi.c24
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/atm/he.c13
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/bcma/driver_pci.c49
-rw-r--r--drivers/bcma/scan.c12
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c1
-rw-r--r--drivers/block/rbd.c77
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c12
-rw-r--r--drivers/char/tpm/xen-tpmfront.c36
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c3
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c10
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c9
-rw-r--r--drivers/cpufreq/cpufreq.c34
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c7
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/edma.c2
-rw-r--r--drivers/dma/imx-dma.c31
-rw-r--r--drivers/gpio/gpio-omap.c158
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/drm_context.c73
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fops.c21
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c68
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c46
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c63
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c58
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c41
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c53
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c164
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c15
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c40
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c20
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c112
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c21
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c43
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c1
-rw-r--r--drivers/hid/hid-core.c74
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c25
-rw-r--r--drivers/hid/hid-lg2ff.c19
-rw-r--r--drivers/hid/hid-lg3ff.c29
-rw-r--r--drivers/hid/hid-lg4ff.c20
-rw-r--r--drivers/hid/hid-lgff.c17
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-multitouch.c26
-rw-r--r--drivers/hid/hid-sony.c4
-rw-r--r--drivers/hid/hid-steelseries.c5
-rw-r--r--drivers/hid/hid-zpff.c18
-rw-r--r--drivers/hv/connection.c2
-rw-r--r--drivers/hv/hv_kvp.c38
-rw-r--r--drivers/hv/hv_snapshot.c6
-rw-r--r--drivers/hv/hv_util.c71
-rw-r--r--drivers/hwmon/applesmc.c11
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c26
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c16
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/adc/at91_adc.c11
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/buffer_cb.c2
-rw-r--r--drivers/iio/dac/mcp4725.c12
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/industrialio-buffer.c30
-rw-r--r--drivers/iio/industrialio-core.c33
-rw-r--r--drivers/iio/industrialio-event.c20
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/iio/temperature/tmp006.c6
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/iommu/arm-smmu.c13
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c4
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c4
-rw-r--r--drivers/isdn/hisax/avm_pci.c4
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/diva.c4
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/elsa_ser.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hscx_irq.c4
-rw-r--r--drivers/isdn/hisax/icc.c4
-rw-r--r--drivers/isdn/hisax/ipacx.c8
-rw-r--r--drivers/isdn/hisax/isac.c4
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/jade.c18
-rw-r--r--drivers/isdn/hisax/jade_irq.c4
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c50
-rw-r--r--drivers/isdn/hisax/netjet.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/isdn/hisax/w6692.c8
-rw-r--r--drivers/mailbox/mailbox-omap2.c1
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c39
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/journal.c33
-rw-r--r--drivers/md/bcache/request.c15
-rw-r--r--drivers/md/bcache/sysfs.c9
-rw-r--r--drivers/md/bcache/util.c11
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/dm-io.c7
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-stats.c23
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/misc/mei/amthif.c1
-rw-r--r--drivers/misc/mei/bus.c5
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/hbm.c10
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/mei_dev.h6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c16
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c7
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_alb.h9
-rw-r--r--drivers/net/bonding/bond_main.c14
-rw-r--r--drivers/net/bonding/bond_sysfs.c39
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/flexcan.c12
-rw-r--r--drivers/net/can/slcan.c139
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c15
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c44
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c189
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c50
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c78
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c162
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c3
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c39
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c10
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/irda/mcs7780.c40
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c115
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/usbnet.c27
-rw-r--r--drivers/net/vxlan.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c17
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c15
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c94
-rw-r--r--drivers/net/xen-netback/xenbus.c151
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c5
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/ti-abb-regulator.c16
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/staging/comedi/Kconfig33
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c25
-rw-r--r--drivers/staging/dgap/dgap_driver.c17
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c4
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/light/isl29018.c1
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c2
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c11
-rw-r--r--drivers/staging/line6/toneport.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c2
-rw-r--r--drivers/staging/lustre/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c6
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c2
-rw-r--r--drivers/staging/octeon/ethernet-mem.c7
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/vt6656/card.c4
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/staging/xillybus/xillybus_core.c2
-rw-r--r--drivers/staging/zram/zram_drv.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_sbc.c28
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/tty/n_tty.c49
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c7
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/cdc2.c19
-rw-r--r--drivers/usb/gadget/dummy_hcd.c7
-rw-r--r--drivers/usb/gadget/f_ecm.c2
-rw-r--r--drivers/usb/gadget/f_eem.c2
-rw-r--r--drivers/usb/gadget/f_fs.c62
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/multi.c8
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c3
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c15
-rw-r--r--drivers/usb/host/ehci-fsl.c19
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c5
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c2
-rw-r--r--drivers/usb/host/ehci-tilegx.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c22
-rw-r--r--drivers/usb/host/ohci-q.c26
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/xhci-hub.c47
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci.c25
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c11
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c2
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/pl2303.c43
-rw-r--r--drivers/vhost/scsi.c50
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c17
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/of_display_timing.c6
-rw-r--r--drivers/video/omap2/displays-new/Kconfig1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c1
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--fs/9p/v9fs.c7
-rw-r--r--fs/9p/vfs_inode_dotl.c8
-rw-r--r--fs/afs/dir.c3
-rw-r--r--fs/aio.c52
-rw-r--r--fs/autofs4/waitq.c13
-rw-r--r--fs/binfmt_elf.c30
-rw-r--r--fs/bio-integrity.c2
-rw-r--r--fs/bio.c4
-rw-r--r--fs/btrfs/async-thread.c25
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/btrfs_inode.h5
-rw-r--r--fs/btrfs/ctree.c7
-rw-r--r--fs/btrfs/ctree.h17
-rw-r--r--fs/btrfs/dev-replace.c9
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c57
-rw-r--r--fs/btrfs/extent_io.c18
-rw-r--r--fs/btrfs/file.c4
-rw-r--r--fs/btrfs/free-space-cache.c67
-rw-r--r--fs/btrfs/free-space-cache.h5
-rw-r--r--fs/btrfs/inode.c16
-rw-r--r--fs/btrfs/ioctl.c80
-rw-r--r--fs/btrfs/ordered-data.c24
-rw-r--r--fs/btrfs/ordered-data.h5
-rw-r--r--fs/btrfs/relocation.c43
-rw-r--r--fs/btrfs/scrub.c112
-rw-r--r--fs/btrfs/super.c21
-rw-r--r--fs/btrfs/transaction.c9
-rw-r--r--fs/btrfs/tree-log.c52
-rw-r--r--fs/btrfs/volumes.c14
-rw-r--r--fs/cachefiles/namei.c2
-rw-r--r--fs/cachefiles/xattr.c29
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h5
-rw-r--r--fs/cifs/cifspdu.h21
-rw-r--r--fs/cifs/cifssmb.c1
-rw-r--r--fs/cifs/dir.c1
-rw-r--r--fs/cifs/file.c8
-rw-r--r--fs/cifs/fscache.c7
-rw-r--r--fs/cifs/fscache.h13
-rw-r--r--fs/cifs/inode.c45
-rw-r--r--fs/cifs/readdir.c3
-rw-r--r--fs/cifs/sess.c84
-rw-r--r--fs/fscache/cookie.c3
-rw-r--r--fs/fuse/dir.c20
-rw-r--r--fs/fuse/file.c23
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/namei.c34
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/nfs4file.c3
-rw-r--r--fs/nfs/nfs4filelayoutdev.c20
-rw-r--r--fs/nfs/nfs4proc.c58
-rw-r--r--fs/nilfs2/page.c2
-rw-r--r--fs/nilfs2/segment.c11
-rw-r--r--fs/ocfs2/dcache.c7
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/open.c21
-rw-r--r--fs/pstore/platform.c29
-rw-r--r--fs/reiserfs/journal.c67
-rw-r--r--fs/super.c4
-rw-r--r--fs/sysv/super.c1
-rw-r--r--fs/udf/ialloc.c16
-rw-r--r--fs/udf/super.c64
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/xfs/xfs_buf_item.c1
-rw-r--r--fs/xfs/xfs_da_btree.c5
-rw-r--r--fs/xfs/xfs_dir2_block.c6
-rw-r--r--fs/xfs/xfs_dir2_format.h51
-rw-r--r--fs/xfs/xfs_dir2_readdir.c4
-rw-r--r--fs/xfs/xfs_dir2_sf.c6
-rw-r--r--fs/xfs/xfs_dquot.c19
-rw-r--r--fs/xfs/xfs_fs.h2
-rw-r--r--fs/xfs/xfs_icache.c9
-rw-r--r--fs/xfs/xfs_log_recover.c74
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/asm-generic/vtime.h1
-rw-r--r--include/drm/drmP.h7
-rw-r--r--include/drm/drm_pciids.h3
-rw-r--r--include/linux/balloon_compaction.h25
-rw-r--r--include/linux/bcma/bcma_driver_pci.h1
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/kernel.h11
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/memcontrol.h55
-rw-r--r--include/linux/mutex.h6
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/of_irq.h20
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/timex.h1
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip_vs.h9
-rw-r--r--include/net/mrp.h1
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h2
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/secure_seq.h1
-rw-r--r--include/net/sock.h5
-rw-r--r--include/sound/soc-dapm.h4
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/trace/events/block.h6
-rw-r--r--include/trace/events/btrfs.h1
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/perf_event.h17
-rw-r--r--ipc/msg.c32
-rw-r--r--ipc/sem.c214
-rw-r--r--ipc/shm.c17
-rw-r--r--ipc/util.c32
-rw-r--r--ipc/util.h10
-rw-r--r--kernel/audit.c5
-rw-r--r--kernel/context_tracking.c12
-rw-r--r--kernel/events/core.c21
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/params.c6
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/power/user.c8
-rw-r--r--kernel/reboot.c9
-rw-r--r--kernel/sched/debug.c6
-rw-r--r--kernel/sched/fair.c23
-rw-r--r--kernel/sched/stats.h5
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/time/ntp.c6
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/watchdog.c60
-rw-r--r--lib/hexdump.c2
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/lockref.c23
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/hwpoison-inject.c5
-rw-r--r--mm/madvise.c5
-rw-r--r--mm/memcontrol.c560
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mlock.c9
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/vmscan.c87
-rw-r--r--net/802/mrp.c27
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/bluetooth/hci_core.c26
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/l2cap_core.c7
-rw-r--r--net/bluetooth/rfcomm/tty.c35
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h8
-rw-r--r--net/bridge/br_stp.c23
-rw-r--r--net/bridge/br_stp_if.c12
-rw-r--r--net/ceph/osd_client.c11
-rw-r--r--net/core/dev.c49
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/core/secure_seq.c27
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/ipv4/af_inet.c4
-rw-r--r--net/ipv4/igmp.c8
-rw-r--r--net/ipv4/inetpeer.c4
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ip_tunnel.c22
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c10
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/tcp_metrics.c4
-rw-r--r--net/ipv4/tcp_output.c17
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c2
-rw-r--r--net/ipv6/addrconf.c79
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_output.c53
-rw-r--r--net/ipv6/ip6_tunnel.c7
-rw-r--r--net/ipv6/mcast.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c10
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c4
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/sit.c86
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/lapb/lapb_timer.c1
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipset/ip_set_getport.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h28
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c86
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c72
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c62
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nf_synproxy_core.c12
-rw-r--r--net/netfilter/nfnetlink_queue_core.c2
-rw-r--r--net/sched/sch_fq.c102
-rw-r--r--net/sctp/input.c3
-rw-r--r--net/sctp/ipv6.c44
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c11
-rw-r--r--net/sysctl_net.c4
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--security/apparmor/crypto.c34
-rw-r--r--security/apparmor/include/policy.h4
-rw-r--r--security/apparmor/policy.c3
-rw-r--r--security/selinux/avc.c9
-rw-r--r--security/selinux/hooks.c15
-rw-r--r--security/selinux/include/avc.h18
-rw-r--r--sound/core/compress_offload.c15
-rw-r--r--sound/pci/ac97/ac97_codec.c1
-rw-r--r--sound/pci/hda/patch_cirrus.c72
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_hdmi.c47
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c1
-rw-r--r--sound/soc/codecs/88pm860x-codec.c3
-rw-r--r--sound/soc/codecs/ab8500-codec.c7
-rw-r--r--sound/soc/codecs/max98095.c4
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c7
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/soc-dapm.c46
-rw-r--r--tools/lib/lk/debugfs.c1
-rw-r--r--tools/perf/arch/x86/util/tsc.c6
-rw-r--r--tools/perf/builtin-inject.c2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-trace.c18
-rw-r--r--tools/perf/config/Makefile5
-rw-r--r--tools/perf/config/feature-tests.mak10
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/dwarf-aux.c19
-rw-r--r--tools/perf/util/dwarf-aux.h3
-rw-r--r--tools/perf/util/header.c41
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/probe-finder.c89
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/session.c9
-rw-r--r--tools/perf/util/session.h4
-rw-r--r--tools/perf/util/symbol-elf.c16
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--virt/kvm/async_pf.c5
-rw-r--r--virt/kvm/kvm_main.c18
943 files changed, 9213 insertions, 5240 deletions
diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e..0640e1650483 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index 264e9841563a..d9995f1f51b3 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -18,17 +18,17 @@ this byte for application use, with the following caveats:
     parameters containing user virtual addresses *must* have
     their top byte cleared before trapping to the kernel.
 
-(2) Tags are not guaranteed to be preserved when delivering
-    signals. This means that signal handlers in applications
-    making use of tags cannot rely on the tag information for
-    user virtual addresses being maintained for fields inside
-    siginfo_t. One exception to this rule is for signals raised
-    in response to debug exceptions, where the tag information
-    will be preserved.
+(2) Non-zero tags are not preserved when delivering signals.
+    This means that signal handlers in applications making use
+    of tags cannot rely on the tag information for user virtual
+    addresses being maintained for fields inside siginfo_t.
+    One exception to this rule is for signals raised in response
+    to watchpoint debug exceptions, where the tag information
+    will be preserved.
 
 (3) Special care should be taken when using tagged pointers,
     since it is likely that C compilers will not hazard two
-    addresses differing only in the upper bits.
+    virtual addresses differing only in the upper byte.
 
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d18ecd827c40..929d9904f74b 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -6,6 +6,8 @@ capability.txt
   - Generic Block Device Capability (/sys/block/<device>/capability)
 cfq-iosched.txt
   - CFQ IO scheduler tunables
+cmdline-partition.txt
+  - how to specify block device partitions on kernel command line
 data-integrity.txt
   - Block data integrity
 deadline-iosched.txt
diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt
index 2bbf4cc40c3f..525b9f6d7fb4 100644
--- a/Documentation/block/cmdline-partition.txt
+++ b/Documentation/block/cmdline-partition.txt
@@ -1,9 +1,9 @@
-Embedded device command line partition
+Embedded device command line partition parsing
 =====================================================================
 
-Read block device partition table from command line.
-The partition used for fixed block device (eMMC) embedded device.
-It is no MBR, save storage space. Bootloader can be easily accessed
+Support for reading the block device partition table from the command line.
+It is typically used for fixed block (eMMC) embedded devices.
+It has no MBR, so saves storage space. Bootloader can be easily accessed
 by absolute address of data on the block device.
 Users can easily change the partition.
 
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-  - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+  - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index df204e18e030..6a2a1160a70d 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -9,12 +9,15 @@ compulsory and any optional properties, common to all SD/MMC drivers, as
 described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
 optional bindings can be used.
 
+Required properties:
+- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
+              "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
+              "renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
+              "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
+              "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
+              "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
+              "renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
+              "renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
+
 Optional properties:
 - toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
-
-When used with Renesas SDHI hardware, the following compatibility strings
-configure various model-specific properties:
-
-"renesas,sh7372-sdhi": (default) compatible with SH7372
-"renesas,r8a7740-sdhi": compatible with R8A7740: certain MMC/SD commands have to
-              wait for the interface to become idle.
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 2c6be0377f55..d2ea4605d078 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -86,6 +86,7 @@ General Properties:
 
 Clock Properties:
 
+  - fsl,cksel        Timer reference clock source.
   - fsl,tclk-period  Timer reference clock period in nanoseconds.
   - fsl,tmr-prsc     Prescaler, divides the output clock.
   - fsl,tmr-add      Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
   clock. You must choose these carefully for the clock to work right.
   Here is how to figure good values:
 
-  TimerOsc = system clock MHz
+  TimerOsc = selected reference clock MHz
   tclk_period = desired clock period nanoseconds
   NominalFreq = 1000 / tclk_period MHz
   FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
@@ -114,6 +115,20 @@ Clock Properties:
   Pulse Per Second (PPS) signal, since this will be offered to the PPS
   subsystem to synchronize the Linux clock.
 
+  Reference clock source is determined by the value, which is holded
+  in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the
+  value, which will be directly written in those bits, that is why,
+  according to reference manual, the next clock sources can be used:
+
+  <0> - external high precision timer reference clock (TSEC_TMR_CLK
+        input is used for this purpose);
+  <1> - eTSEC system clock;
+  <2> - eTSEC1 transmit clock;
+  <3> - RTC clock input.
+
+  When this attribute is not used, eTSEC system clock will serve as
+  IEEE 1588 timer reference clock.
+
 Example:
 
 	ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
 		reg = <0x24E00 0xB0>;
 		interrupts = <12 0x8 13 0x8>;
 		interrupt-parent = < &ipic >;
+		fsl,cksel = <1>;
 		fsl,tclk-period = <10>;
 		fsl,tmr-prsc = <100>;
 		fsl,tmr-add = <0x999999A4>;
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index eabcb4b5db6e..e216af356847 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..c5e032c85bf9 100644
--- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f93a88250a44..deb48b5fd883 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -359,11 +359,9 @@ struct inode_operations {
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	void (*update_time)(struct inode *, struct timespec *, int);
-	int (*atomic_open)(struct inode *, struct dentry *,
+	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
+			unsigned open_flag, umode_t create_mode, int *opened);
 	int (*tmpfile) (struct inode *, struct dentry *, umode_t);
-} ____cacheline_aligned;
-			struct file *, unsigned open_flag,
-			umode_t create_mode, int *opened);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -470,9 +468,11 @@ otherwise noted.
   method the filesystem can look up, possibly create and open the file in
   one atomic operation. If it cannot perform this (e.g. the file type
   turned out to be wrong) it may signal this by returning 1 instead of
-  usual 0 or -ve . This method is only called if the last
-  component is negative or needs lookup. Cached positive dentries are
-  still handled by f_op->open().
+  usual 0 or -ve . This method is only called if the last component is
+  negative or needs lookup. Cached positive dentries are still handled by
+  f_op->open(). If the file was created, the FILE_CREATED flag should be
+  set in "opened". In case of O_EXCL the method must only succeed if the
+  file didn't exist and hence FILE_CREATED shall always be set on success.
 
   tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to
 	atomically creating, opening and unlinking a file in given directory.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1a036cd972fb..fcbb736d55fe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: <io>,<irq>,<mode>
 			See header of drivers/net/hamradio/baycom_ser_hdx.c.
 
+	blkdevparts=	Manual partition parsing of block device(s) for
+			embedded devices based on command line input.
+			See Documentation/block/cmdline-partition.txt
+
 	boot_delay=	Milliseconds to delay each printk during boot.
 			Values larger than 10 seconds (10000) are changed to
 			no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			pages. In the event, a node is too small to have both
 			kernelcore and Movable pages, kernelcore pages will
 			take priority and other nodes will have a larger number
-			of kernelcore pages. The Movable zone is used for the
+			of Movable pages. The Movable zone is used for the
 			allocation of pages that may be reclaimed or moved
 			by the page migration subsystem. This means that
 			HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3485 the unplug protocol 3489 the unplug protocol
3486 never -- do not unplug even if version check succeeds 3490 never -- do not unplug even if version check succeeds
3487 3491
3492 xen_nopvspin [X86,XEN]
3493 Disables the ticketlock slowpath using Xen PV
3494 optimizations.
3495
3488 xirc2ps_cs= [NET,PCMCIA] 3496 xirc2ps_cs= [NET,PCMCIA]
3489 Format: 3497 Format:
3490 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] 3498 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 87bbcfee2e06..9b28e714831a 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1362,6 +1362,12 @@ To add ARP targets:
1362To remove an ARP target: 1362To remove an ARP target:
1363# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target 1363# echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
1364 1364
1365To configure the interval between learning packet transmits:
1366# echo 12 > /sys/class/net/bond0/bonding/lp_interval
1367 NOTE: the lp_interval is the number of seconds between instances where
1368the bonding driver sends learning packets to each slave's peer switch. The
1369default interval is 1 second.
1370
1365Example Configuration 1371Example Configuration
1366--------------------- 1372---------------------
1367 We begin with the same example that is shown in section 3.3, 1373 We begin with the same example that is shown in section 3.3,
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index d529e02d928d..f14f49304222 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -66,9 +66,7 @@ rq->cfs.load value, which is the sum of the weights of the tasks queued on the
66runqueue. 66runqueue.
67 67
68CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the 68CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the
69p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to 69p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it.
70account for possible wraparounds). CFS picks the "leftmost" task from this
71tree and sticks to it.
72As the system progresses forwards, the executed tasks are put into the tree 70As the system progresses forwards, the executed tasks are put into the tree
73more and more to the right --- slowly but surely giving a chance for every task 71more and more to the right --- slowly but surely giving a chance for every task
74to become the "leftmost task" and thus get on the CPU within a deterministic 72to become the "leftmost task" and thus get on the CPU within a deterministic
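A conceptual sketch of the "pick the leftmost task" step described above (simplified and not the actual fair.c code, which caches the leftmost node rather than calling rb_first() on every pick):

        #include <linux/rbtree.h>
        #include <linux/sched.h>

        /*
         * CFS keeps runnable entities in an rbtree keyed by vruntime; the
         * "leftmost" node has the smallest vruntime and therefore runs next.
         */
        static struct sched_entity *pick_leftmost_entity(struct rb_root *tasks_timeline)
        {
                struct rb_node *left = rb_first(tasks_timeline);        /* smallest vruntime */

                if (!left)
                        return NULL;            /* no runnable entities */

                return rb_entry(left, struct sched_entity, run_node);
        }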
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index a46ddb85e83a..f911e3656209 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
296 imac27 IMac 27 Inch 296 imac27 IMac 27 Inch
297 auto BIOS setup (default) 297 auto BIOS setup (default)
298 298
299Cirrus Logic CS4208
300===================
301 mba6 MacBook Air 6,1 and 6,2
302 gpio0 Enable GPIO 0 amp
303 auto BIOS setup (default)
304
299VIA VT17xx/VT18xx/VT20xx 305VIA VT17xx/VT18xx/VT20xx
300======================== 306========================
301 auto BIOS setup (default) 307 auto BIOS setup (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e61c2e83fc2b..8a0cbf3cf2c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -824,15 +824,21 @@ S: Maintained
824F: arch/arm/mach-gemini/ 824F: arch/arm/mach-gemini/
825 825
826ARM/CSR SIRFPRIMA2 MACHINE SUPPORT 826ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
827M: Barry Song <baohua.song@csr.com> 827M: Barry Song <baohua@kernel.org>
828L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 828L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
829T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git 829T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
830S: Maintained 830S: Maintained
831F: arch/arm/mach-prima2/ 831F: arch/arm/mach-prima2/
832F: drivers/clk/clk-prima2.c
833F: drivers/clocksource/timer-prima2.c
834F: drivers/clocksource/timer-marco.c
832F: drivers/dma/sirf-dma.c 835F: drivers/dma/sirf-dma.c
833F: drivers/i2c/busses/i2c-sirf.c 836F: drivers/i2c/busses/i2c-sirf.c
837F: drivers/input/misc/sirfsoc-onkey.c
838F: drivers/irqchip/irq-sirfsoc.c
834F: drivers/mmc/host/sdhci-sirf.c 839F: drivers/mmc/host/sdhci-sirf.c
835F: drivers/pinctrl/sirf/ 840F: drivers/pinctrl/sirf/
841F: drivers/rtc/rtc-sirfsoc.c
836F: drivers/spi/spi-sirf.c 842F: drivers/spi/spi-sirf.c
837 843
838ARM/EBSA110 MACHINE SUPPORT 844ARM/EBSA110 MACHINE SUPPORT
@@ -1812,7 +1818,8 @@ S: Supported
1812F: drivers/net/ethernet/broadcom/bnx2x/ 1818F: drivers/net/ethernet/broadcom/bnx2x/
1813 1819
1814BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1820BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
1815M: Christian Daudt <csd@broadcom.com> 1821M: Christian Daudt <bcm@fixthebug.org>
1822L: bcm-kernel-feedback-list@broadcom.com
1816T: git git://git.github.com/broadcom/bcm11351 1823T: git git://git.github.com/broadcom/bcm11351
1817S: Maintained 1824S: Maintained
1818F: arch/arm/mach-bcm/ 1825F: arch/arm/mach-bcm/
@@ -2639,6 +2646,18 @@ F: include/linux/device-mapper.h
2639F: include/linux/dm-*.h 2646F: include/linux/dm-*.h
2640F: include/uapi/linux/dm-*.h 2647F: include/uapi/linux/dm-*.h
2641 2648
2649DIGI NEO AND CLASSIC PCI PRODUCTS
2650M: Lidza Louina <lidza.louina@gmail.com>
2651L: driverdev-devel@linuxdriverproject.org
2652S: Maintained
2653F: drivers/staging/dgnc/
2654
2655DIGI EPCA PCI PRODUCTS
2656M: Lidza Louina <lidza.louina@gmail.com>
2657L: driverdev-devel@linuxdriverproject.org
2658S: Maintained
2659F: drivers/staging/dgap/
2660
2642DIOLAN U2C-12 I2C DRIVER 2661DIOLAN U2C-12 I2C DRIVER
2643M: Guenter Roeck <linux@roeck-us.net> 2662M: Guenter Roeck <linux@roeck-us.net>
2644L: linux-i2c@vger.kernel.org 2663L: linux-i2c@vger.kernel.org
@@ -4457,6 +4476,13 @@ L: linux-serial@vger.kernel.org
4457S: Maintained 4476S: Maintained
4458F: drivers/tty/serial/ioc3_serial.c 4477F: drivers/tty/serial/ioc3_serial.c
4459 4478
4479IOMMU DRIVERS
4480M: Joerg Roedel <joro@8bytes.org>
4481L: iommu@lists.linux-foundation.org
4482T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
4483S: Maintained
4484F: drivers/iommu/
4485
4460IP MASQUERADING 4486IP MASQUERADING
4461M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> 4487M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
4462S: Maintained 4488S: Maintained
@@ -6595,7 +6621,7 @@ S: Obsolete
6595F: drivers/net/wireless/prism54/ 6621F: drivers/net/wireless/prism54/
6596 6622
6597PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER 6623PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
6598M: Mikael Pettersson <mikpe@it.uu.se> 6624M: Mikael Pettersson <mikpelinux@gmail.com>
6599L: linux-ide@vger.kernel.org 6625L: linux-ide@vger.kernel.org
6600S: Maintained 6626S: Maintained
6601F: drivers/ata/sata_promise.* 6627F: drivers/ata/sata_promise.*
@@ -7258,9 +7284,9 @@ F: include/linux/sched.h
7258F: include/uapi/linux/sched.h 7284F: include/uapi/linux/sched.h
7259 7285
7260SCORE ARCHITECTURE 7286SCORE ARCHITECTURE
7261M: Chen Liqin <liqin.chen@sunplusct.com> 7287M: Chen Liqin <liqin.linux@gmail.com>
7262M: Lennox Wu <lennox.wu@gmail.com> 7288M: Lennox Wu <lennox.wu@gmail.com>
7263W: http://www.sunplusct.com 7289W: http://www.sunplus.com
7264S: Supported 7290S: Supported
7265F: arch/score/ 7291F: arch/score/
7266 7292
@@ -8724,9 +8750,8 @@ F: Documentation/hid/hiddev.txt
8724F: drivers/hid/usbhid/ 8750F: drivers/hid/usbhid/
8725 8751
8726USB/IP DRIVERS 8752USB/IP DRIVERS
8727M: Matt Mooney <mfm@muteddisk.com>
8728L: linux-usb@vger.kernel.org 8753L: linux-usb@vger.kernel.org
8729S: Maintained 8754S: Orphan
8730F: drivers/staging/usbip/ 8755F: drivers/staging/usbip/
8731 8756
8732USB ISP116X DRIVER 8757USB ISP116X DRIVER
@@ -9366,6 +9391,7 @@ F: arch/arm64/include/asm/xen/
9366 9391
9367XEN NETWORK BACKEND DRIVER 9392XEN NETWORK BACKEND DRIVER
9368M: Ian Campbell <ian.campbell@citrix.com> 9393M: Ian Campbell <ian.campbell@citrix.com>
9394M: Wei Liu <wei.liu2@citrix.com>
9369L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 9395L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
9370L: netdev@vger.kernel.org 9396L: netdev@vger.kernel.org
9371S: Supported 9397S: Supported
diff --git a/Makefile b/Makefile
index de004ceb6b5e..9de9aba21bf9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 12 2PATCHLEVEL = 12
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc4
5NAME = One Giant Leap for Frogkind 5NAME = One Giant Leap for Frogkind
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
286config HAVE_ARCH_JUMP_LABEL 286config HAVE_ARCH_JUMP_LABEL
287 bool 287 bool
288 288
289config HAVE_ARCH_MUTEX_CPU_RELAX
290 bool
291
292config HAVE_RCU_TABLE_FREE 289config HAVE_RCU_TABLE_FREE
293 bool 290 bool
294 291
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
45 45
46static inline void arch_spin_unlock(arch_spinlock_t *lock) 46static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{ 47{
48 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; 48 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
49
50 __asm__ __volatile__(
51 " ex %0, [%1] \n"
52 : "+r" (tmp)
53 : "r"(&(lock->slock))
54 : "memory");
55
49 smp_mb(); 56 smp_mb();
50} 57}
51 58
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
43 * Because it essentially checks if buffer end is within limit and @len is 43 * Because it essentially checks if buffer end is within limit and @len is
44 * non-negative, which implies that buffer start will be within limit too. 44 * non-negative, which implies that buffer start will be within limit too.
45 * 45 *
46 * The reason for rewriting being, for majorit yof cases, @len is generally 46 * The reason for rewriting being, for majority of cases, @len is generally
47 * compile time constant, causing first sub-expression to be compile time 47 * compile time constant, causing first sub-expression to be compile time
48 * subsumed. 48 * subsumed.
49 * 49 *
@@ -53,7 +53,7 @@
53 * 53 *
54 */ 54 */
55#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \ 55#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
56 (((addr)+(sz)) <= get_fs())) 56 ((addr) <= (get_fs() - (sz))))
57#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \ 57#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
58 likely(__user_ok((addr), (sz)))) 58 likely(__user_ok((addr), (sz))))
59 59
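The rewritten __user_ok() avoids an unsigned wraparound: in the old form a large kernel address could make (addr)+(sz) wrap past zero and slip under the limit, while the new form cannot wrap because the first sub-expression has already guaranteed sz <= TASK_SIZE <= get_fs(). A small stand-alone demonstration with invented 32-bit values (ordinary user-space C, not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t limit = 0x80000000u;   /* stand-in for get_fs() on a 32-bit arch */
                uint32_t sz    = 0x100u;
                uint32_t addr  = 0xfffffff0u;   /* kernel address, clearly above the limit */

                /* Old check: addr + sz wraps to 0xf0 and wrongly "passes". */
                printf("old form passes: %d\n", (uint32_t)(addr + sz) <= limit);

                /* New check: limit - sz cannot wrap, since sz was already checked against the limit. */
                printf("new form passes: %d\n", addr <= (uint32_t)(limit - sz));

                return 0;
        }

On a 32-bit word size the old-style test wrongly accepts the address (prints 1) while the new-style test rejects it (prints 0).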
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
101{ 101{
102 struct rt_sigframe __user *sf; 102 struct rt_sigframe __user *sf;
103 unsigned int magic; 103 unsigned int magic;
104 int err;
105 struct pt_regs *regs = current_pt_regs(); 104 struct pt_regs *regs = current_pt_regs();
106 105
107 /* Always make any pending restarted system calls return -EINTR */ 106 /* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
119 if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) 118 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
120 goto badframe; 119 goto badframe;
121 120
122 err = restore_usr_regs(regs, sf); 121 if (__get_user(magic, &sf->sigret_magic))
123 err |= __get_user(magic, &sf->sigret_magic);
124 if (err)
125 goto badframe; 122 goto badframe;
126 123
127 if (unlikely(is_do_ss_needed(magic))) 124 if (unlikely(is_do_ss_needed(magic)))
128 if (restore_altstack(&sf->uc.uc_stack)) 125 if (restore_altstack(&sf->uc.uc_stack))
129 goto badframe; 126 goto badframe;
130 127
128 if (restore_usr_regs(regs, sf))
129 goto badframe;
130
131 /* Don't restart from sigreturn */ 131 /* Don't restart from sigreturn */
132 syscall_wont_restart(regs); 132 syscall_wont_restart(regs);
133 133
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
191 return 1; 191 return 1;
192 192
193 /* 193 /*
194 * w/o SA_SIGINFO, struct ucontext is partially populated (only
195 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
196 * during signal handler execution. This works for SA_SIGINFO as well
197 * although the semantics are now overloaded (the same reg state can be
198 * inspected by userland: but are they allowed to fiddle with it ?
199 */
200 err |= stash_usr_regs(sf, regs, set);
201
202 /*
194 * SA_SIGINFO requires 3 args to signal handler: 203 * SA_SIGINFO requires 3 args to signal handler:
195 * #1: sig-no (common to any handler) 204 * #1: sig-no (common to any handler)
196 * #2: struct siginfo 205 * #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
213 magic = MAGIC_SIGALTSTK; 222 magic = MAGIC_SIGALTSTK;
214 } 223 }
215 224
216 /*
217 * w/o SA_SIGINFO, struct ucontext is partially populated (only
218 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
219 * during signal handler execution. This works for SA_SIGINFO as well
220 * although the semantics are now overloaded (the same reg state can be
221 * inspected by userland: but are they allowed to fiddle with it ?
222 */
223 err |= stash_usr_regs(sf, regs, set);
224 err |= __put_user(magic, &sf->sigret_magic); 225 err |= __put_user(magic, &sf->sigret_magic);
225 if (err) 226 if (err)
226 return err; 227 return err;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
227{ 227{
228 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); 228 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
229 229
230 clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
231
232 clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
233 clk->cpumask = cpumask_of(cpu); 230 clk->cpumask = cpumask_of(cpu);
234 231 clockevents_config_and_register(clk, arc_get_core_freq(),
235 clockevents_register_device(clk); 232 0, ARC_TIMER_MAX);
236 233
237 /* 234 /*
238 * setup the per-cpu timer IRQ handler - for all cpus 235 * setup the per-cpu timer IRQ handler - for all cpus
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
245 regs->status32 &= ~STATUS_DE_MASK; 245 regs->status32 &= ~STATUS_DE_MASK;
246 } else { 246 } else {
247 regs->ret += state.instr_len; 247 regs->ret += state.instr_len;
248
249 /* handle zero-overhead-loop */
250 if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
251 regs->ret = regs->lp_start;
252 regs->lp_count--;
253 }
248 } 254 }
249 255
250 return 0; 256 return 0;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..1ad6fb6c094d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2217,8 +2217,7 @@ config NEON
2217 2217
2218config KERNEL_MODE_NEON 2218config KERNEL_MODE_NEON
2219 bool "Support for NEON in kernel mode" 2219 bool "Support for NEON in kernel mode"
2220 default n 2220 depends on NEON && AEABI
2221 depends on NEON
2222 help 2221 help
2223 Say Y to include support for NEON in kernel mode. 2222 Say Y to include support for NEON in kernel mode.
2224 2223
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index cc0f1fb61753..802720e3e8fd 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -41,6 +41,8 @@ dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb 41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb 42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb
43 43
44dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
45
44dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb 46dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
45dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \ 47dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \
46 bcm28155-ap.dtb 48 bcm28155-ap.dtb
@@ -183,6 +185,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
183 am335x-evm.dtb \ 185 am335x-evm.dtb \
184 am335x-evmsk.dtb \ 186 am335x-evmsk.dtb \
185 am335x-bone.dtb \ 187 am335x-bone.dtb \
188 am335x-boneblack.dtb \
186 am3517-evm.dtb \ 189 am3517-evm.dtb \
187 am3517_mt_ventoux.dtb \ 190 am3517_mt_ventoux.dtb \
188 am43x-epos-evm.dtb 191 am43x-epos-evm.dtb
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
new file mode 100644
index 000000000000..2f66deda9f5c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/ {
10 model = "TI AM335x BeagleBone";
11 compatible = "ti,am335x-bone", "ti,am33xx";
12
13 cpus {
14 cpu@0 {
15 cpu0-supply = <&dcdc2_reg>;
16 };
17 };
18
19 memory {
20 device_type = "memory";
21 reg = <0x80000000 0x10000000>; /* 256 MB */
22 };
23
24 am33xx_pinmux: pinmux@44e10800 {
25 pinctrl-names = "default";
26 pinctrl-0 = <&clkout2_pin>;
27
28 user_leds_s0: user_leds_s0 {
29 pinctrl-single,pins = <
30 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
31 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
32 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
33 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
34 >;
35 };
36
37 i2c0_pins: pinmux_i2c0_pins {
38 pinctrl-single,pins = <
39 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
40 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
41 >;
42 };
43
44 uart0_pins: pinmux_uart0_pins {
45 pinctrl-single,pins = <
46 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
47 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
48 >;
49 };
50
51 clkout2_pin: pinmux_clkout2_pin {
52 pinctrl-single,pins = <
53 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
54 >;
55 };
56
57 cpsw_default: cpsw_default {
58 pinctrl-single,pins = <
59 /* Slave 1 */
60 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
61 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
62 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
63 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
64 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
65 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
66 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
67 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
68 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
69 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
70 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
71 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
72 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
73 >;
74 };
75
76 cpsw_sleep: cpsw_sleep {
77 pinctrl-single,pins = <
78 /* Slave 1 reset value */
79 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
80 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
81 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
82 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
83 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
84 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
85 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
86 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
87 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
88 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
89 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
90 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
91 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
92 >;
93 };
94
95 davinci_mdio_default: davinci_mdio_default {
96 pinctrl-single,pins = <
97 /* MDIO */
98 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
99 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
100 >;
101 };
102
103 davinci_mdio_sleep: davinci_mdio_sleep {
104 pinctrl-single,pins = <
105 /* MDIO reset value */
106 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
107 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
108 >;
109 };
110 };
111
112 ocp {
113 uart0: serial@44e09000 {
114 pinctrl-names = "default";
115 pinctrl-0 = <&uart0_pins>;
116
117 status = "okay";
118 };
119
120 musb: usb@47400000 {
121 status = "okay";
122
123 control@44e10000 {
124 status = "okay";
125 };
126
127 usb-phy@47401300 {
128 status = "okay";
129 };
130
131 usb-phy@47401b00 {
132 status = "okay";
133 };
134
135 usb@47401000 {
136 status = "okay";
137 };
138
139 usb@47401800 {
140 status = "okay";
141 dr_mode = "host";
142 };
143
144 dma-controller@07402000 {
145 status = "okay";
146 };
147 };
148
149 i2c0: i2c@44e0b000 {
150 pinctrl-names = "default";
151 pinctrl-0 = <&i2c0_pins>;
152
153 status = "okay";
154 clock-frequency = <400000>;
155
156 tps: tps@24 {
157 reg = <0x24>;
158 };
159
160 };
161 };
162
163 leds {
164 pinctrl-names = "default";
165 pinctrl-0 = <&user_leds_s0>;
166
167 compatible = "gpio-leds";
168
169 led@2 {
170 label = "beaglebone:green:heartbeat";
171 gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
172 linux,default-trigger = "heartbeat";
173 default-state = "off";
174 };
175
176 led@3 {
177 label = "beaglebone:green:mmc0";
178 gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
179 linux,default-trigger = "mmc0";
180 default-state = "off";
181 };
182
183 led@4 {
184 label = "beaglebone:green:usr2";
185 gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
186 default-state = "off";
187 };
188
189 led@5 {
190 label = "beaglebone:green:usr3";
191 gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
192 default-state = "off";
193 };
194 };
195};
196
197/include/ "tps65217.dtsi"
198
199&tps {
200 regulators {
201 dcdc1_reg: regulator@0 {
202 regulator-always-on;
203 };
204
205 dcdc2_reg: regulator@1 {
206 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
207 regulator-name = "vdd_mpu";
208 regulator-min-microvolt = <925000>;
209 regulator-max-microvolt = <1325000>;
210 regulator-boot-on;
211 regulator-always-on;
212 };
213
214 dcdc3_reg: regulator@2 {
215 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
216 regulator-name = "vdd_core";
217 regulator-min-microvolt = <925000>;
218 regulator-max-microvolt = <1150000>;
219 regulator-boot-on;
220 regulator-always-on;
221 };
222
223 ldo1_reg: regulator@3 {
224 regulator-always-on;
225 };
226
227 ldo2_reg: regulator@4 {
228 regulator-always-on;
229 };
230
231 ldo3_reg: regulator@5 {
232 regulator-always-on;
233 };
234
235 ldo4_reg: regulator@6 {
236 regulator-always-on;
237 };
238 };
239};
240
241&cpsw_emac0 {
242 phy_id = <&davinci_mdio>, <0>;
243 phy-mode = "mii";
244};
245
246&cpsw_emac1 {
247 phy_id = <&davinci_mdio>, <1>;
248 phy-mode = "mii";
249};
250
251&mac {
252 pinctrl-names = "default", "sleep";
253 pinctrl-0 = <&cpsw_default>;
254 pinctrl-1 = <&cpsw_sleep>;
255
256};
257
258&davinci_mdio {
259 pinctrl-names = "default", "sleep";
260 pinctrl-0 = <&davinci_mdio_default>;
261 pinctrl-1 = <&davinci_mdio_sleep>;
262};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index d318987d44a1..7993c489982c 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -8,258 +8,4 @@
8/dts-v1/; 8/dts-v1/;
9 9
10#include "am33xx.dtsi" 10#include "am33xx.dtsi"
11 11#include "am335x-bone-common.dtsi"
12/ {
13 model = "TI AM335x BeagleBone";
14 compatible = "ti,am335x-bone", "ti,am33xx";
15
16 cpus {
17 cpu@0 {
18 cpu0-supply = <&dcdc2_reg>;
19 };
20 };
21
22 memory {
23 device_type = "memory";
24 reg = <0x80000000 0x10000000>; /* 256 MB */
25 };
26
27 am33xx_pinmux: pinmux@44e10800 {
28 pinctrl-names = "default";
29 pinctrl-0 = <&clkout2_pin>;
30
31 user_leds_s0: user_leds_s0 {
32 pinctrl-single,pins = <
33 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
34 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
35 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
36 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
37 >;
38 };
39
40 i2c0_pins: pinmux_i2c0_pins {
41 pinctrl-single,pins = <
42 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
43 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
44 >;
45 };
46
47 uart0_pins: pinmux_uart0_pins {
48 pinctrl-single,pins = <
49 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
50 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
51 >;
52 };
53
54 clkout2_pin: pinmux_clkout2_pin {
55 pinctrl-single,pins = <
56 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
57 >;
58 };
59
60 cpsw_default: cpsw_default {
61 pinctrl-single,pins = <
62 /* Slave 1 */
63 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
64 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
65 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
66 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
67 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
68 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
69 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
70 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
71 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
72 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
73 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
74 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
75 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
76 >;
77 };
78
79 cpsw_sleep: cpsw_sleep {
80 pinctrl-single,pins = <
81 /* Slave 1 reset value */
82 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
83 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7)
84 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7)
85 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7)
86 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7)
87 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7)
88 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7)
89 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7)
90 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7)
91 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7)
92 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7)
93 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7)
94 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7)
95 >;
96 };
97
98 davinci_mdio_default: davinci_mdio_default {
99 pinctrl-single,pins = <
100 /* MDIO */
101 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
102 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
103 >;
104 };
105
106 davinci_mdio_sleep: davinci_mdio_sleep {
107 pinctrl-single,pins = <
108 /* MDIO reset value */
109 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7)
110 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7)
111 >;
112 };
113 };
114
115 ocp {
116 uart0: serial@44e09000 {
117 pinctrl-names = "default";
118 pinctrl-0 = <&uart0_pins>;
119
120 status = "okay";
121 };
122
123 musb: usb@47400000 {
124 status = "okay";
125
126 control@44e10000 {
127 status = "okay";
128 };
129
130 usb-phy@47401300 {
131 status = "okay";
132 };
133
134 usb-phy@47401b00 {
135 status = "okay";
136 };
137
138 usb@47401000 {
139 status = "okay";
140 };
141
142 usb@47401800 {
143 status = "okay";
144 dr_mode = "host";
145 };
146
147 dma-controller@07402000 {
148 status = "okay";
149 };
150 };
151
152 i2c0: i2c@44e0b000 {
153 pinctrl-names = "default";
154 pinctrl-0 = <&i2c0_pins>;
155
156 status = "okay";
157 clock-frequency = <400000>;
158
159 tps: tps@24 {
160 reg = <0x24>;
161 };
162
163 };
164 };
165
166 leds {
167 pinctrl-names = "default";
168 pinctrl-0 = <&user_leds_s0>;
169
170 compatible = "gpio-leds";
171
172 led@2 {
173 label = "beaglebone:green:heartbeat";
174 gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
175 linux,default-trigger = "heartbeat";
176 default-state = "off";
177 };
178
179 led@3 {
180 label = "beaglebone:green:mmc0";
181 gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
182 linux,default-trigger = "mmc0";
183 default-state = "off";
184 };
185
186 led@4 {
187 label = "beaglebone:green:usr2";
188 gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
189 default-state = "off";
190 };
191
192 led@5 {
193 label = "beaglebone:green:usr3";
194 gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
195 default-state = "off";
196 };
197 };
198};
199
200/include/ "tps65217.dtsi"
201
202&tps {
203 regulators {
204 dcdc1_reg: regulator@0 {
205 regulator-always-on;
206 };
207
208 dcdc2_reg: regulator@1 {
209 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
210 regulator-name = "vdd_mpu";
211 regulator-min-microvolt = <925000>;
212 regulator-max-microvolt = <1325000>;
213 regulator-boot-on;
214 regulator-always-on;
215 };
216
217 dcdc3_reg: regulator@2 {
218 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
219 regulator-name = "vdd_core";
220 regulator-min-microvolt = <925000>;
221 regulator-max-microvolt = <1150000>;
222 regulator-boot-on;
223 regulator-always-on;
224 };
225
226 ldo1_reg: regulator@3 {
227 regulator-always-on;
228 };
229
230 ldo2_reg: regulator@4 {
231 regulator-always-on;
232 };
233
234 ldo3_reg: regulator@5 {
235 regulator-always-on;
236 };
237
238 ldo4_reg: regulator@6 {
239 regulator-always-on;
240 };
241 };
242};
243
244&cpsw_emac0 {
245 phy_id = <&davinci_mdio>, <0>;
246 phy-mode = "mii";
247};
248
249&cpsw_emac1 {
250 phy_id = <&davinci_mdio>, <1>;
251 phy-mode = "mii";
252};
253
254&mac {
255 pinctrl-names = "default", "sleep";
256 pinctrl-0 = <&cpsw_default>;
257 pinctrl-1 = <&cpsw_sleep>;
258
259};
260
261&davinci_mdio {
262 pinctrl-names = "default", "sleep";
263 pinctrl-0 = <&davinci_mdio_default>;
264 pinctrl-1 = <&davinci_mdio_sleep>;
265};
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
new file mode 100644
index 000000000000..197cadf72d2c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8/dts-v1/;
9
10#include "am33xx.dtsi"
11#include "am335x-bone-common.dtsi"
12
13&ldo3_reg {
14 regulator-min-microvolt = <1800000>;
15 regulator-max-microvolt = <1800000>;
16 regulator-always-on;
17};
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 05e4485a8225..8ac2ac1f69cc 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -27,6 +27,25 @@
27 }; 27 };
28 28
29 soc { 29 soc {
30 ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
31 MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
32
33 pcie-controller {
34 status = "okay";
35
36 /* Connected to Marvell SATA controller */
37 pcie@1,0 {
38 /* Port 0, Lane 0 */
39 status = "okay";
40 };
41
42 /* Connected to FL1009 USB 3.0 controller */
43 pcie@2,0 {
44 /* Port 1, Lane 0 */
45 status = "okay";
46 };
47 };
48
30 internal-regs { 49 internal-regs {
31 serial@12000 { 50 serial@12000 {
32 clock-frequency = <200000000>; 51 clock-frequency = <200000000>;
@@ -57,6 +76,11 @@
57 marvell,pins = "mpp56"; 76 marvell,pins = "mpp56";
58 marvell,function = "gpio"; 77 marvell,function = "gpio";
59 }; 78 };
79
80 poweroff: poweroff {
81 marvell,pins = "mpp8";
82 marvell,function = "gpio";
83 };
60 }; 84 };
61 85
62 mdio { 86 mdio {
@@ -89,22 +113,6 @@
89 pwm_polarity = <0>; 113 pwm_polarity = <0>;
90 }; 114 };
91 }; 115 };
92
93 pcie-controller {
94 status = "okay";
95
96 /* Connected to Marvell SATA controller */
97 pcie@1,0 {
98 /* Port 0, Lane 0 */
99 status = "okay";
100 };
101
102 /* Connected to FL1009 USB 3.0 controller */
103 pcie@2,0 {
104 /* Port 1, Lane 0 */
105 status = "okay";
106 };
107 };
108 }; 116 };
109 }; 117 };
110 118
@@ -160,7 +168,7 @@
160 button@1 { 168 button@1 {
161 label = "Power Button"; 169 label = "Power Button";
162 linux,code = <116>; /* KEY_POWER */ 170 linux,code = <116>; /* KEY_POWER */
163 gpios = <&gpio1 30 1>; 171 gpios = <&gpio1 30 0>;
164 }; 172 };
165 173
166 button@2 { 174 button@2 {
@@ -176,4 +184,11 @@
176 }; 184 };
177 }; 185 };
178 186
187 gpio_poweroff {
188 compatible = "gpio-poweroff";
189 pinctrl-0 = <&poweroff>;
190 pinctrl-names = "default";
191 gpios = <&gpio0 8 1>;
192 };
193
179}; 194};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index def125c0eeaa..3058522f5aad 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -70,6 +70,8 @@
70 70
71 timer@20300 { 71 timer@20300 {
72 compatible = "marvell,armada-xp-timer"; 72 compatible = "marvell,armada-xp-timer";
73 clocks = <&coreclk 2>, <&refclk>;
74 clock-names = "nbclk", "fixed";
73 }; 75 };
74 76
75 coreclk: mvebu-sar@18230 { 77 coreclk: mvebu-sar@18230 {
@@ -169,4 +171,13 @@
169 }; 171 };
170 }; 172 };
171 }; 173 };
174
175 clocks {
176 /* 25 MHz reference crystal */
177 refclk: oscillator {
178 compatible = "fixed-clock";
179 #clock-cells = <0>;
180 clock-frequency = <25000000>;
181 };
182 };
172}; 183};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index cf78ac0b04b1..e74dc15efa9d 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -190,12 +190,12 @@
190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */ 190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */
191 }; 191 };
192 192
193 pinctrl_uart2_rts: uart2_rts-0 { 193 pinctrl_usart2_rts: usart2_rts-0 {
194 atmel,pins = 194 atmel,pins =
195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */ 195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */
196 }; 196 };
197 197
198 pinctrl_uart2_cts: uart2_cts-0 { 198 pinctrl_usart2_cts: usart2_cts-0 {
199 atmel,pins = 199 atmel,pins =
200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */ 200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */
201 }; 201 };
@@ -556,6 +556,7 @@
556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>; 556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>; 557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>;
558 dma-names = "rxtx"; 558 dma-names = "rxtx";
559 pinctrl-names = "default";
559 #address-cells = <1>; 560 #address-cells = <1>;
560 #size-cells = <0>; 561 #size-cells = <0>;
561 status = "disabled"; 562 status = "disabled";
@@ -567,6 +568,7 @@
567 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>; 568 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
568 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>; 569 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>;
569 dma-names = "rxtx"; 570 dma-names = "rxtx";
571 pinctrl-names = "default";
570 #address-cells = <1>; 572 #address-cells = <1>;
571 #size-cells = <0>; 573 #size-cells = <0>;
572 status = "disabled"; 574 status = "disabled";
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 8678e0c11119..6db4f81d4795 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -181,6 +181,8 @@
181 interrupts = <17>; 181 interrupts = <17>;
182 fifosize = <128>; 182 fifosize = <128>;
183 clocks = <&clks 13>; 183 clocks = <&clks 13>;
184 sirf,uart-dma-rx-channel = <21>;
185 sirf,uart-dma-tx-channel = <2>;
184 }; 186 };
185 187
186 uart1: uart@b0060000 { 188 uart1: uart@b0060000 {
@@ -199,6 +201,8 @@
199 interrupts = <19>; 201 interrupts = <19>;
200 fifosize = <128>; 202 fifosize = <128>;
201 clocks = <&clks 15>; 203 clocks = <&clks 15>;
204 sirf,uart-dma-rx-channel = <6>;
205 sirf,uart-dma-tx-channel = <7>;
202 }; 206 };
203 207
204 usp0: usp@b0080000 { 208 usp0: usp@b0080000 {
@@ -206,7 +210,10 @@
206 compatible = "sirf,prima2-usp"; 210 compatible = "sirf,prima2-usp";
207 reg = <0xb0080000 0x10000>; 211 reg = <0xb0080000 0x10000>;
208 interrupts = <20>; 212 interrupts = <20>;
213 fifosize = <128>;
209 clocks = <&clks 28>; 214 clocks = <&clks 28>;
215 sirf,usp-dma-rx-channel = <17>;
216 sirf,usp-dma-tx-channel = <18>;
210 }; 217 };
211 218
212 usp1: usp@b0090000 { 219 usp1: usp@b0090000 {
@@ -214,7 +221,10 @@
214 compatible = "sirf,prima2-usp"; 221 compatible = "sirf,prima2-usp";
215 reg = <0xb0090000 0x10000>; 222 reg = <0xb0090000 0x10000>;
216 interrupts = <21>; 223 interrupts = <21>;
224 fifosize = <128>;
217 clocks = <&clks 29>; 225 clocks = <&clks 29>;
226 sirf,usp-dma-rx-channel = <14>;
227 sirf,usp-dma-tx-channel = <15>;
218 }; 228 };
219 229
220 dmac0: dma-controller@b00b0000 { 230 dmac0: dma-controller@b00b0000 {
@@ -237,6 +247,8 @@
237 compatible = "sirf,prima2-vip"; 247 compatible = "sirf,prima2-vip";
238 reg = <0xb00C0000 0x10000>; 248 reg = <0xb00C0000 0x10000>;
239 clocks = <&clks 31>; 249 clocks = <&clks 31>;
250 interrupts = <14>;
251 sirf,vip-dma-rx-channel = <16>;
240 }; 252 };
241 253
242 spi0: spi@b00d0000 { 254 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index c037c223619a..b7a1c6d950b9 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -187,7 +187,7 @@
187 compatible = "fsl,imx27-cspi"; 187 compatible = "fsl,imx27-cspi";
188 reg = <0x1000e000 0x1000>; 188 reg = <0x1000e000 0x1000>;
189 interrupts = <16>; 189 interrupts = <16>;
190 clocks = <&clks 53>, <&clks 53>; 190 clocks = <&clks 53>, <&clks 60>;
191 clock-names = "ipg", "per"; 191 clock-names = "ipg", "per";
192 status = "disabled"; 192 status = "disabled";
193 }; 193 };
@@ -198,7 +198,7 @@
198 compatible = "fsl,imx27-cspi"; 198 compatible = "fsl,imx27-cspi";
199 reg = <0x1000f000 0x1000>; 199 reg = <0x1000f000 0x1000>;
200 interrupts = <15>; 200 interrupts = <15>;
201 clocks = <&clks 52>, <&clks 52>; 201 clocks = <&clks 52>, <&clks 60>;
202 clock-names = "ipg", "per"; 202 clock-names = "ipg", "per";
203 status = "disabled"; 203 status = "disabled";
204 }; 204 };
@@ -309,7 +309,7 @@
309 compatible = "fsl,imx27-cspi"; 309 compatible = "fsl,imx27-cspi";
310 reg = <0x10017000 0x1000>; 310 reg = <0x10017000 0x1000>;
311 interrupts = <6>; 311 interrupts = <6>;
312 clocks = <&clks 51>, <&clks 51>; 312 clocks = <&clks 51>, <&clks 60>;
313 clock-names = "ipg", "per"; 313 clock-names = "ipg", "per";
314 status = "disabled"; 314 status = "disabled";
315 }; 315 };
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index a85abb424c34..54cee6517902 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -474,7 +474,7 @@
474 compatible = "fsl,imx51-pata", "fsl,imx27-pata"; 474 compatible = "fsl,imx51-pata", "fsl,imx27-pata";
475 reg = <0x83fe0000 0x4000>; 475 reg = <0x83fe0000 0x4000>;
476 interrupts = <70>; 476 interrupts = <70>;
477 clocks = <&clks 161>; 477 clocks = <&clks 172>;
478 status = "disabled"; 478 status = "disabled";
479 }; 479 };
480 480
diff --git a/arch/arm/boot/dts/imx6q-pinfunc.h b/arch/arm/boot/dts/imx6q-pinfunc.h
index c0e38a45e4bb..9bbe82bdee41 100644
--- a/arch/arm/boot/dts/imx6q-pinfunc.h
+++ b/arch/arm/boot/dts/imx6q-pinfunc.h
@@ -207,8 +207,8 @@
207#define MX6QDL_PAD_EIM_D29__ECSPI4_SS0 0x0c8 0x3dc 0x824 0x2 0x1 207#define MX6QDL_PAD_EIM_D29__ECSPI4_SS0 0x0c8 0x3dc 0x824 0x2 0x1
208#define MX6QDL_PAD_EIM_D29__UART2_RTS_B 0x0c8 0x3dc 0x924 0x4 0x1 208#define MX6QDL_PAD_EIM_D29__UART2_RTS_B 0x0c8 0x3dc 0x924 0x4 0x1
209#define MX6QDL_PAD_EIM_D29__UART2_CTS_B 0x0c8 0x3dc 0x000 0x4 0x0 209#define MX6QDL_PAD_EIM_D29__UART2_CTS_B 0x0c8 0x3dc 0x000 0x4 0x0
210#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c4 0x3dc 0x000 0x4 0x0 210#define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c8 0x3dc 0x000 0x4 0x0
211#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c4 0x3dc 0x924 0x4 0x1 211#define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c8 0x3dc 0x924 0x4 0x1
212#define MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x0c8 0x3dc 0x000 0x5 0x0 212#define MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x0c8 0x3dc 0x000 0x5 0x0
213#define MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x0c8 0x3dc 0x8e4 0x6 0x0 213#define MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x0c8 0x3dc 0x8e4 0x6 0x0
214#define MX6QDL_PAD_EIM_D29__IPU1_DI0_PIN14 0x0c8 0x3dc 0x000 0x7 0x0 214#define MX6QDL_PAD_EIM_D29__IPU1_DI0_PIN14 0x0c8 0x3dc 0x000 0x7 0x0
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index cf7aeaf89e9c..1335b2e1bed4 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -13,6 +13,7 @@
13 cpu@0 { 13 cpu@0 {
14 device_type = "cpu"; 14 device_type = "cpu";
15 compatible = "marvell,feroceon"; 15 compatible = "marvell,feroceon";
16 reg = <0>;
16 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>; 17 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
17 clock-names = "cpu_clk", "ddrclk", "powersave"; 18 clock-names = "cpu_clk", "ddrclk", "powersave";
18 }; 19 };
@@ -167,7 +168,7 @@
167 xor@60900 { 168 xor@60900 {
168 compatible = "marvell,orion-xor"; 169 compatible = "marvell,orion-xor";
169 reg = <0x60900 0x100 170 reg = <0x60900 0x100
170 0xd0B00 0x100>; 171 0x60B00 0x100>;
171 status = "okay"; 172 status = "okay";
172 clocks = <&gate_clk 16>; 173 clocks = <&gate_clk 16>;
173 174
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index afdb16417d4e..0c514dc8460c 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
11 11
12/ { 12/ {
13 model = "TI OMAP3 BeagleBoard xM"; 13 model = "TI OMAP3 BeagleBoard xM";
14 compatible = "ti,omap3-beagle-xm, ti,omap3-beagle", "ti,omap3"; 14 compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
15 15
16 cpus { 16 cpus {
17 cpu@0 { 17 cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index bc48b114eae6..2326d11462a5 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -48,6 +48,15 @@
48 >; 48 >;
49 }; 49 };
50 50
51 mcbsp2_pins: pinmux_mcbsp2_pins {
52 pinctrl-single,pins = <
53 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */
54 0x10e (PIN_INPUT | MUX_MODE0) /* mcbsp2_clkx.mcbsp2_clkx */
55 0x110 (PIN_INPUT | MUX_MODE0) /* mcbsp2_dr.mcbsp2.dr */
56 0x112 (PIN_OUTPUT | MUX_MODE0) /* mcbsp2_dx.mcbsp2_dx */
57 >;
58 };
59
51 mmc1_pins: pinmux_mmc1_pins { 60 mmc1_pins: pinmux_mmc1_pins {
52 pinctrl-single,pins = < 61 pinctrl-single,pins = <
53 0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */ 62 0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */
@@ -93,6 +102,11 @@
93 clock-frequency = <400000>; 102 clock-frequency = <400000>;
94}; 103};
95 104
105&mcbsp2 {
106 pinctrl-names = "default";
107 pinctrl-0 = <&mcbsp2_pins>;
108};
109
96&mmc1 { 110&mmc1 {
97 pinctrl-names = "default"; 111 pinctrl-names = "default";
98 pinctrl-0 = <&mmc1_pins>; 112 pinctrl-0 = <&mmc1_pins>;
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index faa95b5b242e..814ab67c8c29 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -107,6 +107,19 @@
107 */ 107 */
108 clock-frequency = <19200000>; 108 clock-frequency = <19200000>;
109 }; 109 };
110
111 /* regulator for wl12xx on sdio5 */
112 wl12xx_vmmc: wl12xx_vmmc {
113 pinctrl-names = "default";
114 pinctrl-0 = <&wl12xx_gpio>;
115 compatible = "regulator-fixed";
116 regulator-name = "vwl1271";
117 regulator-min-microvolt = <1800000>;
118 regulator-max-microvolt = <1800000>;
119 gpio = <&gpio2 11 0>;
120 startup-delay-us = <70000>;
121 enable-active-high;
122 };
110}; 123};
111 124
112&omap4_pmx_wkup { 125&omap4_pmx_wkup {
@@ -235,6 +248,33 @@
235 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */ 248 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */
236 >; 249 >;
237 }; 250 };
251
252 /*
253 * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP
254 * REVISIT: Are the pull-ups needed for GPIO 48 and 49?
255 */
256 wl12xx_gpio: pinmux_wl12xx_gpio {
257 pinctrl-single,pins = <
258 0x26 (PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 */
259 0x2c (PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 */
260 0x30 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48 */
261 0x32 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 */
262 >;
263 };
264
265 /* wl12xx GPIO inputs and SDIO pins */
266 wl12xx_pins: pinmux_wl12xx_pins {
267 pinctrl-single,pins = <
268 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */
269 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
270 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */
271 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */
272 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */
273 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */
274 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */
275 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */
276 >;
277 };
238}; 278};
239 279
240&i2c1 { 280&i2c1 {
@@ -314,8 +354,12 @@
314}; 354};
315 355
316&mmc5 { 356&mmc5 {
317 ti,non-removable; 357 pinctrl-names = "default";
358 pinctrl-0 = <&wl12xx_pins>;
359 vmmc-supply = <&wl12xx_vmmc>;
360 non-removable;
318 bus-width = <4>; 361 bus-width = <4>;
362 cap-power-off-card;
319}; 363};
320 364
321&emif1 { 365&emif1 {
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 7951b4ea500a..4f78380ecdb8 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -140,6 +140,19 @@
140 "DMic", "Digital Mic", 140 "DMic", "Digital Mic",
141 "Digital Mic", "Digital Mic1 Bias"; 141 "Digital Mic", "Digital Mic1 Bias";
142 }; 142 };
143
144 /* regulator for wl12xx on sdio5 */
145 wl12xx_vmmc: wl12xx_vmmc {
146 pinctrl-names = "default";
147 pinctrl-0 = <&wl12xx_gpio>;
148 compatible = "regulator-fixed";
149 regulator-name = "vwl1271";
150 regulator-min-microvolt = <1800000>;
151 regulator-max-microvolt = <1800000>;
152 gpio = <&gpio2 22 0>;
153 startup-delay-us = <70000>;
154 enable-active-high;
155 };
143}; 156};
144 157
145&omap4_pmx_wkup { 158&omap4_pmx_wkup {
@@ -295,6 +308,26 @@
295 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */ 308 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */
296 >; 309 >;
297 }; 310 };
311
312 /* wl12xx GPIO output for WLAN_EN */
313 wl12xx_gpio: pinmux_wl12xx_gpio {
314 pinctrl-single,pins = <
315 0x3c (PIN_OUTPUT | MUX_MODE3) /* gpmc_nwp.gpio_54 */
316 >;
317 };
318
319 /* wl12xx GPIO inputs and SDIO pins */
320 wl12xx_pins: pinmux_wl12xx_pins {
321 pinctrl-single,pins = <
322 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */
323 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */
324 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */
325 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */
326 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */
327 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */
328 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */
329 >;
330 };
298}; 331};
299 332
300&i2c1 { 333&i2c1 {
@@ -420,8 +453,12 @@
420}; 453};
421 454
422&mmc5 { 455&mmc5 {
456 pinctrl-names = "default";
457 pinctrl-0 = <&wl12xx_pins>;
458 vmmc-supply = <&wl12xx_vmmc>;
459 non-removable;
423 bus-width = <4>; 460 bus-width = <4>;
424 ti,non-removable; 461 cap-power-off-card;
425}; 462};
426 463
427&emif1 { 464&emif1 {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 07be2cd7b318..7cdea1bfea09 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -637,7 +637,7 @@
637 omap_dwc3@4a020000 { 637 omap_dwc3@4a020000 {
638 compatible = "ti,dwc3"; 638 compatible = "ti,dwc3";
639 ti,hwmods = "usb_otg_ss"; 639 ti,hwmods = "usb_otg_ss";
640 reg = <0x4a020000 0x1000>; 640 reg = <0x4a020000 0x10000>;
641 interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>; 641 interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
642 #address-cells = <1>; 642 #address-cells = <1>;
643 #size-cells = <1>; 643 #size-cells = <1>;
@@ -645,17 +645,18 @@
645 ranges; 645 ranges;
646 dwc3@4a030000 { 646 dwc3@4a030000 {
647 compatible = "snps,dwc3"; 647 compatible = "snps,dwc3";
648 reg = <0x4a030000 0x1000>; 648 reg = <0x4a030000 0x10000>;
649 interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>; 649 interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
650 usb-phy = <&usb2_phy>, <&usb3_phy>; 650 usb-phy = <&usb2_phy>, <&usb3_phy>;
651 tx-fifo-resize; 651 tx-fifo-resize;
652 }; 652 };
653 }; 653 };
654 654
655 ocp2scp { 655 ocp2scp@4a080000 {
656 compatible = "ti,omap-ocp2scp"; 656 compatible = "ti,omap-ocp2scp";
657 #address-cells = <1>; 657 #address-cells = <1>;
658 #size-cells = <1>; 658 #size-cells = <1>;
659 reg = <0x4a080000 0x20>;
659 ranges; 660 ranges;
660 ti,hwmods = "ocp2scp1"; 661 ti,hwmods = "ocp2scp1";
661 usb2_phy: usb2phy@4a084000 { 662 usb2_phy: usb2phy@4a084000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index bbeb623fc2c6..27ed9f5144bc 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -171,7 +171,8 @@
171 compatible = "simple-bus"; 171 compatible = "simple-bus";
172 #address-cells = <1>; 172 #address-cells = <1>;
173 #size-cells = <1>; 173 #size-cells = <1>;
174 ranges = <0xb0000000 0xb0000000 0x180000>; 174 ranges = <0xb0000000 0xb0000000 0x180000>,
175 <0x56000000 0x56000000 0x1b00000>;
175 176
176 timer@b0020000 { 177 timer@b0020000 {
177 compatible = "sirf,prima2-tick"; 178 compatible = "sirf,prima2-tick";
@@ -196,25 +197,32 @@
196 uart0: uart@b0050000 { 197 uart0: uart@b0050000 {
197 cell-index = <0>; 198 cell-index = <0>;
198 compatible = "sirf,prima2-uart"; 199 compatible = "sirf,prima2-uart";
199 reg = <0xb0050000 0x10000>; 200 reg = <0xb0050000 0x1000>;
200 interrupts = <17>; 201 interrupts = <17>;
202 fifosize = <128>;
201 clocks = <&clks 13>; 203 clocks = <&clks 13>;
204 sirf,uart-dma-rx-channel = <21>;
205 sirf,uart-dma-tx-channel = <2>;
202 }; 206 };
203 207
204 uart1: uart@b0060000 { 208 uart1: uart@b0060000 {
205 cell-index = <1>; 209 cell-index = <1>;
206 compatible = "sirf,prima2-uart"; 210 compatible = "sirf,prima2-uart";
207 reg = <0xb0060000 0x10000>; 211 reg = <0xb0060000 0x1000>;
208 interrupts = <18>; 212 interrupts = <18>;
213 fifosize = <32>;
209 clocks = <&clks 14>; 214 clocks = <&clks 14>;
210 }; 215 };
211 216
212 uart2: uart@b0070000 { 217 uart2: uart@b0070000 {
213 cell-index = <2>; 218 cell-index = <2>;
214 compatible = "sirf,prima2-uart"; 219 compatible = "sirf,prima2-uart";
215 reg = <0xb0070000 0x10000>; 220 reg = <0xb0070000 0x1000>;
216 interrupts = <19>; 221 interrupts = <19>;
222 fifosize = <128>;
217 clocks = <&clks 15>; 223 clocks = <&clks 15>;
224 sirf,uart-dma-rx-channel = <6>;
225 sirf,uart-dma-tx-channel = <7>;
218 }; 226 };
219 227
220 usp0: usp@b0080000 { 228 usp0: usp@b0080000 {
@@ -222,7 +230,10 @@
222 compatible = "sirf,prima2-usp"; 230 compatible = "sirf,prima2-usp";
223 reg = <0xb0080000 0x10000>; 231 reg = <0xb0080000 0x10000>;
224 interrupts = <20>; 232 interrupts = <20>;
233 fifosize = <128>;
225 clocks = <&clks 28>; 234 clocks = <&clks 28>;
235 sirf,usp-dma-rx-channel = <17>;
236 sirf,usp-dma-tx-channel = <18>;
226 }; 237 };
227 238
228 usp1: usp@b0090000 { 239 usp1: usp@b0090000 {
@@ -230,7 +241,10 @@
230 compatible = "sirf,prima2-usp"; 241 compatible = "sirf,prima2-usp";
231 reg = <0xb0090000 0x10000>; 242 reg = <0xb0090000 0x10000>;
232 interrupts = <21>; 243 interrupts = <21>;
244 fifosize = <128>;
233 clocks = <&clks 29>; 245 clocks = <&clks 29>;
246 sirf,usp-dma-rx-channel = <14>;
247 sirf,usp-dma-tx-channel = <15>;
234 }; 248 };
235 249
236 usp2: usp@b00a0000 { 250 usp2: usp@b00a0000 {
@@ -238,7 +252,10 @@
238 compatible = "sirf,prima2-usp"; 252 compatible = "sirf,prima2-usp";
239 reg = <0xb00a0000 0x10000>; 253 reg = <0xb00a0000 0x10000>;
240 interrupts = <22>; 254 interrupts = <22>;
255 fifosize = <128>;
241 clocks = <&clks 30>; 256 clocks = <&clks 30>;
257 sirf,usp-dma-rx-channel = <10>;
258 sirf,usp-dma-tx-channel = <11>;
242 }; 259 };
243 260
244 dmac0: dma-controller@b00b0000 { 261 dmac0: dma-controller@b00b0000 {
@@ -261,6 +278,8 @@
261 compatible = "sirf,prima2-vip"; 278 compatible = "sirf,prima2-vip";
262 reg = <0xb00C0000 0x10000>; 279 reg = <0xb00C0000 0x10000>;
263 clocks = <&clks 31>; 280 clocks = <&clks 31>;
281 interrupts = <14>;
282 sirf,vip-dma-rx-channel = <16>;
264 }; 283 };
265 284
266 spi0: spi@b00d0000 { 285 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 6c26caa880f2..658fcc537576 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -193,7 +193,7 @@
193 }; 193 };
194 194
195 sdhi0: sdhi@ee100000 { 195 sdhi0: sdhi@ee100000 {
196 compatible = "renesas,r8a73a4-sdhi"; 196 compatible = "renesas,sdhi-r8a73a4";
197 reg = <0 0xee100000 0 0x100>; 197 reg = <0 0xee100000 0 0x100>;
198 interrupt-parent = <&gic>; 198 interrupt-parent = <&gic>;
199 interrupts = <0 165 4>; 199 interrupts = <0 165 4>;
@@ -202,7 +202,7 @@
202 }; 202 };
203 203
204 sdhi1: sdhi@ee120000 { 204 sdhi1: sdhi@ee120000 {
205 compatible = "renesas,r8a73a4-sdhi"; 205 compatible = "renesas,sdhi-r8a73a4";
206 reg = <0 0xee120000 0 0x100>; 206 reg = <0 0xee120000 0 0x100>;
207 interrupt-parent = <&gic>; 207 interrupt-parent = <&gic>;
208 interrupts = <0 166 4>; 208 interrupts = <0 166 4>;
@@ -211,7 +211,7 @@
211 }; 211 };
212 212
213 sdhi2: sdhi@ee140000 { 213 sdhi2: sdhi@ee140000 {
214 compatible = "renesas,r8a73a4-sdhi"; 214 compatible = "renesas,sdhi-r8a73a4";
215 reg = <0 0xee140000 0 0x100>; 215 reg = <0 0xee140000 0 0x100>;
216 interrupt-parent = <&gic>; 216 interrupt-parent = <&gic>;
217 interrupts = <0 167 4>; 217 interrupts = <0 167 4>;
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 45ac404ab6d8..3577aba82583 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -96,6 +96,5 @@
96 pfc: pfc@fffc0000 { 96 pfc: pfc@fffc0000 {
97 compatible = "renesas,pfc-r8a7778"; 97 compatible = "renesas,pfc-r8a7778";
98 reg = <0xfffc000 0x118>; 98 reg = <0xfffc000 0x118>;
99 #gpio-range-cells = <3>;
100 }; 99 };
101}; 100};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 23a62447359c..ebbe507fcbfa 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -188,7 +188,6 @@
188 pfc: pfc@fffc0000 { 188 pfc: pfc@fffc0000 {
189 compatible = "renesas,pfc-r8a7779"; 189 compatible = "renesas,pfc-r8a7779";
190 reg = <0xfffc0000 0x23c>; 190 reg = <0xfffc0000 0x23c>;
191 #gpio-range-cells = <3>;
192 }; 191 };
193 192
194 thermal@ffc48000 { 193 thermal@ffc48000 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 3b879e7c697c..413b4c29e782 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -148,11 +148,10 @@
148 pfc: pfc@e6060000 { 148 pfc: pfc@e6060000 {
149 compatible = "renesas,pfc-r8a7790"; 149 compatible = "renesas,pfc-r8a7790";
150 reg = <0 0xe6060000 0 0x250>; 150 reg = <0 0xe6060000 0 0x250>;
151 #gpio-range-cells = <3>;
152 }; 151 };
153 152
154 sdhi0: sdhi@ee100000 { 153 sdhi0: sdhi@ee100000 {
155 compatible = "renesas,r8a7790-sdhi"; 154 compatible = "renesas,sdhi-r8a7790";
156 reg = <0 0xee100000 0 0x100>; 155 reg = <0 0xee100000 0 0x100>;
157 interrupt-parent = <&gic>; 156 interrupt-parent = <&gic>;
158 interrupts = <0 165 4>; 157 interrupts = <0 165 4>;
@@ -161,7 +160,7 @@
161 }; 160 };
162 161
163 sdhi1: sdhi@ee120000 { 162 sdhi1: sdhi@ee120000 {
164 compatible = "renesas,r8a7790-sdhi"; 163 compatible = "renesas,sdhi-r8a7790";
165 reg = <0 0xee120000 0 0x100>; 164 reg = <0 0xee120000 0 0x100>;
166 interrupt-parent = <&gic>; 165 interrupt-parent = <&gic>;
167 interrupts = <0 166 4>; 166 interrupts = <0 166 4>;
@@ -170,7 +169,7 @@
170 }; 169 };
171 170
172 sdhi2: sdhi@ee140000 { 171 sdhi2: sdhi@ee140000 {
173 compatible = "renesas,r8a7790-sdhi"; 172 compatible = "renesas,sdhi-r8a7790";
174 reg = <0 0xee140000 0 0x100>; 173 reg = <0 0xee140000 0 0x100>;
175 interrupt-parent = <&gic>; 174 interrupt-parent = <&gic>;
176 interrupts = <0 167 4>; 175 interrupts = <0 167 4>;
@@ -179,7 +178,7 @@
179 }; 178 };
180 179
181 sdhi3: sdhi@ee160000 { 180 sdhi3: sdhi@ee160000 {
182 compatible = "renesas,r8a7790-sdhi"; 181 compatible = "renesas,sdhi-r8a7790";
183 reg = <0 0xee160000 0 0x100>; 182 reg = <0 0xee160000 0 0x100>;
184 interrupt-parent = <&gic>; 183 interrupt-parent = <&gic>;
185 interrupts = <0 168 4>; 184 interrupts = <0 168 4>;
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index ba59a5875a10..3955c7606a6f 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -196,7 +196,7 @@
196 }; 196 };
197 197
198 sdhi0: sdhi@ee100000 { 198 sdhi0: sdhi@ee100000 {
199 compatible = "renesas,r8a7740-sdhi"; 199 compatible = "renesas,sdhi-r8a7740";
200 reg = <0xee100000 0x100>; 200 reg = <0xee100000 0x100>;
201 interrupt-parent = <&gic>; 201 interrupt-parent = <&gic>;
202 interrupts = <0 83 4 202 interrupts = <0 83 4
@@ -208,7 +208,7 @@
208 208
209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */ 209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */
210 sdhi1: sdhi@ee120000 { 210 sdhi1: sdhi@ee120000 {
211 compatible = "renesas,r8a7740-sdhi"; 211 compatible = "renesas,sdhi-r8a7740";
212 reg = <0xee120000 0x100>; 212 reg = <0xee120000 0x100>;
213 interrupt-parent = <&gic>; 213 interrupt-parent = <&gic>;
214 interrupts = <0 88 4 214 interrupts = <0 88 4
@@ -219,7 +219,7 @@
219 }; 219 };
220 220
221 sdhi2: sdhi@ee140000 { 221 sdhi2: sdhi@ee140000 {
222 compatible = "renesas,r8a7740-sdhi"; 222 compatible = "renesas,sdhi-r8a7740";
223 reg = <0xee140000 0x100>; 223 reg = <0xee140000 0x100>;
224 interrupt-parent = <&gic>; 224 interrupt-parent = <&gic>;
225 interrupts = <0 104 4 225 interrupts = <0 104 4
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 117f955a2a06..8e1a0245907f 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -269,6 +269,11 @@ static const struct edmacc_param dummy_paramset = {
269 .ccnt = 1, 269 .ccnt = 1,
270}; 270};
271 271
272static const struct of_device_id edma_of_ids[] = {
273 { .compatible = "ti,edma3", },
274 {}
275};
276
272/*****************************************************************************/ 277/*****************************************************************************/
273 278
274static void map_dmach_queue(unsigned ctlr, unsigned ch_no, 279static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
@@ -560,14 +565,38 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
560static int prepare_unused_channel_list(struct device *dev, void *data) 565static int prepare_unused_channel_list(struct device *dev, void *data)
561{ 566{
562 struct platform_device *pdev = to_platform_device(dev); 567 struct platform_device *pdev = to_platform_device(dev);
563 int i, ctlr; 568 int i, count, ctlr;
569 struct of_phandle_args dma_spec;
564 570
571 if (dev->of_node) {
572 count = of_property_count_strings(dev->of_node, "dma-names");
573 if (count < 0)
574 return 0;
575 for (i = 0; i < count; i++) {
576 if (of_parse_phandle_with_args(dev->of_node, "dmas",
577 "#dma-cells", i,
578 &dma_spec))
579 continue;
580
581 if (!of_match_node(edma_of_ids, dma_spec.np)) {
582 of_node_put(dma_spec.np);
583 continue;
584 }
585
586 clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
587 edma_cc[0]->edma_unused);
588 of_node_put(dma_spec.np);
589 }
590 return 0;
591 }
592
593 /* For non-OF case */
565 for (i = 0; i < pdev->num_resources; i++) { 594 for (i = 0; i < pdev->num_resources; i++) {
566 if ((pdev->resource[i].flags & IORESOURCE_DMA) && 595 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
567 (int)pdev->resource[i].start >= 0) { 596 (int)pdev->resource[i].start >= 0) {
568 ctlr = EDMA_CTLR(pdev->resource[i].start); 597 ctlr = EDMA_CTLR(pdev->resource[i].start);
569 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), 598 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
570 edma_cc[ctlr]->edma_unused); 599 edma_cc[ctlr]->edma_unused);
571 } 600 }
572 } 601 }
573 602
@@ -1762,11 +1791,6 @@ static int edma_probe(struct platform_device *pdev)
1762 return 0; 1791 return 0;
1763} 1792}
1764 1793
1765static const struct of_device_id edma_of_ids[] = {
1766 { .compatible = "ti,edma3", },
1767 {}
1768};
1769
1770static struct platform_driver edma_driver = { 1794static struct platform_driver edma_driver = {
1771 .driver = { 1795 .driver = {
1772 .name = "edma", 1796 .name = "edma",
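
The new OF branch in prepare_unused_channel_list() above walks a client device's "dmas" phandle list and marks every channel that references a "ti,edma3" controller as in use. A minimal sketch of that phandle-walking pattern follows; the function name and the printout are illustrative only, not kernel API.

    #include <linux/of.h>
    #include <linux/printk.h>

    /* Sketch: iterate a device's "dmas" entries, check which controller each
     * one references, and release the node reference taken by the parser. */
    static const struct of_device_id example_edma_ids[] = {
        { .compatible = "ti,edma3", },
        { }
    };

    static void mark_used_channels(struct device_node *client)
    {
        struct of_phandle_args spec;
        int i, count;

        count = of_property_count_strings(client, "dma-names");
        for (i = 0; i < count; i++) {
            /* "#dma-cells" in the controller node says how many
             * argument cells follow each phandle */
            if (of_parse_phandle_with_args(client, "dmas", "#dma-cells",
                                           i, &spec))
                continue;
            if (of_match_node(example_edma_ids, spec.np))
                pr_info("EDMA channel %u is in use\n", spec.args[0]);
            of_node_put(spec.np);   /* balance the parser's reference */
        }
    }
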
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6e572c64cf5a..119fc378fc52 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -36,6 +36,7 @@ CONFIG_ARCH_TEGRA_114_SOC=y
36CONFIG_TEGRA_PCI=y 36CONFIG_TEGRA_PCI=y
37CONFIG_TEGRA_EMC_SCALING_ENABLE=y 37CONFIG_TEGRA_EMC_SCALING_ENABLE=y
38CONFIG_ARCH_U8500=y 38CONFIG_ARCH_U8500=y
39CONFIG_MACH_HREFV60=y
39CONFIG_MACH_SNOWBALL=y 40CONFIG_MACH_SNOWBALL=y
40CONFIG_MACH_UX500_DT=y 41CONFIG_MACH_UX500_DT=y
41CONFIG_ARCH_VEXPRESS=y 42CONFIG_ARCH_VEXPRESS=y
@@ -46,6 +47,7 @@ CONFIG_ARCH_ZYNQ=y
46CONFIG_SMP=y 47CONFIG_SMP=y
47CONFIG_HIGHPTE=y 48CONFIG_HIGHPTE=y
48CONFIG_ARM_APPENDED_DTB=y 49CONFIG_ARM_APPENDED_DTB=y
50CONFIG_ARM_ATAG_DTB_COMPAT=y
49CONFIG_NET=y 51CONFIG_NET=y
50CONFIG_UNIX=y 52CONFIG_UNIX=y
51CONFIG_INET=y 53CONFIG_INET=y
@@ -133,6 +135,7 @@ CONFIG_MMC=y
133CONFIG_MMC_ARMMMCI=y 135CONFIG_MMC_ARMMMCI=y
134CONFIG_MMC_SDHCI=y 136CONFIG_MMC_SDHCI=y
135CONFIG_MMC_SDHCI_PLTFM=y 137CONFIG_MMC_SDHCI_PLTFM=y
138CONFIG_MMC_SDHCI_ESDHC_IMX=y
136CONFIG_MMC_SDHCI_TEGRA=y 139CONFIG_MMC_SDHCI_TEGRA=y
137CONFIG_MMC_SDHCI_SPEAR=y 140CONFIG_MMC_SDHCI_SPEAR=y
138CONFIG_MMC_OMAP=y 141CONFIG_MMC_OMAP=y
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
148@ const AES_KEY *key) { 148@ const AES_KEY *key) {
149.align 5 149.align 5
150ENTRY(AES_encrypt) 150ENTRY(AES_encrypt)
151 sub r3,pc,#8 @ AES_encrypt 151 adr r3,AES_encrypt
152 stmdb sp!,{r1,r4-r12,lr} 152 stmdb sp!,{r1,r4-r12,lr}
153 mov r12,r0 @ inp 153 mov r12,r0 @ inp
154 mov r11,r2 154 mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
381.align 5 381.align 5
382ENTRY(private_AES_set_encrypt_key) 382ENTRY(private_AES_set_encrypt_key)
383_armv4_AES_set_encrypt_key: 383_armv4_AES_set_encrypt_key:
384 sub r3,pc,#8 @ AES_set_encrypt_key 384 adr r3,_armv4_AES_set_encrypt_key
385 teq r0,#0 385 teq r0,#0
386 moveq r0,#-1 386 moveq r0,#-1
387 beq .Labrt 387 beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
843@ const AES_KEY *key) { 843@ const AES_KEY *key) {
844.align 5 844.align 5
845ENTRY(AES_decrypt) 845ENTRY(AES_decrypt)
846 sub r3,pc,#8 @ AES_decrypt 846 adr r3,AES_decrypt
847 stmdb sp!,{r1,r4-r12,lr} 847 stmdb sp!,{r1,r4-r12,lr}
848 mov r12,r0 @ inp 848 mov r12,r0 @ inp
849 mov r11,r2 849 mov r11,r2
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
19#include <asm/unified.h> 19#include <asm/unified.h>
20#include <asm/compiler.h> 20#include <asm/compiler.h>
21 21
22#if __LINUX_ARM_ARCH__ < 6
23#include <asm-generic/uaccess-unaligned.h>
24#else
25#define __get_user_unaligned __get_user
26#define __put_user_unaligned __put_user
27#endif
28
22#define VERIFY_READ 0 29#define VERIFY_READ 0
23#define VERIFY_WRITE 1 30#define VERIFY_WRITE 1
24 31
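
The hunk above gives ARM the __get_user_unaligned()/__put_user_unaligned() accessors: pre-ARMv6 cores, which cannot safely load across an unaligned boundary, fall back to the byte-wise asm-generic helpers, while ARMv6 and later simply alias the regular accessors. A hedged sketch of a caller; the packed record is a made-up example.

    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical caller: the 64-bit field starts at offset 1 in the user
     * buffer, so it is not naturally aligned. On ARMv6+ this aliases to
     * __get_user(); on older cores asm-generic does a byte-wise copy. */
    struct packed_record {
        u8  tag;
        u64 value;
    } __packed;

    static int read_value(struct packed_record __user *r, u64 *out)
    {
        return __get_user_unaligned(*out, &r->value);
    }
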
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
443 443
444 add r1, sp, #S_OFF 444 add r1, sp, #S_OFF
445 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 4452: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back 446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
447 bcs arm_syscall 447 bcs arm_syscall
4482: mov why, #0 @ no longer a real syscall 448 mov why, #0 @ no longer a real syscall
449 b sys_ni_syscall @ not private func 449 b sys_ni_syscall @ not private func
450 450
451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) 451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
329#ifdef CONFIG_CONTEXT_TRACKING 329#ifdef CONFIG_CONTEXT_TRACKING
330 .if \save 330 .if \save
331 stmdb sp!, {r0-r3, ip, lr} 331 stmdb sp!, {r0-r3, ip, lr}
332 bl user_exit 332 bl context_tracking_user_exit
333 ldmia sp!, {r0-r3, ip, lr} 333 ldmia sp!, {r0-r3, ip, lr}
334 .else 334 .else
335 bl user_exit 335 bl context_tracking_user_exit
336 .endif 336 .endif
337#endif 337#endif
338 .endm 338 .endm
@@ -341,10 +341,10 @@
341#ifdef CONFIG_CONTEXT_TRACKING 341#ifdef CONFIG_CONTEXT_TRACKING
342 .if \save 342 .if \save
343 stmdb sp!, {r0-r3, ip, lr} 343 stmdb sp!, {r0-r3, ip, lr}
344 bl user_enter 344 bl context_tracking_user_enter
345 ldmia sp!, {r0-r3, ip, lr} 345 ldmia sp!, {r0-r3, ip, lr}
346 .else 346 .else
347 bl user_enter 347 bl context_tracking_user_enter
348 .endif 348 .endif
349#endif 349#endif
350 .endm 350 .endm
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 71e08baee209..c02ba4af599f 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
58 */ 58 */
59int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 59int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
60{ 60{
61 struct kvm_regs *cpu_reset; 61 struct kvm_regs *reset_regs;
62 const struct kvm_irq_level *cpu_vtimer_irq; 62 const struct kvm_irq_level *cpu_vtimer_irq;
63 63
64 switch (vcpu->arch.target) { 64 switch (vcpu->arch.target) {
65 case KVM_ARM_TARGET_CORTEX_A15: 65 case KVM_ARM_TARGET_CORTEX_A15:
66 if (vcpu->vcpu_id > a15_max_cpu_idx) 66 if (vcpu->vcpu_id > a15_max_cpu_idx)
67 return -EINVAL; 67 return -EINVAL;
68 cpu_reset = &a15_regs_reset; 68 reset_regs = &a15_regs_reset;
69 vcpu->arch.midr = read_cpuid_id(); 69 vcpu->arch.midr = read_cpuid_id();
70 cpu_vtimer_irq = &a15_vtimer_irq; 70 cpu_vtimer_irq = &a15_vtimer_irq;
71 break; 71 break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
74 } 74 }
75 75
76 /* Reset core registers */ 76 /* Reset core registers */
77 memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); 77 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
78 78
79 /* Reset CP15 registers */ 79 /* Reset CP15 registers */
80 kvm_reset_coprocs(vcpu); 80 kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 180b3024bec3..f607deb40f4d 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -93,7 +93,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
93 93
94static struct irqaction at91rm9200_timer_irq = { 94static struct irqaction at91rm9200_timer_irq = {
95 .name = "at91_tick", 95 .name = "at91_tick",
96 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 96 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
97 .handler = at91rm9200_timer_interrupt, 97 .handler = at91rm9200_timer_interrupt,
98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
99}; 99};
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 3a4bc2e1a65e..bb392320a0dd 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -171,7 +171,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
171 171
172static struct irqaction at91sam926x_pit_irq = { 172static struct irqaction at91sam926x_pit_irq = {
173 .name = "at91_tick", 173 .name = "at91_tick",
174 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 174 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
175 .handler = at91sam926x_pit_interrupt, 175 .handler = at91sam926x_pit_interrupt,
176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
177}; 177};
diff --git a/arch/arm/mach-at91/at91sam9g45_reset.S b/arch/arm/mach-at91/at91sam9g45_reset.S
index 721a1a34dd1d..c40c1e2ef80f 100644
--- a/arch/arm/mach-at91/at91sam9g45_reset.S
+++ b/arch/arm/mach-at91/at91sam9g45_reset.S
@@ -16,11 +16,17 @@
16#include "at91_rstc.h" 16#include "at91_rstc.h"
17 .arm 17 .arm
18 18
19/*
20 * at91_ramc_base is an array of void*
21 * entries are initialised to NULL when only one DDR controller is present on the board or in the DT
22 */
19 .globl at91sam9g45_restart 23 .globl at91sam9g45_restart
20 24
21at91sam9g45_restart: 25at91sam9g45_restart:
22 ldr r5, =at91_ramc_base @ preload constants 26 ldr r5, =at91_ramc_base @ preload constants
23 ldr r0, [r5] 27 ldr r0, [r5]
28 ldr r5, [r5, #4] @ ddr1
29 cmp r5, #0
24 ldr r4, =at91_rstc_base 30 ldr r4, =at91_rstc_base
25 ldr r1, [r4] 31 ldr r1, [r4]
26 32
@@ -30,6 +36,8 @@ at91sam9g45_restart:
30 36
31 .balign 32 @ align to cache line 37 .balign 32 @ align to cache line
32 38
39 strne r2, [r5, #AT91_DDRSDRC_RTR] @ disable DDR1 access
40 strne r3, [r5, #AT91_DDRSDRC_LPR] @ power down DDR1
33 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access 41 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access
34 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0 42 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0
35 str r4, [r1, #AT91_RSTC_CR] @ reset processor 43 str r4, [r1, #AT91_RSTC_CR] @ reset processor
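
For context on the at91sam9g45 reset hunk above: the assembly indexes the C-side at91_ramc_base array, loading entry 1 with "ldr r5, [r5, #4]" and skipping the DDR1 writes when it is NULL. A rough C view of that check is below; the array size is an assumption for illustration.

    #include <linux/types.h>
    #include <linux/io.h>

    /* entry 0 is DDR0, entry 1 is DDR1 or NULL on single-controller boards;
     * this mirrors what the "cmp r5, #0" in the assembly tests */
    extern void __iomem *at91_ramc_base[2];

    static bool second_ddr_present(void)
    {
        return at91_ramc_base[1] != NULL;
    }
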
diff --git a/arch/arm/mach-at91/at91x40_time.c b/arch/arm/mach-at91/at91x40_time.c
index 2919eba41ff4..c0e637adf65d 100644
--- a/arch/arm/mach-at91/at91x40_time.c
+++ b/arch/arm/mach-at91/at91x40_time.c
@@ -57,7 +57,7 @@ static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id)
57 57
58static struct irqaction at91x40_timer_irq = { 58static struct irqaction at91x40_timer_irq = {
59 .name = "at91_tick", 59 .name = "at91_tick",
60 .flags = IRQF_DISABLED | IRQF_TIMER, 60 .flags = IRQF_TIMER,
61 .handler = at91x40_timer_interrupt 61 .handler = at91x40_timer_interrupt
62}; 62};
63 63
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 92b7f770615a..4078ba93776b 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -176,7 +176,7 @@ static struct at24_platform_data eeprom_info = {
176 .context = (void *)0x7f00, 176 .context = (void *)0x7f00,
177}; 177};
178 178
179static struct snd_platform_data dm365_evm_snd_data = { 179static struct snd_platform_data dm365_evm_snd_data __maybe_unused = {
180 .asp_chan_q = EVENTQ_3, 180 .asp_chan_q = EVENTQ_3,
181}; 181};
182 182
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index 52b8571b2e70..ce402cd21fa0 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,8 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18#include <linux/platform_device.h>
19
20#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) 18#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
21#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) 19#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
22#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800) 20#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
@@ -39,6 +37,8 @@
39#define UART_DM646X_SCR_TX_WATERMARK 0x08 37#define UART_DM646X_SCR_TX_WATERMARK 0x08
40 38
41#ifndef __ASSEMBLY__ 39#ifndef __ASSEMBLY__
40#include <linux/platform_device.h>
41
42extern int davinci_serial_init(struct platform_device *); 42extern int davinci_serial_init(struct platform_device *);
43#endif 43#endif
44 44
diff --git a/arch/arm/mach-imx/clk-fixup-mux.c b/arch/arm/mach-imx/clk-fixup-mux.c
index deb4b8093b30..0d40b35c557c 100644
--- a/arch/arm/mach-imx/clk-fixup-mux.c
+++ b/arch/arm/mach-imx/clk-fixup-mux.c
@@ -90,6 +90,7 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
90 init.ops = &clk_fixup_mux_ops; 90 init.ops = &clk_fixup_mux_ops;
91 init.parent_names = parents; 91 init.parent_names = parents;
92 init.num_parents = num_parents; 92 init.num_parents = num_parents;
93 init.flags = 0;
93 94
94 fixup_mux->mux.reg = reg; 95 fixup_mux->mux.reg = reg;
95 fixup_mux->mux.shift = shift; 96 fixup_mux->mux.shift = shift;
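
The one-liner above matters because the struct clk_init_data filled in by imx_clk_fixup_mux() is a local variable, so any field that is never assigned holds stack garbage. An equivalent, slightly more idiomatic fix is a designated initializer, sketched below with the same field names; members not named are zeroed by the language.

    /* Sketch: designated initializer inside imx_clk_fixup_mux(); .flags and
     * any other unnamed member are implicitly zero. */
    struct clk_init_data init = {
        .name         = name,
        .ops          = &clk_fixup_mux_ops,
        .parent_names = parents,
        .num_parents  = num_parents,
    };
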
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index c3cfa4116dc0..c6b40f386786 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
285 clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL); 285 clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
286 clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc"); 286 clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
287 clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL); 287 clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
288 clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0"); 288 clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
289 clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL); 289 clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
290 290
291 mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1); 291 mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index 1a56a3319997..7c0dc4540aa4 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
328 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1"); 328 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
329 clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2"); 329 clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
330 clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); 330 clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
331 clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0"); 331 clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
332 clk_register_clkdev(clk[iim_gate], "iim", NULL); 332 clk_register_clkdev(clk[iim_gate], "iim", NULL);
333 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0"); 333 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
334 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1"); 334 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
@@ -397,7 +397,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
397 mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel)); 397 mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
398 clk[spdif1_sel] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2, 398 clk[spdif1_sel] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
399 spdif_sel, ARRAY_SIZE(spdif_sel)); 399 spdif_sel, ARRAY_SIZE(spdif_sel));
400 clk[spdif1_pred] = imx_clk_divider("spdif1_podf", "spdif1_sel", MXC_CCM_CDCDR, 16, 3); 400 clk[spdif1_pred] = imx_clk_divider("spdif1_pred", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
401 clk[spdif1_podf] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6); 401 clk[spdif1_podf] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6);
402 clk[spdif1_com_sel] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1, 402 clk[spdif1_com_sel] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1,
403 mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel)); 403 mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel));
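
The two i.MX clkdev hunks above (and the shmobile clock hunks further down) rename the CPU clock's dev_id from the old cpufreq-cpu0 platform device to "cpu0", because the cpufreq driver now does its clk_get() against the cpu0 device returned by get_cpu_device(0). A minimal sketch of how the two sides pair up, assuming dev_name() of that device is "cpu0":

    #include <linux/clk.h>
    #include <linux/clkdev.h>
    #include <linux/cpu.h>

    /* Provider registers the lookup, consumer resolves it by device name. */
    static struct clk *register_and_get_cpu_clk(struct clk *cpu_clk)
    {
        clk_register_clkdev(cpu_clk, NULL, "cpu0");   /* dev_id = "cpu0" */
        return clk_get(get_cpu_device(0), NULL);      /* matches that dev_id */
    }
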
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 85a1b51346c8..90372a21087f 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -233,10 +233,15 @@ put_node:
233 of_node_put(np); 233 of_node_put(np);
234} 234}
235 235
236static void __init imx6q_opp_init(struct device *cpu_dev) 236static void __init imx6q_opp_init(void)
237{ 237{
238 struct device_node *np; 238 struct device_node *np;
239 struct device *cpu_dev = get_cpu_device(0);
239 240
241 if (!cpu_dev) {
242 pr_warn("failed to get cpu0 device\n");
243 return;
244 }
240 np = of_node_get(cpu_dev->of_node); 245 np = of_node_get(cpu_dev->of_node);
241 if (!np) { 246 if (!np) {
242 pr_warn("failed to find cpu0 node\n"); 247 pr_warn("failed to find cpu0 node\n");
@@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
268 imx6q_cpuidle_init(); 273 imx6q_cpuidle_init();
269 274
270 if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) { 275 if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
271 imx6q_opp_init(&imx6q_cpufreq_pdev.dev); 276 imx6q_opp_init();
272 platform_device_register(&imx6q_cpufreq_pdev); 277 platform_device_register(&imx6q_cpufreq_pdev);
273 } 278 }
274} 279}
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c
index 64ff37ea72b1..80c177c36c5f 100644
--- a/arch/arm/mach-imx/system.c
+++ b/arch/arm/mach-imx/system.c
@@ -117,6 +117,17 @@ void __init imx_init_l2cache(void)
117 /* Configure the L2 PREFETCH and POWER registers */ 117 /* Configure the L2 PREFETCH and POWER registers */
118 val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); 118 val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
119 val |= 0x70800000; 119 val |= 0x70800000;
120 /*
121 * The L2 cache controller (PL310) version on the i.MX6D/Q is r3p1-50rel0
122 * The L2 cache controller (PL310) version on the i.MX6DL/SOLO/SL is r3p2
123 * But according to ARM PL310 errata: 752271
124 * ID: 752271: Double linefill feature can cause data corruption
125 * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
126 * Workaround: The only workaround to this erratum is to disable the
127 * double linefill feature. This is the default behavior.
128 */
129 if (cpu_is_imx6q())
130 val &= ~(1 << 30 | 1 << 23);
120 writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); 131 writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
121 val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN; 132 val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
122 writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL); 133 writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
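
The i.MX6Q branch above clears two raw bits in the PL310 prefetch control register to apply the erratum 752271 workaround. Named constants make the intent clearer; the names below follow the PL310 TRM (bit 30 double linefill enable, bit 23 incr double linefill enable) and are an assumption rather than definitions taken from the headers of this tree.

    #include <linux/types.h>

    #define L2X0_PF_DBL_LINEFILL        (1u << 30)  /* assumed name */
    #define L2X0_PF_DBL_LINEFILL_INCR   (1u << 23)  /* assumed name */

    /* Erratum 752271: the only workaround is to disable double linefill. */
    static u32 pl310_erratum_752271_fixup(u32 prefetch_ctrl)
    {
        return prefetch_ctrl & ~(L2X0_PF_DBL_LINEFILL |
                                 L2X0_PF_DBL_LINEFILL_INCR);
    }
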
diff --git a/arch/arm/mach-integrator/pci_v3.h b/arch/arm/mach-integrator/pci_v3.h
index 755fd29fed4a..06a9e2e7d007 100644
--- a/arch/arm/mach-integrator/pci_v3.h
+++ b/arch/arm/mach-integrator/pci_v3.h
@@ -1,2 +1,9 @@
1/* Simple oneliner include to the PCIv3 early init */ 1/* Simple oneliner include to the PCIv3 early init */
2#ifdef CONFIG_PCI
2extern int pci_v3_early_init(void); 3extern int pci_v3_early_init(void);
4#else
5static inline int pci_v3_early_init(void)
6{
7 return 0;
8}
9#endif
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 4c24303ec481..58adf2fd9cfc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -140,6 +140,7 @@ int __init coherency_init(void)
140 coherency_base = of_iomap(np, 0); 140 coherency_base = of_iomap(np, 0);
141 coherency_cpu_base = of_iomap(np, 1); 141 coherency_cpu_base = of_iomap(np, 1);
142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0); 142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
143 of_node_put(np);
143 } 144 }
144 145
145 return 0; 146 return 0;
@@ -147,9 +148,14 @@ int __init coherency_init(void)
147 148
148static int __init coherency_late_init(void) 149static int __init coherency_late_init(void)
149{ 150{
150 if (of_find_matching_node(NULL, of_coherency_table)) 151 struct device_node *np;
152
153 np = of_find_matching_node(NULL, of_coherency_table);
154 if (np) {
151 bus_register_notifier(&platform_bus_type, 155 bus_register_notifier(&platform_bus_type,
152 &mvebu_hwcc_platform_nb); 156 &mvebu_hwcc_platform_nb);
157 of_node_put(np);
158 }
153 return 0; 159 return 0;
154} 160}
155 161
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 3cc4bef6401c..27fc4f049474 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -67,6 +67,7 @@ int __init armada_370_xp_pmsu_init(void)
67 pr_info("Initializing Power Management Service Unit\n"); 67 pr_info("Initializing Power Management Service Unit\n");
68 pmsu_mp_base = of_iomap(np, 0); 68 pmsu_mp_base = of_iomap(np, 0);
69 pmsu_reset_base = of_iomap(np, 1); 69 pmsu_reset_base = of_iomap(np, 1);
70 of_node_put(np);
70 } 71 }
71 72
72 return 0; 73 return 0;
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index f875124ff4f9..5175083cdb34 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -98,6 +98,7 @@ static int __init mvebu_system_controller_init(void)
98 BUG_ON(!match); 98 BUG_ON(!match);
99 system_controller_base = of_iomap(np, 0); 99 system_controller_base = of_iomap(np, 0);
100 mvebu_sc = (struct mvebu_system_controller *)match->data; 100 mvebu_sc = (struct mvebu_system_controller *)match->data;
101 of_node_put(np);
101 } 102 }
102 103
103 return 0; 104 return 0;
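
The mvebu hunks above all close the same leak: of_find_compatible_node() and of_find_matching_node() return a device node with its reference count raised, and the caller must drop it with of_node_put() once the node has been used. The general shape of the fix, as a small sketch:

    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *map_first_match(const struct of_device_id *table)
    {
        struct device_node *np;
        void __iomem *base = NULL;

        np = of_find_matching_node(NULL, table);
        if (np) {
            base = of_iomap(np, 0);
            of_node_put(np);    /* balance the reference taken by the find */
        }
        return base;
    }
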
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 1d5b5290d2af..b237950eb8a3 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -1632,7 +1632,7 @@ static struct omap_clk omap44xx_clks[] = {
1632 CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck), 1632 CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck),
1633 CLK(NULL, "auxclk5_ck", &auxclk5_ck), 1633 CLK(NULL, "auxclk5_ck", &auxclk5_ck),
1634 CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck), 1634 CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck),
1635 CLK("omap-gpmc", "fck", &dummy_ck), 1635 CLK("50000000.gpmc", "fck", &dummy_ck),
1636 CLK("omap_i2c.1", "ick", &dummy_ck), 1636 CLK("omap_i2c.1", "ick", &dummy_ck),
1637 CLK("omap_i2c.2", "ick", &dummy_ck), 1637 CLK("omap_i2c.2", "ick", &dummy_ck),
1638 CLK("omap_i2c.3", "ick", &dummy_ck), 1638 CLK("omap_i2c.3", "ick", &dummy_ck),
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index c443f2e97e10..4c8982ae9529 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -143,7 +143,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
143 * Call idle CPU cluster PM exit notifier chain 143 * Call idle CPU cluster PM exit notifier chain
144 * to restore GIC and wakeupgen context. 144 * to restore GIC and wakeupgen context.
145 */ 145 */
146 if ((cx->mpu_state == PWRDM_POWER_RET) && 146 if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) &&
147 (cx->mpu_logic_state == PWRDM_POWER_OFF)) 147 (cx->mpu_logic_state == PWRDM_POWER_OFF))
148 cpu_cluster_pm_exit(); 148 cpu_cluster_pm_exit();
149 149
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 9f4795aff48a..579697adaae7 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1491,8 +1491,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
1491 */ 1491 */
1492 ret = gpmc_cs_remap(cs, res.start); 1492 ret = gpmc_cs_remap(cs, res.start);
1493 if (ret < 0) { 1493 if (ret < 0) {
1494 dev_err(&pdev->dev, "cannot remap GPMC CS %d to 0x%x\n", 1494 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
1495 cs, res.start); 1495 cs, &res.start);
1496 goto err; 1496 goto err;
1497 } 1497 }
1498 1498
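
res.start in the gpmc hunk above is a resource_size_t, whose width depends on the configuration, so printing it with a plain integer format is not portable. The %pa specifier prints a physical address and, unusually, takes a pointer to the value, which is why the argument becomes &res.start. A small sketch:

    #include <linux/device.h>
    #include <linux/ioport.h>

    static void report_remap_failure(struct device *dev, int cs,
                                     const struct resource *res)
    {
        /* %pa consumes a pointer to a phys_addr_t/resource_size_t */
        dev_err(dev, "cannot remap GPMC CS %d to %pa\n", cs, &res->start);
    }
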
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index c53609f46294..be271f1d585b 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -620,7 +620,7 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
620 "uart1_rts", "ssi1_flag_tx", NULL, NULL, 620 "uart1_rts", "ssi1_flag_tx", NULL, NULL,
621 "gpio_149", NULL, NULL, "safe_mode"), 621 "gpio_149", NULL, NULL, "safe_mode"),
622 _OMAP3_MUXENTRY(UART1_RX, 151, 622 _OMAP3_MUXENTRY(UART1_RX, 151,
623 "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk", 623 "uart1_rx", "ssi1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
624 "gpio_151", NULL, NULL, "safe_mode"), 624 "gpio_151", NULL, NULL, "safe_mode"),
625 _OMAP3_MUXENTRY(UART1_TX, 148, 625 _OMAP3_MUXENTRY(UART1_TX, 148,
626 "uart1_tx", "ssi1_dat_tx", NULL, NULL, 626 "uart1_tx", "ssi1_dat_tx", NULL, NULL,
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 8708b2a9da45..891211093295 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * OMAP4 SMP source file. It contains platform specific fucntions 2 * OMAP4 SMP source file. It contains platform specific functions
3 * needed for the linux smp kernel. 3 * needed for the linux smp kernel.
4 * 4 *
5 * Copyright (C) 2009 Texas Instruments, Inc. 5 * Copyright (C) 2009 Texas Instruments, Inc.
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f99f68e1e85b..b69dd9abb50a 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -158,7 +158,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
158 } 158 }
159 159
160 od = omap_device_alloc(pdev, hwmods, oh_cnt); 160 od = omap_device_alloc(pdev, hwmods, oh_cnt);
161 if (!od) { 161 if (IS_ERR(od)) {
162 dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n", 162 dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
163 oh_name); 163 oh_name);
164 ret = PTR_ERR(od); 164 ret = PTR_ERR(od);
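
omap_device_alloc() reports failure through ERR_PTR() rather than NULL, so the old "if (!od)" test let error pointers slip through and the later PTR_ERR(od) was meaningless. The hunk switches to the standard idiom, sketched below with a hypothetical allocator:

    #include <linux/err.h>

    struct foo;

    static int use_allocation(struct foo *(*alloc)(void))
    {
        struct foo *p = alloc();

        if (IS_ERR(p))
            return PTR_ERR(p);  /* recover the errno, e.g. -ENOMEM */
        /* ... use p ... */
        return 0;
    }
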
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 612a45689770..7fb96ebdc0fb 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -289,7 +289,7 @@ static void collie_flash_exit(void)
289} 289}
290 290
291static struct flash_platform_data collie_flash_data = { 291static struct flash_platform_data collie_flash_data = {
292 .map_name = "cfi_probe", 292 .map_name = "jedec_probe",
293 .init = collie_flash_init, 293 .init = collie_flash_init,
294 .set_vpp = collie_set_vpp, 294 .set_vpp = collie_set_vpp,
295 .exit = collie_flash_exit, 295 .exit = collie_flash_exit,
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 5bd1479d3deb..7f8f6076d360 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1108,9 +1108,9 @@ static const struct pinctrl_map eva_pinctrl_map[] = {
1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740", 1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740",
1109 "fsib_mclk_in", "fsib"), 1109 "fsib_mclk_in", "fsib"),
1110 /* GETHER */ 1110 /* GETHER */
1111 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1111 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1112 "gether_mii", "gether"), 1112 "gether_mii", "gether"),
1113 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1113 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1114 "gether_int", "gether"), 1114 "gether_int", "gether"),
1115 /* HDMI */ 1115 /* HDMI */
1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740", 1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740",
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index ffb6f0ac7606..5930af8d434f 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -29,6 +29,7 @@
29#include <linux/pinctrl/machine.h> 29#include <linux/pinctrl/machine.h>
30#include <linux/platform_data/gpio-rcar.h> 30#include <linux/platform_data/gpio-rcar.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/phy.h>
32#include <linux/regulator/fixed.h> 33#include <linux/regulator/fixed.h>
33#include <linux/regulator/machine.h> 34#include <linux/regulator/machine.h>
34#include <linux/sh_eth.h> 35#include <linux/sh_eth.h>
@@ -155,6 +156,30 @@ static void __init lager_add_standard_devices(void)
155 &ether_pdata, sizeof(ether_pdata)); 156 &ether_pdata, sizeof(ether_pdata));
156} 157}
157 158
159/*
160 * Ether LEDs on the Lager board are named LINK and ACTIVE which corresponds
161 * to non-default 01 setting of the Micrel KSZ8041 PHY control register 1 bits
162 * 14-15. We have to set them back to 01 from the default 00 value each time
163 * the PHY is reset. It's also important because the PHY's LED0 signal is
164 * connected to SoC's ETH_LINK signal and in the PHY's default mode it will
165 * bounce on and off after each packet, which we apparently want to avoid.
166 */
167static int lager_ksz8041_fixup(struct phy_device *phydev)
168{
169 u16 phyctrl1 = phy_read(phydev, 0x1e);
170
171 phyctrl1 &= ~0xc000;
172 phyctrl1 |= 0x4000;
173 return phy_write(phydev, 0x1e, phyctrl1);
174}
175
176static void __init lager_init(void)
177{
178 lager_add_standard_devices();
179
180 phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
181}
182
158static const char *lager_boards_compat_dt[] __initdata = { 183static const char *lager_boards_compat_dt[] __initdata = {
159 "renesas,lager", 184 "renesas,lager",
160 NULL, 185 NULL,
@@ -163,6 +188,6 @@ static const char *lager_boards_compat_dt[] __initdata = {
163DT_MACHINE_START(LAGER_DT, "lager") 188DT_MACHINE_START(LAGER_DT, "lager")
164 .init_early = r8a7790_init_delay, 189 .init_early = r8a7790_init_delay,
165 .init_time = r8a7790_timer_init, 190 .init_time = r8a7790_timer_init,
166 .init_machine = lager_add_standard_devices, 191 .init_machine = lager_init,
167 .dt_compat = lager_boards_compat_dt, 192 .dt_compat = lager_boards_compat_dt,
168MACHINE_END 193MACHINE_END
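
lager_init() above registers a PHY fixup keyed on the MDIO bus id string, so the LED-mode bits of that one KSZ8041 are rewritten every time the PHY is probed. The same mechanism can be keyed on the PHY ID register instead; a hedged sketch follows, with placeholder ID and mask values.

    #include <linux/init.h>
    #include <linux/phy.h>

    #define EXAMPLE_PHY_ID      0x00221510  /* placeholder KSZ8041-style ID */
    #define EXAMPLE_PHY_ID_MASK 0xfffffff0

    static int example_led_fixup(struct phy_device *phydev)
    {
        int val = phy_read(phydev, 0x1e);   /* PHY Control 1 register */

        if (val < 0)
            return val;
        return phy_write(phydev, 0x1e, (val & ~0xc000) | 0x4000);
    }

    static int __init example_board_init(void)
    {
        /* keyed on the ID register rather than the bus address */
        return phy_register_fixup_for_uid(EXAMPLE_PHY_ID, EXAMPLE_PHY_ID_MASK,
                                          example_led_fixup);
    }
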
diff --git a/arch/arm/mach-shmobile/clock-r8a73a4.c b/arch/arm/mach-shmobile/clock-r8a73a4.c
index 8ea5ef6c79cc..5bd2e851e3c7 100644
--- a/arch/arm/mach-shmobile/clock-r8a73a4.c
+++ b/arch/arm/mach-shmobile/clock-r8a73a4.c
@@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
555 CLKDEV_CON_ID("pll2h", &pll2h_clk), 555 CLKDEV_CON_ID("pll2h", &pll2h_clk),
556 556
557 /* CPU clock */ 557 /* CPU clock */
558 CLKDEV_DEV_ID("cpufreq-cpu0", &z_clk), 558 CLKDEV_DEV_ID("cpu0", &z_clk),
559 559
560 /* DIV6 */ 560 /* DIV6 */
561 CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]), 561 CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 1942eaef5181..c92c023f0d27 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
616 CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */ 616 CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
617 617
618 /* DIV4 clocks */ 618 /* DIV4 clocks */
619 CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]), 619 CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
620 620
621 /* DIV6 clocks */ 621 /* DIV6 clocks */
622 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), 622 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index a85adcd00882..a1659863bfd5 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -1,7 +1,3 @@
1menu "ST-Ericsson AB U300/U335 Platform"
2
3comment "ST-Ericsson Mobile Platform Products"
4
5config ARCH_U300 1config ARCH_U300
6 bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5 2 bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
7 depends on MMU 3 depends on MMU
@@ -25,7 +21,9 @@ config ARCH_U300
25 help 21 help
26 Support for ST-Ericsson U300 series mobile platforms. 22 Support for ST-Ericsson U300 series mobile platforms.
27 23
28comment "ST-Ericsson U300/U335 Feature Selections" 24if ARCH_U300
25
26menu "ST-Ericsson AB U300/U335 Platform"
29 27
30config MACH_U300 28config MACH_U300
31 depends on ARCH_U300 29 depends on ARCH_U300
@@ -53,3 +51,5 @@ config MACH_U300_SPIDUMMY
53 SPI framework and ARM PL022 support. 51 SPI framework and ARM PL022 support.
54 52
55endmenu 53endmenu
54
55endif
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 82ccf1d98735..264f894c0e3d 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -69,6 +69,7 @@ static int __init ux500_l2x0_init(void)
69 * some SMI service available. 69 * some SMI service available.
70 */ 70 */
71 outer_cache.disable = NULL; 71 outer_cache.disable = NULL;
72 outer_cache.set_debug = NULL;
72 73
73 return 0; 74 return 0;
74} 75}
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 7aeb5d60e484..e6eb48192912 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -131,6 +131,16 @@ static void tc2_pm_down(u64 residency)
131 } else 131 } else
132 BUG(); 132 BUG();
133 133
134 /*
135 * If the CPU is committed to power down, make sure
136 * the power controller will be in charge of waking it
137 * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
138 * to the CPU by disabling the GIC CPU IF to prevent wfi
139 * from completing execution behind power controller back
140 */
141 if (!skip_wfi)
142 gic_cpu_if_down();
143
134 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 144 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
135 arch_spin_unlock(&tc2_pm_lock); 145 arch_spin_unlock(&tc2_pm_lock);
136 146
@@ -231,7 +241,6 @@ static void tc2_pm_suspend(u64 residency)
231 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); 241 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
232 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 242 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
233 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); 243 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
234 gic_cpu_if_down();
235 tc2_pm_down(residency); 244 tc2_pm_down(residency);
236} 245}
237 246
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe954d49..835c559786bd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,13 +6,6 @@ config FRAME_POINTER
6 bool 6 bool
7 default y 7 default y
8 8
9config DEBUG_STACK_USAGE
10 bool "Enable stack utilization instrumentation"
11 depends on DEBUG_KERNEL
12 help
13 Enables the display of the minimum amount of free stack which each
14 task has ever had available in the sysrq-T output.
15
16config EARLY_PRINTK 9config EARLY_PRINTK
17 bool "Early printk support" 10 bool "Early printk support"
18 default y 11 default y
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5b3e83217b03..31c81e9b792e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -42,7 +42,7 @@ CONFIG_IP_PNP_BOOTP=y
42# CONFIG_WIRELESS is not set 42# CONFIG_WIRELESS is not set
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y 44CONFIG_DEVTMPFS=y
45# CONFIG_BLK_DEV is not set 45CONFIG_BLK_DEV=y
46CONFIG_SCSI=y 46CONFIG_SCSI=y
47# CONFIG_SCSI_PROC_FS is not set 47# CONFIG_SCSI_PROC_FS is not set
48CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
@@ -72,6 +72,7 @@ CONFIG_LOGO=y
72# CONFIG_IOMMU_SUPPORT is not set 72# CONFIG_IOMMU_SUPPORT is not set
73CONFIG_EXT2_FS=y 73CONFIG_EXT2_FS=y
74CONFIG_EXT3_FS=y 74CONFIG_EXT3_FS=y
75CONFIG_EXT4_FS=y
75# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 76# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
76# CONFIG_EXT3_FS_XATTR is not set 77# CONFIG_EXT3_FS_XATTR is not set
77CONFIG_FUSE_FS=y 78CONFIG_FUSE_FS=y
@@ -90,3 +91,5 @@ CONFIG_DEBUG_KERNEL=y
90CONFIG_DEBUG_INFO=y 91CONFIG_DEBUG_INFO=y
91# CONFIG_FTRACE is not set 92# CONFIG_FTRACE is not set
92CONFIG_ATOMIC64_SELFTEST=y 93CONFIG_ATOMIC64_SELFTEST=y
94CONFIG_VIRTIO_MMIO=y
95CONFIG_VIRTIO_BLK=y
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6d4482fa35bc..e2950b098e76 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -43,6 +43,6 @@
43 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ 43 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
44 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) 44 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
45 45
46extern unsigned int elf_hwcap; 46extern unsigned long elf_hwcap;
47#endif 47#endif
48#endif 48#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index edb3d5c73a32..7ecc2b23882e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,9 +166,10 @@ do { \
166 166
167#define get_user(x, ptr) \ 167#define get_user(x, ptr) \
168({ \ 168({ \
169 __typeof__(*(ptr)) __user *__p = (ptr); \
169 might_fault(); \ 170 might_fault(); \
170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \ 171 access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
171 __get_user((x), (ptr)) : \ 172 __get_user((x), __p) : \
172 ((x) = 0, -EFAULT); \ 173 ((x) = 0, -EFAULT); \
173}) 174})
174 175
@@ -227,9 +228,10 @@ do { \
227 228
228#define put_user(x, ptr) \ 229#define put_user(x, ptr) \
229({ \ 230({ \
231 __typeof__(*(ptr)) __user *__p = (ptr); \
230 might_fault(); \ 232 might_fault(); \
231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 233 access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
232 __put_user((x), (ptr)) : \ 234 __put_user((x), __p) : \
233 -EFAULT; \ 235 -EFAULT; \
234}) 236})
235 237
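
The arm64 get_user()/put_user() hunks above bind the pointer argument to a local __p so that it is evaluated exactly once; previously it was evaluated by access_ok() and again by __get_user(), which is wrong for arguments with side effects. The hazard and the cure, shown on a generic macro:

    /* Double evaluation: the argument expression runs twice. */
    #define BAD_SQUARE(x)   ((x) * (x))

    /* Cure used by the hunk: bind the argument to a local once (GNU C
     * statement expression), then use only the local. */
    #define GOOD_SQUARE(x)  ({ __typeof__(x) __x = (x); __x * __x; })

    static int demo(int *p)
    {
        return GOOD_SQUARE(*p++);   /* *p++ is evaluated exactly once */
    }
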
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 1f2e4d5a5c0f..bb785d23dbde 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -80,8 +80,10 @@ void fpsimd_thread_switch(struct task_struct *next)
80 80
81void fpsimd_flush_thread(void) 81void fpsimd_flush_thread(void)
82{ 82{
83 preempt_disable();
83 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); 84 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
84 fpsimd_load_state(&current->thread.fpsimd_state); 85 fpsimd_load_state(&current->thread.fpsimd_state);
86 preempt_enable();
85} 87}
86 88
87#ifdef CONFIG_KERNEL_MODE_NEON 89#ifdef CONFIG_KERNEL_MODE_NEON
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 57fb55c44c90..7ae8a1f00c3c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -143,15 +143,26 @@ void machine_restart(char *cmd)
143 143
144void __show_regs(struct pt_regs *regs) 144void __show_regs(struct pt_regs *regs)
145{ 145{
146 int i; 146 int i, top_reg;
147 u64 lr, sp;
148
149 if (compat_user_mode(regs)) {
150 lr = regs->compat_lr;
151 sp = regs->compat_sp;
152 top_reg = 12;
153 } else {
154 lr = regs->regs[30];
155 sp = regs->sp;
156 top_reg = 29;
157 }
147 158
148 show_regs_print_info(KERN_DEFAULT); 159 show_regs_print_info(KERN_DEFAULT);
149 print_symbol("PC is at %s\n", instruction_pointer(regs)); 160 print_symbol("PC is at %s\n", instruction_pointer(regs));
150 print_symbol("LR is at %s\n", regs->regs[30]); 161 print_symbol("LR is at %s\n", lr);
151 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", 162 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
152 regs->pc, regs->regs[30], regs->pstate); 163 regs->pc, lr, regs->pstate);
153 printk("sp : %016llx\n", regs->sp); 164 printk("sp : %016llx\n", sp);
154 for (i = 29; i >= 0; i--) { 165 for (i = top_reg; i >= 0; i--) {
155 printk("x%-2d: %016llx ", i, regs->regs[i]); 166 printk("x%-2d: %016llx ", i, regs->regs[i]);
156 if (i % 2 == 0) 167 if (i % 2 == 0)
157 printk("\n"); 168 printk("\n");
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 12ad8f3d0cfd..055cfb80e05c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -57,7 +57,7 @@
57unsigned int processor_id; 57unsigned int processor_id;
58EXPORT_SYMBOL(processor_id); 58EXPORT_SYMBOL(processor_id);
59 59
60unsigned int elf_hwcap __read_mostly; 60unsigned long elf_hwcap __read_mostly;
61EXPORT_SYMBOL_GPL(elf_hwcap); 61EXPORT_SYMBOL_GPL(elf_hwcap);
62 62
63static const char *cpu_name; 63static const char *cpu_name;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6d6acf153bff..c23751b06120 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
130 force_sig_info(sig, &si, tsk); 130 force_sig_info(sig, &si, tsk);
131} 131}
132 132
133void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) 133static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
134{ 134{
135 struct task_struct *tsk = current; 135 struct task_struct *tsk = current;
136 struct mm_struct *mm = tsk->active_mm; 136 struct mm_struct *mm = tsk->active_mm;
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
index 8ae80a18e8ec..19da91e0cd27 100644
--- a/arch/arm64/mm/tlb.S
+++ b/arch/arm64/mm/tlb.S
@@ -35,7 +35,7 @@
35 */ 35 */
36ENTRY(__cpu_flush_user_tlb_range) 36ENTRY(__cpu_flush_user_tlb_range)
37 vma_vm_mm x3, x2 // get vma->vm_mm 37 vma_vm_mm x3, x2 // get vma->vm_mm
38 mmid x3, x3 // get vm_mm->context.id 38 mmid w3, x3 // get vm_mm->context.id
39 dsb sy 39 dsb sy
40 lsr x0, x0, #12 // align address 40 lsr x0, x0, #12 // align address
41 lsr x1, x1, #12 41 lsr x1, x1, #12
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index d22af851f3f6..fd7980743890 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,5 +1,19 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += cputime.h
4generic-y += delay.h
5generic-y += device.h
6generic-y += div64.h
7generic-y += emergency-restart.h
3generic-y += exec.h 8generic-y += exec.h
4generic-y += trace_clock.h 9generic-y += futex.h
10generic-y += irq_regs.h
5generic-y += param.h 11generic-y += param.h
12generic-y += local.h
13generic-y += local64.h
14generic-y += percpu.h
15generic-y += scatterlist.h
16generic-y += sections.h
17generic-y += topology.h
18generic-y += trace_clock.h
19generic-y += xor.h
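
Each "generic-y += foo.h" line above asks Kbuild to generate a one-line wrapper header in the build tree, which is why the avr32 headers deleted in the hunks that follow could go: they contained nothing but an include of the asm-generic version. Roughly what the generated file looks like (the generated path is quoted from memory and is an assumption):

    /* include/generated/asm/cputime.h, produced from "generic-y += cputime.h" */
    #include <asm-generic/cputime.h>
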
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644
index e87e0f81cbeb..000000000000
--- a/arch/avr32/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_CPUTIME_H
2#define __ASM_AVR32_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/avr32/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/avr32/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644
index d7ddd4fdeca6..000000000000
--- a/arch/avr32/include/asm/div64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_DIV64_H
2#define __ASM_AVR32_DIV64_H
3
4#include <asm-generic/div64.h>
5
6#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644
index 3e7e014776ba..000000000000
--- a/arch/avr32/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
2#define __ASM_AVR32_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644
index 10419f14a68a..000000000000
--- a/arch/avr32/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_FUTEX_H
2#define __ASM_AVR32_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/avr32/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644
index 1c1619694da3..000000000000
--- a/arch/avr32/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_LOCAL_H
2#define __ASM_AVR32_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/avr32/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644
index 69227b4cd0d4..000000000000
--- a/arch/avr32/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_PERCPU_H
2#define __ASM_AVR32_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644
index a5902d9834e8..000000000000
--- a/arch/avr32/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SCATTERLIST_H
2#define __ASM_AVR32_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644
index aa14252e4181..000000000000
--- a/arch/avr32/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SECTIONS_H
2#define __ASM_AVR32_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644
index 5b766cbb4806..000000000000
--- a/arch/avr32/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_TOPOLOGY_H
2#define __ASM_AVR32_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644
index 99c87aa0af4f..000000000000
--- a/arch/avr32/include/asm/xor.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_XOR_H
2#define _ASM_XOR_H
3
4#include <asm-generic/xor.h>
5
6#endif
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index c2731003edef..42a53e740a7e 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
289 memset(childregs, 0, sizeof(struct pt_regs)); 289 memset(childregs, 0, sizeof(struct pt_regs));
290 p->thread.cpu_context.r0 = arg; 290 p->thread.cpu_context.r0 = arg;
291 p->thread.cpu_context.r1 = usp; /* fn */ 291 p->thread.cpu_context.r1 = usp; /* fn */
292 p->thread.cpu_context.r2 = syscall_return; 292 p->thread.cpu_context.r2 = (unsigned long)syscall_return;
293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread; 293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
294 childregs->sr = MODE_SUPERVISOR; 294 childregs->sr = MODE_SUPERVISOR;
295 } else { 295 } else {
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6ffeee..12f828ad5058 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
98 case CLOCK_EVT_MODE_SHUTDOWN: 98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0); 99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name); 100 pr_debug("%s: stop\n", evdev->name);
101 cpu_idle_poll_ctrl(false); 101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
102 break; 109 break;
103 default: 110 default:
104 BUG(); 111 BUG();
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 75a36ad11ff5..ca8f8340d75f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -288,9 +288,6 @@ endif
288vmlinux.32: vmlinux 288vmlinux.32: vmlinux
289 $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@ 289 $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
290 290
291
292#obj-$(CONFIG_KPROBES) += kprobes.o
293
294# 291#
295# The 64-bit ELF tools are pretty broken so at this time we generate 64-bit 292# The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
296# ELF files from 32-bit files by conversion. 293# ELF files from 32-bit files by conversion.
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 4a9baa9f6330..9969dbab19e3 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -276,7 +276,7 @@ static struct platform_device mtx1_pci_host = {
276 .resource = alchemy_pci_host_res, 276 .resource = alchemy_pci_host_res,
277}; 277};
278 278
279static struct __initdata platform_device * mtx1_devs[] = { 279static struct platform_device *mtx1_devs[] __initdata = {
280 &mtx1_pci_host, 280 &mtx1_pci_host,
281 &mtx1_gpio_leds, 281 &mtx1_gpio_leds,
282 &mtx1_wdt, 282 &mtx1_wdt,
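
The mtx1 hunk above is purely about attribute placement: __initdata (a section attribute, roughly __attribute__((section(".init.data")))) must annotate the variable being declared, so it belongs after the declarator. Sketch of the correct form:

    #include <linux/init.h>
    #include <linux/platform_device.h>

    /* correct: the attribute applies to the array being declared */
    static struct platform_device *example_devs[] __initdata = {
        /* ... board devices ... */
    };

    /* wrong: "static struct __initdata platform_device *devs[]" tries to
     * attach the attribute to the struct type, which compilers warn about
     * or reject */
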
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
index fcc695626117..2adc7edda49c 100644
--- a/arch/mips/alchemy/common/usb.c
+++ b/arch/mips/alchemy/common/usb.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/syscore_ops.h> 16#include <linux/syscore_ops.h>
17#include <asm/cpu.h>
17#include <asm/mach-au1x00/au1000.h> 18#include <asm/mach-au1x00/au1000.h>
18 19
19/* control register offsets */ 20/* control register offsets */
@@ -358,7 +359,7 @@ static inline int au1200_coherency_bug(void)
358{ 359{
359#if defined(CONFIG_DMA_COHERENT) 360#if defined(CONFIG_DMA_COHERENT)
360 /* Au1200 AB USB does not support coherent memory */ 361 /* Au1200 AB USB does not support coherent memory */
361 if (!(read_c0_prid() & 0xff)) { 362 if (!(read_c0_prid() & PRID_REV_MASK)) {
362 printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); 363 printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n");
363 printk(KERN_INFO "Au1200 USB: update your board or re-configure" 364 printk(KERN_INFO "Au1200 USB: update your board or re-configure"
364 " the kernel\n"); 365 " the kernel\n");
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 7e17374a9ae8..b713cd64b087 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -306,14 +306,14 @@ void __init bcm63xx_cpu_init(void)
306 306
307 switch (c->cputype) { 307 switch (c->cputype) {
308 case CPU_BMIPS3300: 308 case CPU_BMIPS3300:
309 if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT) 309 if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
310 __cpu_name[cpu] = "Broadcom BCM6338"; 310 __cpu_name[cpu] = "Broadcom BCM6338";
311 /* fall-through */ 311 /* fall-through */
312 case CPU_BMIPS32: 312 case CPU_BMIPS32:
313 chipid_reg = BCM_6345_PERF_BASE; 313 chipid_reg = BCM_6345_PERF_BASE;
314 break; 314 break;
315 case CPU_BMIPS4350: 315 case CPU_BMIPS4350:
316 switch ((read_c0_prid() & 0xff)) { 316 switch ((read_c0_prid() & PRID_REV_MASK)) {
317 case 0x04: 317 case 0x04:
318 chipid_reg = BCM_3368_PERF_BASE; 318 chipid_reg = BCM_3368_PERF_BASE;
319 break; 319 break;
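
The MIPS hunks above replace open-coded 0xff00/0xff masks on the PRId register with the named PRID_IMP_MASK/PRID_REV_MASK constants. The field layout, with values quoted from memory of asm/cpu.h and therefore an assumption:

    /* PRId register: [23:16] company, [15:8] implementation, [7:0] revision */
    #define PRID_COMP_MASK  0xff0000
    #define PRID_IMP_MASK   0x00ff00
    #define PRID_REV_MASK   0x0000ff

    static inline unsigned int prid_revision(unsigned int prid)
    {
        return prid & PRID_REV_MASK;
    }
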
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings
index 68ae3887b3e5..08c00e4972fa 120000
--- a/arch/mips/boot/dts/include/dt-bindings
+++ b/arch/mips/boot/dts/include/dt-bindings
@@ -1 +1 @@
../../../../../include/dt-bindings ../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 02193953eb9e..b752c4ed0b79 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -12,6 +12,7 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13 13
14#include <asm/cpu-info.h> 14#include <asm/cpu-info.h>
15#include <asm/cpu-type.h>
15#include <asm/time.h> 16#include <asm/time.h>
16 17
17#include <asm/octeon/octeon.h> 18#include <asm/octeon/octeon.h>
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index ab169046e442..468f665de7bb 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/bootinfo.h> 14#include <asm/bootinfo.h>
15#include <asm/cpu.h> 15#include <asm/cpu.h>
16#include <asm/cpu-type.h>
16#include <asm/processor.h> 17#include <asm/processor.h>
17 18
18#include <asm/dec/prom.h> 19#include <asm/dec/prom.h>
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index fa44f3ec5302..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -13,12 +13,6 @@
13#include <asm/cpu-info.h> 13#include <asm/cpu-info.h>
14#include <cpu-feature-overrides.h> 14#include <cpu-feature-overrides.h>
15 15
16#ifndef current_cpu_type
17#define current_cpu_type() current_cpu_data.cputype
18#endif
19
20#define boot_cpu_type() cpu_data[0].cputype
21
22/* 16/*
23 * SMP assumption: Options of CPU 0 are a superset of all processors. 17 * SMP assumption: Options of CPU 0 are a superset of all processors.
24 * This is true for all known MIPS systems. 18 * This is true for all known MIPS systems.
@@ -193,7 +187,7 @@
193 187
194/* 188/*
195 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other 189 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
196 * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and 190 * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
197 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels 191 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
198 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 192 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
199 */ 193 */
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 41401d8eb7d1..21c8e29c8f91 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -84,6 +84,7 @@ struct cpuinfo_mips {
84extern struct cpuinfo_mips cpu_data[]; 84extern struct cpuinfo_mips cpu_data[];
85#define current_cpu_data cpu_data[smp_processor_id()] 85#define current_cpu_data cpu_data[smp_processor_id()]
86#define raw_current_cpu_data cpu_data[raw_smp_processor_id()] 86#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
87#define boot_cpu_data cpu_data[0]
87 88
88extern void cpu_probe(void); 89extern void cpu_probe(void);
89extern void cpu_report(void); 90extern void cpu_report(void);
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
new file mode 100644
index 000000000000..4a402cc60c03
--- /dev/null
+++ b/arch/mips/include/asm/cpu-type.h
@@ -0,0 +1,203 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003, 2004 Ralf Baechle
7 * Copyright (C) 2004 Maciej W. Rozycki
8 */
9#ifndef __ASM_CPU_TYPE_H
10#define __ASM_CPU_TYPE_H
11
12#include <linux/smp.h>
13#include <linux/compiler.h>
14
15static inline int __pure __get_cpu_type(const int cpu_type)
16{
17 switch (cpu_type) {
18#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
19 defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
20 case CPU_LOONGSON2:
21#endif
22
23#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
24 case CPU_LOONGSON1:
25#endif
26
27#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
28 case CPU_4KC:
29 case CPU_ALCHEMY:
30 case CPU_BMIPS3300:
31 case CPU_BMIPS4350:
32 case CPU_PR4450:
33 case CPU_BMIPS32:
34 case CPU_JZRISC:
35#endif
36
37#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \
38 defined(CONFIG_SYS_HAS_CPU_MIPS32_R2)
39 case CPU_4KEC:
40#endif
41
42#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2
43 case CPU_4KSC:
44 case CPU_24K:
45 case CPU_34K:
46 case CPU_1004K:
47 case CPU_74K:
48 case CPU_M14KC:
49 case CPU_M14KEC:
50#endif
51
52#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
53 case CPU_5KC:
54 case CPU_5KE:
55 case CPU_20KC:
56 case CPU_25KF:
57 case CPU_SB1:
58 case CPU_SB1A:
59#endif
60
61#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2
62 /*
63 * All MIPS64 R2 processors have their own special symbols. That is,
64 * there currently is no pure R2 core
65 */
66#endif
67
68#ifdef CONFIG_SYS_HAS_CPU_R3000
69 case CPU_R2000:
70 case CPU_R3000:
71 case CPU_R3000A:
72 case CPU_R3041:
73 case CPU_R3051:
74 case CPU_R3052:
75 case CPU_R3081:
76 case CPU_R3081E:
77#endif
78
79#ifdef CONFIG_SYS_HAS_CPU_TX39XX
80 case CPU_TX3912:
81 case CPU_TX3922:
82 case CPU_TX3927:
83#endif
84
85#ifdef CONFIG_SYS_HAS_CPU_VR41XX
86 case CPU_VR41XX:
87 case CPU_VR4111:
88 case CPU_VR4121:
89 case CPU_VR4122:
90 case CPU_VR4131:
91 case CPU_VR4133:
92 case CPU_VR4181:
93 case CPU_VR4181A:
94#endif
95
96#ifdef CONFIG_SYS_HAS_CPU_R4300
97 case CPU_R4300:
98 case CPU_R4310:
99#endif
100
101#ifdef CONFIG_SYS_HAS_CPU_R4X00
102 case CPU_R4000PC:
103 case CPU_R4000SC:
104 case CPU_R4000MC:
105 case CPU_R4200:
106 case CPU_R4400PC:
107 case CPU_R4400SC:
108 case CPU_R4400MC:
109 case CPU_R4600:
110 case CPU_R4700:
111 case CPU_R4640:
112 case CPU_R4650:
113#endif
114
115#ifdef CONFIG_SYS_HAS_CPU_TX49XX
116 case CPU_TX49XX:
117#endif
118
119#ifdef CONFIG_SYS_HAS_CPU_R5000
120 case CPU_R5000:
121#endif
122
123#ifdef CONFIG_SYS_HAS_CPU_R5432
124 case CPU_R5432:
125#endif
126
127#ifdef CONFIG_SYS_HAS_CPU_R5500
128 case CPU_R5500:
129#endif
130
131#ifdef CONFIG_SYS_HAS_CPU_R6000
132 case CPU_R6000:
133 case CPU_R6000A:
134#endif
135
136#ifdef CONFIG_SYS_HAS_CPU_NEVADA
137 case CPU_NEVADA:
138#endif
139
140#ifdef CONFIG_SYS_HAS_CPU_R8000
141 case CPU_R8000:
142#endif
143
144#ifdef CONFIG_SYS_HAS_CPU_R10000
145 case CPU_R10000:
146 case CPU_R12000:
147 case CPU_R14000:
148#endif
149#ifdef CONFIG_SYS_HAS_CPU_RM7000
150 case CPU_RM7000:
151 case CPU_SR71000:
152#endif
153#ifdef CONFIG_SYS_HAS_CPU_RM9000
154 case CPU_RM9000:
155#endif
156#ifdef CONFIG_SYS_HAS_CPU_SB1
157 case CPU_SB1:
158 case CPU_SB1A:
159#endif
160#ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON
161 case CPU_CAVIUM_OCTEON:
162 case CPU_CAVIUM_OCTEON_PLUS:
163 case CPU_CAVIUM_OCTEON2:
164#endif
165
166#ifdef CONFIG_SYS_HAS_CPU_BMIPS4380
167 case CPU_BMIPS4380:
168#endif
169
170#ifdef CONFIG_SYS_HAS_CPU_BMIPS5000
171 case CPU_BMIPS5000:
172#endif
173
174#ifdef CONFIG_SYS_HAS_CPU_XLP
175 case CPU_XLP:
176#endif
177
178#ifdef CONFIG_SYS_HAS_CPU_XLR
179 case CPU_XLR:
180#endif
181 break;
182 default:
183 unreachable();
184 }
185
186 return cpu_type;
187}
188
189static inline int __pure current_cpu_type(void)
190{
191 const int cpu_type = current_cpu_data.cputype;
192
193 return __get_cpu_type(cpu_type);
194}
195
196static inline int __pure boot_cpu_type(void)
197{
198 const int cpu_type = cpu_data[0].cputype;
199
200 return __get_cpu_type(cpu_type);
201}
202
203#endif /* __ASM_CPU_TYPE_H */
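
The new cpu-type.h above enumerates, per SYS_HAS_CPU_* option, every cputype value that can legally occur and funnels anything else into unreachable(), so a kernel configured for a single CPU family lets the compiler fold current_cpu_type()/boot_cpu_type() to a constant and drop dead switch arms in callers such as check_wait() or probe_pcache(). A stripped-down user-space sketch of the idea; the enum values and the single configuration macro here are placeholders, not the kernel's:

#include <stdio.h>

enum cpu_type { CPU_4KC = 1, CPU_24K, CPU_R4600 };

#define SYS_HAS_CPU_MIPS32_R2 1	/* pretend only one CPU family is configured */

/* Mirrors __get_cpu_type(): only configured types are reachable (kernel marks it __pure). */
static inline int get_cpu_type(int cpu_type)
{
	switch (cpu_type) {
#ifdef SYS_HAS_CPU_MIPS32_R2
	case CPU_24K:
#endif
		break;
	default:
		__builtin_unreachable();	/* kernel uses unreachable() */
	}
	return cpu_type;
}

int main(void)
{
	/*
	 * With only CPU_24K reachable, a caller's switch on get_cpu_type()
	 * can be reduced to the CPU_24K arm at compile time.
	 */
	switch (get_cpu_type(CPU_24K)) {
	case CPU_24K:
		puts("24K path (the only live arm after dead-code elimination)");
		break;
	case CPU_R4600:
		puts("R4600 path (the compiler may drop this entirely)");
		break;
	}
	return 0;
}
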
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 71b9f1998be7..d2035e16502a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -3,15 +3,14 @@
3 * various MIPS cpu types. 3 * various MIPS cpu types.
4 * 4 *
5 * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 5 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
6 * Copyright (C) 2004 Maciej W. Rozycki 6 * Copyright (C) 2004, 2013 Maciej W. Rozycki
7 */ 7 */
8#ifndef _ASM_CPU_H 8#ifndef _ASM_CPU_H
9#define _ASM_CPU_H 9#define _ASM_CPU_H
10 10
11/* Assigned Company values for bits 23:16 of the PRId Register 11/*
12 (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from 12 As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0
13 MTI, the PRId register is defined in this (backwards compatible) 13 register 15, select 0) is defined in this (backwards compatible) way:
14 way:
15 14
16 +----------------+----------------+----------------+----------------+ 15 +----------------+----------------+----------------+----------------+
17 | Company Options| Company ID | Processor ID | Revision | 16 | Company Options| Company ID | Processor ID | Revision |
@@ -23,6 +22,14 @@
23 spec. 22 spec.
24*/ 23*/
25 24
25#define PRID_OPT_MASK 0xff000000
26
27/*
28 * Assigned Company values for bits 23:16 of the PRId register.
29 */
30
31#define PRID_COMP_MASK 0xff0000
32
26#define PRID_COMP_LEGACY 0x000000 33#define PRID_COMP_LEGACY 0x000000
27#define PRID_COMP_MIPS 0x010000 34#define PRID_COMP_MIPS 0x010000
28#define PRID_COMP_BROADCOM 0x020000 35#define PRID_COMP_BROADCOM 0x020000
@@ -38,10 +45,17 @@
38#define PRID_COMP_INGENIC 0xd00000 45#define PRID_COMP_INGENIC 0xd00000
39 46
40/* 47/*
41 * Assigned values for the product ID register. In order to detect a 48 * Assigned Processor ID (implementation) values for bits 15:8 of the PRId
42 * certain CPU type exactly eventually additional registers may need to 49 * register. In order to detect a certain CPU type exactly eventually
43 * be examined. These are valid when 23:16 == PRID_COMP_LEGACY 50 * additional registers may need to be examined.
44 */ 51 */
52
53#define PRID_IMP_MASK 0xff00
54
55/*
56 * These are valid when 23:16 == PRID_COMP_LEGACY
57 */
58
45#define PRID_IMP_R2000 0x0100 59#define PRID_IMP_R2000 0x0100
46#define PRID_IMP_AU1_REV1 0x0100 60#define PRID_IMP_AU1_REV1 0x0100
47#define PRID_IMP_AU1_REV2 0x0200 61#define PRID_IMP_AU1_REV2 0x0200
@@ -182,11 +196,15 @@
182#define PRID_IMP_NETLOGIC_XLP2XX 0x1200 196#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
183 197
184/* 198/*
185 * Definitions for 7:0 on legacy processors 199 * Particular Revision values for bits 7:0 of the PRId register.
186 */ 200 */
187 201
188#define PRID_REV_MASK 0x00ff 202#define PRID_REV_MASK 0x00ff
189 203
204/*
205 * Definitions for 7:0 on legacy processors
206 */
207
190#define PRID_REV_TX4927 0x0022 208#define PRID_REV_TX4927 0x0022
191#define PRID_REV_TX4937 0x0030 209#define PRID_REV_TX4937 0x0030
192#define PRID_REV_R4400 0x0040 210#define PRID_REV_R4400 0x0040
@@ -227,6 +245,8 @@
227 * 31 16 15 8 7 0 245 * 31 16 15 8 7 0
228 */ 246 */
229 247
248#define FPIR_IMP_MASK 0xff00
249
230#define FPIR_IMP_NONE 0x0000 250#define FPIR_IMP_NONE 0x0000
231 251
232enum cpu_type_enum { 252enum cpu_type_enum {
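
The asm/cpu.h hunk above adds PRID_OPT_MASK, PRID_COMP_MASK, PRID_IMP_MASK and FPIR_IMP_MASK so the 0xff0000/0xff00/0xff literals scattered through the rest of this series can become names without changing any values. A small user-space sketch that decodes a PRId word with those masks and checks, at compile time, that the combined masks still equal the old magic numbers (the sample PRId value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PRID_OPT_MASK	0xff000000	/* company options, bits 31:24 */
#define PRID_COMP_MASK	0x00ff0000	/* company ID,      bits 23:16 */
#define PRID_IMP_MASK	0x0000ff00	/* processor ID,    bits 15:8  */
#define PRID_REV_MASK	0x000000ff	/* revision,        bits 7:0   */

/* The named masks must reproduce the old magic numbers exactly. */
_Static_assert((PRID_COMP_MASK | PRID_IMP_MASK) == 0xffff00, "comp|imp");
_Static_assert((PRID_OPT_MASK | PRID_COMP_MASK) == 0xffff0000, "opt|comp");

int main(void)
{
	uint32_t prid = 0x0001a742;	/* arbitrary sample PRId value */

	printf("company 0x%02x imp 0x%02x rev 0x%02x\n",
	       (unsigned)((prid & PRID_COMP_MASK) >> 16),
	       (unsigned)((prid & PRID_IMP_MASK) >> 8),
	       (unsigned)(prid & PRID_REV_MASK));
	return 0;
}
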
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 3e11a468cdf8..54f9e84db8ac 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -43,6 +43,8 @@
43#include <linux/io.h> 43#include <linux/io.h>
44#include <linux/irq.h> 44#include <linux/irq.h>
45 45
46#include <asm/cpu.h>
47
46/* cpu pipeline flush */ 48/* cpu pipeline flush */
47void static inline au_sync(void) 49void static inline au_sync(void)
48{ 50{
@@ -140,7 +142,7 @@ static inline int au1xxx_cpu_needs_config_od(void)
140 142
141static inline int alchemy_get_cputype(void) 143static inline int alchemy_get_cputype(void)
142{ 144{
143 switch (read_c0_prid() & 0xffff0000) { 145 switch (read_c0_prid() & (PRID_OPT_MASK | PRID_COMP_MASK)) {
144 case 0x00030000: 146 case 0x00030000:
145 return ALCHEMY_CPU_AU1000; 147 return ALCHEMY_CPU_AU1000;
146 break; 148 break;
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index f4caacd25552..1bcb6421205e 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -8,6 +8,8 @@
8#ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H 8#ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
9#define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H 9#define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H
10 10
11#include <asm/cpu.h>
12
11/* 13/*
12 * IP22 with a variety of processors so we can't use defaults for everything. 14 * IP22 with a variety of processors so we can't use defaults for everything.
13 */ 15 */
diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
index 1d2b6ff60d33..d6111aa2e886 100644
--- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
@@ -8,6 +8,8 @@
8#ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H 8#ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
9#define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H 9#define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H
10 10
11#include <asm/cpu.h>
12
11/* 13/*
12 * IP27 only comes with R10000 family processors all using the same config 14 * IP27 only comes with R10000 family processors all using the same config
13 */ 15 */
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 65e9c856390d..4cec06d133db 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -9,6 +9,8 @@
9#ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H 9#ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
10#define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H 10#define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H
11 11
12#include <asm/cpu.h>
13
12/* 14/*
13 * IP28 only comes with R10000 family processors all using the same config 15 * IP28 only comes with R10000 family processors all using the same config
14 */ 16 */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index fed1c3e9b486..e0331414c7d6 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -603,6 +603,13 @@
603#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14) 603#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
604#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14) 604#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
605 605
606#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
607#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
608#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
609#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
610#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
611#define MIPS_CONF5_K (_ULCAST_(1) << 30)
612
606#define MIPS_CONF6_SYND (_ULCAST_(1) << 13) 613#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
607 614
608#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 615#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index f194c08bd057..12d6842962be 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -83,6 +83,18 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
83extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 83extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
84 enum pci_mmap_state mmap_state, int write_combine); 84 enum pci_mmap_state mmap_state, int write_combine);
85 85
86#define HAVE_ARCH_PCI_RESOURCE_TO_USER
87
88static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
89 const struct resource *rsrc, resource_size_t *start,
90 resource_size_t *end)
91{
92 phys_t size = resource_size(rsrc);
93
94 *start = fixup_bigphys_addr(rsrc->start, size);
95 *end = rsrc->start + size;
96}
97
86/* 98/*
87 * Dynamic DMA mapping stuff. 99 * Dynamic DMA mapping stuff.
88 * MIPS has everything mapped statically. 100 * MIPS has everything mapped statically.
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index 6529704aa73a..c5424757da65 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -10,7 +10,9 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#include <asm/cpu-features.h>
13#include <asm/mipsregs.h> 14#include <asm/mipsregs.h>
15#include <asm/cpu-type.h>
14 16
15/* 17/*
16 * This is the clock rate of the i8253 PIT. A MIPS system may not have 18 * This is the clock rate of the i8253 PIT. A MIPS system may not have
@@ -33,9 +35,38 @@
33 35
34typedef unsigned int cycles_t; 36typedef unsigned int cycles_t;
35 37
38/*
39 * On R4000/R4400 before version 5.0 an erratum exists such that if the
40 * cycle counter is read in the exact moment that it is matching the
41 * compare register, no interrupt will be generated.
42 *
43 * There is a suggested workaround and also the erratum can't strike if
44 * the compare interrupt isn't being used as the clock source device.
45 * However, for now the implementation of this function doesn't get these
46 * fine details right.
47 */
36static inline cycles_t get_cycles(void) 48static inline cycles_t get_cycles(void)
37{ 49{
38 return 0; 50 switch (boot_cpu_type()) {
51 case CPU_R4400PC:
52 case CPU_R4400SC:
53 case CPU_R4400MC:
54 if ((read_c0_prid() & 0xff) >= 0x0050)
55 return read_c0_count();
56 break;
57
58 case CPU_R4000PC:
59 case CPU_R4000SC:
60 case CPU_R4000MC:
61 break;
62
63 default:
64 if (cpu_has_counter)
65 return read_c0_count();
66 break;
67 }
68
69 return 0; /* no usable counter */
39} 70}
40 71
41#endif /* __KERNEL__ */ 72#endif /* __KERNEL__ */
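
The new get_cycles() above only reads the CP0 count register when that is known to be safe: on R4400 from revision 5.0 up, never on R4000, and on everything else only if the CPU implements a counter at all. A user-space sketch of just that decision; the CPU_* names, the has_counter flag and the sample PRId values are stand-ins for the kernel's boot_cpu_type()/cpu_has_counter machinery:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRID_REV_MASK 0x00ff

enum { CPU_R4000PC, CPU_R4400PC, CPU_20KC };	/* illustrative subset */

/* Decide whether the CP0 count register may be used as a cycle source. */
static bool count_register_usable(int cpu_type, uint32_t prid, bool has_counter)
{
	switch (cpu_type) {
	case CPU_R4400PC:
		/* R4400: the counter/compare erratum is fixed from rev 5.0. */
		return (prid & PRID_REV_MASK) >= 0x0050;
	case CPU_R4000PC:
		/* R4000: always affected, so get_cycles() keeps returning 0. */
		return false;
	default:
		return has_counter;
	}
}

int main(void)
{
	/* Arbitrary sample PRId values. */
	printf("R4400 rev 0x50: %d\n", count_register_usable(CPU_R4400PC, 0x0450, true));
	printf("R4400 rev 0x40: %d\n", count_register_usable(CPU_R4400PC, 0x0440, true));
	printf("20Kc:           %d\n", count_register_usable(CPU_20KC, 0x0182, true));
	return 0;
}
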
diff --git a/arch/mips/include/asm/vga.h b/arch/mips/include/asm/vga.h
index f4cff7e4fa8a..f82c83749a08 100644
--- a/arch/mips/include/asm/vga.h
+++ b/arch/mips/include/asm/vga.h
@@ -6,6 +6,7 @@
6#ifndef _ASM_VGA_H 6#ifndef _ASM_VGA_H
7#define _ASM_VGA_H 7#define _ASM_VGA_H
8 8
9#include <asm/addrspace.h>
9#include <asm/byteorder.h> 10#include <asm/byteorder.h>
10 11
11/* 12/*
@@ -13,7 +14,7 @@
13 * access the videoram directly without any black magic. 14 * access the videoram directly without any black magic.
14 */ 15 */
15 16
16#define VGA_MAP_MEM(x, s) (0xb0000000L + (unsigned long)(x)) 17#define VGA_MAP_MEM(x, s) CKSEG1ADDR(0x10000000L + (unsigned long)(x))
17 18
18#define vga_readb(x) (*(x)) 19#define vga_readb(x) (*(x))
19#define vga_writeb(x, y) (*(y) = (x)) 20#define vga_writeb(x, y) (*(y) = (x))
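
The VGA_MAP_MEM() change above swaps the hard-coded 0xb0000000L base, which only lands in kseg1 on a 32-bit kernel, for CKSEG1ADDR(0x10000000L + x), letting asm/addrspace.h produce the right uncached address for the build at hand. Assuming the usual 32-bit definition of CKSEG1ADDR() (physical address folded into the 0xa0000000 kseg1 window), the two forms agree, as this small check shows:

#include <stdio.h>

/*
 * Assumed 32-bit definition of CKSEG1ADDR(): map a physical address into the
 * uncached, unmapped kseg1 segment.  The real macro lives in asm/addrspace.h
 * and also does the right thing on 64-bit kernels.
 */
#define CKSEG1		0xa0000000UL
#define CKSEG1ADDR(p)	(CKSEG1 | ((unsigned long)(p) & 0x1fffffff))

int main(void)
{
	unsigned long x = 0x12345;	/* arbitrary VGA window offset */

	/* New form ...                          ... equals the old hard-coded one. */
	printf("0x%08lx == 0x%08lx\n",
	       CKSEG1ADDR(0x10000000UL + x), 0xb0000000UL + x);
	return 0;
}
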
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 37663c7862a5..5465dc183e5a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -20,6 +20,7 @@
20 20
21#include <asm/bugs.h> 21#include <asm/bugs.h>
22#include <asm/cpu.h> 22#include <asm/cpu.h>
23#include <asm/cpu-type.h>
23#include <asm/fpu.h> 24#include <asm/fpu.h>
24#include <asm/mipsregs.h> 25#include <asm/mipsregs.h>
25#include <asm/watch.h> 26#include <asm/watch.h>
@@ -55,7 +56,7 @@ static inline void check_errata(void)
55{ 56{
56 struct cpuinfo_mips *c = &current_cpu_data; 57 struct cpuinfo_mips *c = &current_cpu_data;
57 58
58 switch (c->cputype) { 59 switch (current_cpu_type()) {
59 case CPU_34K: 60 case CPU_34K:
60 /* 61 /*
61 * Erratum "RPS May Cause Incorrect Instruction Execution" 62 * Erratum "RPS May Cause Incorrect Instruction Execution"
@@ -122,7 +123,7 @@ static inline unsigned long cpu_get_fpu_id(void)
122 */ 123 */
123static inline int __cpu_has_fpu(void) 124static inline int __cpu_has_fpu(void)
124{ 125{
125 return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); 126 return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
126} 127}
127 128
128static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) 129static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
@@ -290,6 +291,17 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
290 return config4 & MIPS_CONF_M; 291 return config4 & MIPS_CONF_M;
291} 292}
292 293
294static inline unsigned int decode_config5(struct cpuinfo_mips *c)
295{
296 unsigned int config5;
297
298 config5 = read_c0_config5();
299 config5 &= ~MIPS_CONF5_UFR;
300 write_c0_config5(config5);
301
302 return config5 & MIPS_CONF_M;
303}
304
293static void decode_configs(struct cpuinfo_mips *c) 305static void decode_configs(struct cpuinfo_mips *c)
294{ 306{
295 int ok; 307 int ok;
@@ -310,6 +322,8 @@ static void decode_configs(struct cpuinfo_mips *c)
310 ok = decode_config3(c); 322 ok = decode_config3(c);
311 if (ok) 323 if (ok)
312 ok = decode_config4(c); 324 ok = decode_config4(c);
325 if (ok)
326 ok = decode_config5(c);
313 327
314 mips_probe_watch_registers(c); 328 mips_probe_watch_registers(c);
315 329
@@ -322,7 +336,7 @@ static void decode_configs(struct cpuinfo_mips *c)
322 336
323static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) 337static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
324{ 338{
325 switch (c->processor_id & 0xff00) { 339 switch (c->processor_id & PRID_IMP_MASK) {
326 case PRID_IMP_R2000: 340 case PRID_IMP_R2000:
327 c->cputype = CPU_R2000; 341 c->cputype = CPU_R2000;
328 __cpu_name[cpu] = "R2000"; 342 __cpu_name[cpu] = "R2000";
@@ -333,7 +347,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
333 c->tlbsize = 64; 347 c->tlbsize = 64;
334 break; 348 break;
335 case PRID_IMP_R3000: 349 case PRID_IMP_R3000:
336 if ((c->processor_id & 0xff) == PRID_REV_R3000A) { 350 if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
337 if (cpu_has_confreg()) { 351 if (cpu_has_confreg()) {
338 c->cputype = CPU_R3081E; 352 c->cputype = CPU_R3081E;
339 __cpu_name[cpu] = "R3081"; 353 __cpu_name[cpu] = "R3081";
@@ -353,7 +367,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
353 break; 367 break;
354 case PRID_IMP_R4000: 368 case PRID_IMP_R4000:
355 if (read_c0_config() & CONF_SC) { 369 if (read_c0_config() & CONF_SC) {
356 if ((c->processor_id & 0xff) >= PRID_REV_R4400) { 370 if ((c->processor_id & PRID_REV_MASK) >=
371 PRID_REV_R4400) {
357 c->cputype = CPU_R4400PC; 372 c->cputype = CPU_R4400PC;
358 __cpu_name[cpu] = "R4400PC"; 373 __cpu_name[cpu] = "R4400PC";
359 } else { 374 } else {
@@ -361,7 +376,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
361 __cpu_name[cpu] = "R4000PC"; 376 __cpu_name[cpu] = "R4000PC";
362 } 377 }
363 } else { 378 } else {
364 if ((c->processor_id & 0xff) >= PRID_REV_R4400) { 379 if ((c->processor_id & PRID_REV_MASK) >=
380 PRID_REV_R4400) {
365 c->cputype = CPU_R4400SC; 381 c->cputype = CPU_R4400SC;
366 __cpu_name[cpu] = "R4400SC"; 382 __cpu_name[cpu] = "R4400SC";
367 } else { 383 } else {
@@ -454,7 +470,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
454 __cpu_name[cpu] = "TX3927"; 470 __cpu_name[cpu] = "TX3927";
455 c->tlbsize = 64; 471 c->tlbsize = 64;
456 } else { 472 } else {
457 switch (c->processor_id & 0xff) { 473 switch (c->processor_id & PRID_REV_MASK) {
458 case PRID_REV_TX3912: 474 case PRID_REV_TX3912:
459 c->cputype = CPU_TX3912; 475 c->cputype = CPU_TX3912;
460 __cpu_name[cpu] = "TX3912"; 476 __cpu_name[cpu] = "TX3912";
@@ -640,7 +656,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
640static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) 656static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
641{ 657{
642 decode_configs(c); 658 decode_configs(c);
643 switch (c->processor_id & 0xff00) { 659 switch (c->processor_id & PRID_IMP_MASK) {
644 case PRID_IMP_4KC: 660 case PRID_IMP_4KC:
645 c->cputype = CPU_4KC; 661 c->cputype = CPU_4KC;
646 __cpu_name[cpu] = "MIPS 4Kc"; 662 __cpu_name[cpu] = "MIPS 4Kc";
@@ -711,7 +727,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
711static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) 727static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
712{ 728{
713 decode_configs(c); 729 decode_configs(c);
714 switch (c->processor_id & 0xff00) { 730 switch (c->processor_id & PRID_IMP_MASK) {
715 case PRID_IMP_AU1_REV1: 731 case PRID_IMP_AU1_REV1:
716 case PRID_IMP_AU1_REV2: 732 case PRID_IMP_AU1_REV2:
717 c->cputype = CPU_ALCHEMY; 733 c->cputype = CPU_ALCHEMY;
@@ -730,7 +746,7 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
730 break; 746 break;
731 case 4: 747 case 4:
732 __cpu_name[cpu] = "Au1200"; 748 __cpu_name[cpu] = "Au1200";
733 if ((c->processor_id & 0xff) == 2) 749 if ((c->processor_id & PRID_REV_MASK) == 2)
734 __cpu_name[cpu] = "Au1250"; 750 __cpu_name[cpu] = "Au1250";
735 break; 751 break;
736 case 5: 752 case 5:
@@ -748,12 +764,12 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
748{ 764{
749 decode_configs(c); 765 decode_configs(c);
750 766
751 switch (c->processor_id & 0xff00) { 767 switch (c->processor_id & PRID_IMP_MASK) {
752 case PRID_IMP_SB1: 768 case PRID_IMP_SB1:
753 c->cputype = CPU_SB1; 769 c->cputype = CPU_SB1;
754 __cpu_name[cpu] = "SiByte SB1"; 770 __cpu_name[cpu] = "SiByte SB1";
755 /* FPU in pass1 is known to have issues. */ 771 /* FPU in pass1 is known to have issues. */
756 if ((c->processor_id & 0xff) < 0x02) 772 if ((c->processor_id & PRID_REV_MASK) < 0x02)
757 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); 773 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
758 break; 774 break;
759 case PRID_IMP_SB1A: 775 case PRID_IMP_SB1A:
@@ -766,7 +782,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
766static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) 782static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
767{ 783{
768 decode_configs(c); 784 decode_configs(c);
769 switch (c->processor_id & 0xff00) { 785 switch (c->processor_id & PRID_IMP_MASK) {
770 case PRID_IMP_SR71000: 786 case PRID_IMP_SR71000:
771 c->cputype = CPU_SR71000; 787 c->cputype = CPU_SR71000;
772 __cpu_name[cpu] = "Sandcraft SR71000"; 788 __cpu_name[cpu] = "Sandcraft SR71000";
@@ -779,7 +795,7 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
779static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) 795static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
780{ 796{
781 decode_configs(c); 797 decode_configs(c);
782 switch (c->processor_id & 0xff00) { 798 switch (c->processor_id & PRID_IMP_MASK) {
783 case PRID_IMP_PR4450: 799 case PRID_IMP_PR4450:
784 c->cputype = CPU_PR4450; 800 c->cputype = CPU_PR4450;
785 __cpu_name[cpu] = "Philips PR4450"; 801 __cpu_name[cpu] = "Philips PR4450";
@@ -791,7 +807,7 @@ static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
791static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) 807static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
792{ 808{
793 decode_configs(c); 809 decode_configs(c);
794 switch (c->processor_id & 0xff00) { 810 switch (c->processor_id & PRID_IMP_MASK) {
795 case PRID_IMP_BMIPS32_REV4: 811 case PRID_IMP_BMIPS32_REV4:
796 case PRID_IMP_BMIPS32_REV8: 812 case PRID_IMP_BMIPS32_REV8:
797 c->cputype = CPU_BMIPS32; 813 c->cputype = CPU_BMIPS32;
@@ -806,7 +822,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
806 set_elf_platform(cpu, "bmips3300"); 822 set_elf_platform(cpu, "bmips3300");
807 break; 823 break;
808 case PRID_IMP_BMIPS43XX: { 824 case PRID_IMP_BMIPS43XX: {
809 int rev = c->processor_id & 0xff; 825 int rev = c->processor_id & PRID_REV_MASK;
810 826
811 if (rev >= PRID_REV_BMIPS4380_LO && 827 if (rev >= PRID_REV_BMIPS4380_LO &&
812 rev <= PRID_REV_BMIPS4380_HI) { 828 rev <= PRID_REV_BMIPS4380_HI) {
@@ -832,7 +848,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
832static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) 848static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
833{ 849{
834 decode_configs(c); 850 decode_configs(c);
835 switch (c->processor_id & 0xff00) { 851 switch (c->processor_id & PRID_IMP_MASK) {
836 case PRID_IMP_CAVIUM_CN38XX: 852 case PRID_IMP_CAVIUM_CN38XX:
837 case PRID_IMP_CAVIUM_CN31XX: 853 case PRID_IMP_CAVIUM_CN31XX:
838 case PRID_IMP_CAVIUM_CN30XX: 854 case PRID_IMP_CAVIUM_CN30XX:
@@ -875,7 +891,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
875 decode_configs(c); 891 decode_configs(c);
876 /* JZRISC does not implement the CP0 counter. */ 892 /* JZRISC does not implement the CP0 counter. */
877 c->options &= ~MIPS_CPU_COUNTER; 893 c->options &= ~MIPS_CPU_COUNTER;
878 switch (c->processor_id & 0xff00) { 894 switch (c->processor_id & PRID_IMP_MASK) {
879 case PRID_IMP_JZRISC: 895 case PRID_IMP_JZRISC:
880 c->cputype = CPU_JZRISC; 896 c->cputype = CPU_JZRISC;
881 __cpu_name[cpu] = "Ingenic JZRISC"; 897 __cpu_name[cpu] = "Ingenic JZRISC";
@@ -890,7 +906,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
890{ 906{
891 decode_configs(c); 907 decode_configs(c);
892 908
893 if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { 909 if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) {
894 c->cputype = CPU_ALCHEMY; 910 c->cputype = CPU_ALCHEMY;
895 __cpu_name[cpu] = "Au1300"; 911 __cpu_name[cpu] = "Au1300";
896 /* following stuff is not for Alchemy */ 912 /* following stuff is not for Alchemy */
@@ -905,7 +921,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
905 MIPS_CPU_EJTAG | 921 MIPS_CPU_EJTAG |
906 MIPS_CPU_LLSC); 922 MIPS_CPU_LLSC);
907 923
908 switch (c->processor_id & 0xff00) { 924 switch (c->processor_id & PRID_IMP_MASK) {
909 case PRID_IMP_NETLOGIC_XLP2XX: 925 case PRID_IMP_NETLOGIC_XLP2XX:
910 c->cputype = CPU_XLP; 926 c->cputype = CPU_XLP;
911 __cpu_name[cpu] = "Broadcom XLPII"; 927 __cpu_name[cpu] = "Broadcom XLPII";
@@ -984,7 +1000,7 @@ void cpu_probe(void)
984 c->cputype = CPU_UNKNOWN; 1000 c->cputype = CPU_UNKNOWN;
985 1001
986 c->processor_id = read_c0_prid(); 1002 c->processor_id = read_c0_prid();
987 switch (c->processor_id & 0xff0000) { 1003 switch (c->processor_id & PRID_COMP_MASK) {
988 case PRID_COMP_LEGACY: 1004 case PRID_COMP_LEGACY:
989 cpu_probe_legacy(c, cpu); 1005 cpu_probe_legacy(c, cpu);
990 break; 1006 break;
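
decode_configs() above grows one more link in its chain: each ConfigN register's M bit announces that ConfigN+1 exists, each decode_configN() returns that bit, and the next decoder only runs if it was set, so decode_config5() now follows decode_config4() (and clears the UFR bit before writing Config5 back). A user-space sketch of the chaining; the register contents are made up, and MIPS_CONF_M is assumed to be bit 31 as in mipsregs.h:

#include <stdint.h>
#include <stdio.h>

#define MIPS_CONF_M (1u << 31)	/* "another Config register follows" */

/* Pretend Config1..Config5 values; the real code reads CP0 registers. */
static const uint32_t fake_config[] = {
	0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x00000000,
};

static uint32_t decode_one(int n)
{
	uint32_t val = fake_config[n - 1];

	printf("decoded Config%d\n", n);
	return val & MIPS_CONF_M;	/* non-zero: ConfigN+1 is implemented */
}

int main(void)
{
	/* Mirrors the "if (ok) ok = decode_configN(c);" chain in decode_configs(). */
	for (int n = 1; n <= 5 && decode_one(n); n++)
		;
	return 0;
}
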
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 42f8875d2444..f7991d95bff9 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -18,6 +18,7 @@
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <asm/cpu.h> 19#include <asm/cpu.h>
20#include <asm/cpu-info.h> 20#include <asm/cpu-info.h>
21#include <asm/cpu-type.h>
21#include <asm/idle.h> 22#include <asm/idle.h>
22#include <asm/mipsregs.h> 23#include <asm/mipsregs.h>
23 24
@@ -136,7 +137,7 @@ void __init check_wait(void)
136 return; 137 return;
137 } 138 }
138 139
139 switch (c->cputype) { 140 switch (current_cpu_type()) {
140 case CPU_R3081: 141 case CPU_R3081:
141 case CPU_R3081E: 142 case CPU_R3081E:
142 cpu_wait = r3081_wait; 143 cpu_wait = r3081_wait;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 364d26ae4215..dcb8e5d3bb8a 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -24,6 +24,7 @@
24#include <linux/export.h> 24#include <linux/export.h>
25 25
26#include <asm/cpu-features.h> 26#include <asm/cpu-features.h>
27#include <asm/cpu-type.h>
27#include <asm/div64.h> 28#include <asm/div64.h>
28#include <asm/smtc_ipi.h> 29#include <asm/smtc_ipi.h>
29#include <asm/time.h> 30#include <asm/time.h>
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index aec3408edd4b..524841f02803 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -39,6 +39,7 @@
39#include <asm/break.h> 39#include <asm/break.h>
40#include <asm/cop2.h> 40#include <asm/cop2.h>
41#include <asm/cpu.h> 41#include <asm/cpu.h>
42#include <asm/cpu-type.h>
42#include <asm/dsp.h> 43#include <asm/dsp.h>
43#include <asm/fpu.h> 44#include <asm/fpu.h>
44#include <asm/fpu_emulator.h> 45#include <asm/fpu_emulator.h>
@@ -622,7 +623,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
622 regs->regs[rt] = read_c0_count(); 623 regs->regs[rt] = read_c0_count();
623 return 0; 624 return 0;
624 case 3: /* Count register resolution */ 625 case 3: /* Count register resolution */
625 switch (current_cpu_data.cputype) { 626 switch (current_cpu_type()) {
626 case CPU_20KC: 627 case CPU_20KC:
627 case CPU_25KF: 628 case CPU_25KF:
628 regs->regs[rt] = 1; 629 regs->regs[rt] = 1;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 729e7702b1de..c8efdb5b6ee0 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -19,6 +19,7 @@
19#include <asm/bootinfo.h> 19#include <asm/bootinfo.h>
20#include <asm/cacheops.h> 20#include <asm/cacheops.h>
21#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
22#include <asm/cpu-type.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
24#include <asm/r4kcache.h> 25#include <asm/r4kcache.h>
@@ -186,9 +187,10 @@ static void probe_octeon(void)
186 unsigned long dcache_size; 187 unsigned long dcache_size;
187 unsigned int config1; 188 unsigned int config1;
188 struct cpuinfo_mips *c = &current_cpu_data; 189 struct cpuinfo_mips *c = &current_cpu_data;
190 int cputype = current_cpu_type();
189 191
190 config1 = read_c0_config1(); 192 config1 = read_c0_config1();
191 switch (c->cputype) { 193 switch (cputype) {
192 case CPU_CAVIUM_OCTEON: 194 case CPU_CAVIUM_OCTEON:
193 case CPU_CAVIUM_OCTEON_PLUS: 195 case CPU_CAVIUM_OCTEON_PLUS:
194 c->icache.linesz = 2 << ((config1 >> 19) & 7); 196 c->icache.linesz = 2 << ((config1 >> 19) & 7);
@@ -199,7 +201,7 @@ static void probe_octeon(void)
199 c->icache.sets * c->icache.ways * c->icache.linesz; 201 c->icache.sets * c->icache.ways * c->icache.linesz;
200 c->icache.waybit = ffs(icache_size / c->icache.ways) - 1; 202 c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
201 c->dcache.linesz = 128; 203 c->dcache.linesz = 128;
202 if (c->cputype == CPU_CAVIUM_OCTEON_PLUS) 204 if (cputype == CPU_CAVIUM_OCTEON_PLUS)
203 c->dcache.sets = 2; /* CN5XXX has two Dcache sets */ 205 c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
204 else 206 else
205 c->dcache.sets = 1; /* CN3XXX has one Dcache set */ 207 c->dcache.sets = 1; /* CN3XXX has one Dcache set */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f749f687ee87..bc6f96fcb529 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -12,6 +12,7 @@
12#include <linux/highmem.h> 12#include <linux/highmem.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <linux/preempt.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/smp.h> 17#include <linux/smp.h>
17#include <linux/mm.h> 18#include <linux/mm.h>
@@ -24,6 +25,7 @@
24#include <asm/cacheops.h> 25#include <asm/cacheops.h>
25#include <asm/cpu.h> 26#include <asm/cpu.h>
26#include <asm/cpu-features.h> 27#include <asm/cpu-features.h>
28#include <asm/cpu-type.h>
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/page.h> 30#include <asm/page.h>
29#include <asm/pgtable.h> 31#include <asm/pgtable.h>
@@ -601,11 +603,13 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
601 /* Catch bad driver code */ 603 /* Catch bad driver code */
602 BUG_ON(size == 0); 604 BUG_ON(size == 0);
603 605
606 preempt_disable();
604 if (cpu_has_inclusive_pcaches) { 607 if (cpu_has_inclusive_pcaches) {
605 if (size >= scache_size) 608 if (size >= scache_size)
606 r4k_blast_scache(); 609 r4k_blast_scache();
607 else 610 else
608 blast_scache_range(addr, addr + size); 611 blast_scache_range(addr, addr + size);
612 preempt_enable();
609 __sync(); 613 __sync();
610 return; 614 return;
611 } 615 }
@@ -621,6 +625,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
621 R4600_HIT_CACHEOP_WAR_IMPL; 625 R4600_HIT_CACHEOP_WAR_IMPL;
622 blast_dcache_range(addr, addr + size); 626 blast_dcache_range(addr, addr + size);
623 } 627 }
628 preempt_enable();
624 629
625 bc_wback_inv(addr, size); 630 bc_wback_inv(addr, size);
626 __sync(); 631 __sync();
@@ -631,6 +636,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
631 /* Catch bad driver code */ 636 /* Catch bad driver code */
632 BUG_ON(size == 0); 637 BUG_ON(size == 0);
633 638
639 preempt_disable();
634 if (cpu_has_inclusive_pcaches) { 640 if (cpu_has_inclusive_pcaches) {
635 if (size >= scache_size) 641 if (size >= scache_size)
636 r4k_blast_scache(); 642 r4k_blast_scache();
@@ -645,6 +651,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
645 */ 651 */
646 blast_inv_scache_range(addr, addr + size); 652 blast_inv_scache_range(addr, addr + size);
647 } 653 }
654 preempt_enable();
648 __sync(); 655 __sync();
649 return; 656 return;
650 } 657 }
@@ -655,6 +662,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
655 R4600_HIT_CACHEOP_WAR_IMPL; 662 R4600_HIT_CACHEOP_WAR_IMPL;
656 blast_inv_dcache_range(addr, addr + size); 663 blast_inv_dcache_range(addr, addr + size);
657 } 664 }
665 preempt_enable();
658 666
659 bc_inv(addr, size); 667 bc_inv(addr, size);
660 __sync(); 668 __sync();
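
The two DMA-cache hunks above wrap the blast operations in preempt_disable()/preempt_enable(), so the task cannot be migrated to another CPU part-way through operating on the local CPU's primary caches. The shape of that change, as a user-space sketch with stand-in helpers:

#include <stdio.h>

/* Stand-ins: in the kernel these come from <linux/preempt.h>. */
static void preempt_disable(void) { /* pin the task to the current CPU */ }
static void preempt_enable(void)  { /* allow migration again */ }

static void blast_dcache_range(unsigned long addr, unsigned long end)
{
	printf("writeback+invalidate [0x%lx, 0x%lx)\n", addr, end);
}

static void dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	preempt_disable();
	/*
	 * Everything between here and preempt_enable() runs on one CPU,
	 * so the per-CPU primary caches being flushed are the right ones.
	 */
	blast_dcache_range(addr, addr + size);
	preempt_enable();
}

int main(void)
{
	dma_cache_wback_inv(0x1000, 0x200);
	return 0;
}
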
@@ -780,20 +788,30 @@ static inline void rm7k_erratum31(void)
780 788
781static inline void alias_74k_erratum(struct cpuinfo_mips *c) 789static inline void alias_74k_erratum(struct cpuinfo_mips *c)
782{ 790{
791 unsigned int imp = c->processor_id & PRID_IMP_MASK;
792 unsigned int rev = c->processor_id & PRID_REV_MASK;
793
783 /* 794 /*
784 * Early versions of the 74K do not update the cache tags on a 795 * Early versions of the 74K do not update the cache tags on a
785 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG 796 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
786 * aliases. In this case it is better to treat the cache as always 797 * aliases. In this case it is better to treat the cache as always
787 * having aliases. 798 * having aliases.
788 */ 799 */
789 if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0)) 800 switch (imp) {
790 c->dcache.flags |= MIPS_CACHE_VTAG; 801 case PRID_IMP_74K:
791 if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0)) 802 if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
792 write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 803 c->dcache.flags |= MIPS_CACHE_VTAG;
793 if (((c->processor_id & 0xff00) == PRID_IMP_1074K) && 804 if (rev == PRID_REV_ENCODE_332(2, 4, 0))
794 ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) { 805 write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
795 c->dcache.flags |= MIPS_CACHE_VTAG; 806 break;
796 write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 807 case PRID_IMP_1074K:
808 if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
809 c->dcache.flags |= MIPS_CACHE_VTAG;
810 write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
811 }
812 break;
813 default:
814 BUG();
797 } 815 }
798} 816}
799 817
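
The reworked alias_74k_erratum() above extracts the implementation and revision fields once, dispatches on the implementation first, and only then compares revisions, so 74K thresholds are no longer applied to 1074K parts (or vice versa); an unexpected implementation now trips BUG(). A user-space sketch of that dispatch; the enum stands in for PRID_IMP_74K/PRID_IMP_1074K, and PRID_REV_ENCODE_332() is assumed to pack version/release/patch as ver<<5 | rel<<2 | patch:

#include <stdbool.h>
#include <stdio.h>

/* Assumed 3:3:2 revision encoding: version, release, patch packed as below. */
#define PRID_REV_ENCODE_332(ver, rel, patch) ((ver) << 5 | (rel) << 2 | (patch))

enum imp { IMP_74K, IMP_1074K };	/* placeholders for PRID_IMP_74K/_1074K */

/* Does this core need the "treat the dcache as virtually tagged" workaround? */
static bool needs_vtag_workaround(enum imp imp, unsigned int rev)
{
	switch (imp) {
	case IMP_74K:
		return rev <= PRID_REV_ENCODE_332(2, 4, 0);
	case IMP_1074K:
		return rev <= PRID_REV_ENCODE_332(1, 1, 0);
	}
	return false;	/* the kernel BUG()s here instead */
}

int main(void)
{
	printf("74K 2.4.0:   %d\n", needs_vtag_workaround(IMP_74K, PRID_REV_ENCODE_332(2, 4, 0)));
	printf("74K 3.0.0:   %d\n", needs_vtag_workaround(IMP_74K, PRID_REV_ENCODE_332(3, 0, 0)));
	printf("1074K 1.2.0: %d\n", needs_vtag_workaround(IMP_1074K, PRID_REV_ENCODE_332(1, 2, 0)));
	return 0;
}
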
@@ -809,7 +827,7 @@ static void probe_pcache(void)
809 unsigned long config1; 827 unsigned long config1;
810 unsigned int lsize; 828 unsigned int lsize;
811 829
812 switch (c->cputype) { 830 switch (current_cpu_type()) {
813 case CPU_R4600: /* QED style two way caches? */ 831 case CPU_R4600: /* QED style two way caches? */
814 case CPU_R4700: 832 case CPU_R4700:
815 case CPU_R5000: 833 case CPU_R5000:
@@ -1025,7 +1043,8 @@ static void probe_pcache(void)
1025 * presumably no vendor is shipping his hardware in the "bad" 1043 * presumably no vendor is shipping his hardware in the "bad"
1026 * configuration. 1044 * configuration.
1027 */ 1045 */
1028 if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 && 1046 if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
1047 (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
1029 !(config & CONF_SC) && c->icache.linesz != 16 && 1048 !(config & CONF_SC) && c->icache.linesz != 16 &&
1030 PAGE_SIZE <= 0x8000) 1049 PAGE_SIZE <= 0x8000)
1031 panic("Improper R4000SC processor configuration detected"); 1050 panic("Improper R4000SC processor configuration detected");
@@ -1045,7 +1064,7 @@ static void probe_pcache(void)
1045 * normally they'd suffer from aliases but magic in the hardware deals 1064 * normally they'd suffer from aliases but magic in the hardware deals
1046 * with that for us so we don't need to take care ourselves. 1065 * with that for us so we don't need to take care ourselves.
1047 */ 1066 */
1048 switch (c->cputype) { 1067 switch (current_cpu_type()) {
1049 case CPU_20KC: 1068 case CPU_20KC:
1050 case CPU_25KF: 1069 case CPU_25KF:
1051 case CPU_SB1: 1070 case CPU_SB1:
@@ -1065,7 +1084,7 @@ static void probe_pcache(void)
1065 case CPU_34K: 1084 case CPU_34K:
1066 case CPU_74K: 1085 case CPU_74K:
1067 case CPU_1004K: 1086 case CPU_1004K:
1068 if (c->cputype == CPU_74K) 1087 if (current_cpu_type() == CPU_74K)
1069 alias_74k_erratum(c); 1088 alias_74k_erratum(c);
1070 if ((read_c0_config7() & (1 << 16))) { 1089 if ((read_c0_config7() & (1 << 16))) {
1071 /* effectively physically indexed dcache, 1090 /* effectively physically indexed dcache,
@@ -1078,7 +1097,7 @@ static void probe_pcache(void)
1078 c->dcache.flags |= MIPS_CACHE_ALIASES; 1097 c->dcache.flags |= MIPS_CACHE_ALIASES;
1079 } 1098 }
1080 1099
1081 switch (c->cputype) { 1100 switch (current_cpu_type()) {
1082 case CPU_20KC: 1101 case CPU_20KC:
1083 /* 1102 /*
1084 * Some older 20Kc chips doesn't have the 'VI' bit in 1103 * Some older 20Kc chips doesn't have the 'VI' bit in
@@ -1207,7 +1226,7 @@ static void setup_scache(void)
1207 * processors don't have a S-cache that would be relevant to the 1226 * processors don't have a S-cache that would be relevant to the
1208 * Linux memory management. 1227 * Linux memory management.
1209 */ 1228 */
1210 switch (c->cputype) { 1229 switch (current_cpu_type()) {
1211 case CPU_R4000SC: 1230 case CPU_R4000SC:
1212 case CPU_R4000MC: 1231 case CPU_R4000MC:
1213 case CPU_R4400SC: 1232 case CPU_R4400SC:
@@ -1384,9 +1403,8 @@ static void r4k_cache_error_setup(void)
1384{ 1403{
1385 extern char __weak except_vec2_generic; 1404 extern char __weak except_vec2_generic;
1386 extern char __weak except_vec2_sb1; 1405 extern char __weak except_vec2_sb1;
1387 struct cpuinfo_mips *c = &current_cpu_data;
1388 1406
1389 switch (c->cputype) { 1407 switch (current_cpu_type()) {
1390 case CPU_SB1: 1408 case CPU_SB1:
1391 case CPU_SB1A: 1409 case CPU_SB1A:
1392 set_uncached_handler(0x100, &except_vec2_sb1, 0x80); 1410 set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 664e523653d0..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -18,6 +18,7 @@
18#include <linux/highmem.h> 18#include <linux/highmem.h>
19 19
20#include <asm/cache.h> 20#include <asm/cache.h>
21#include <asm/cpu-type.h>
21#include <asm/io.h> 22#include <asm/io.h>
22 23
23#include <dma-coherence.h> 24#include <dma-coherence.h>
@@ -307,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
307{ 308{
308 int i; 309 int i;
309 310
310 /* Make sure that gcc doesn't leave the empty loop body. */ 311 if (cpu_needs_post_dma_flush(dev))
311 for (i = 0; i < nelems; i++, sg++) { 312 for (i = 0; i < nelems; i++, sg++)
312 if (cpu_needs_post_dma_flush(dev))
313 __dma_sync(sg_page(sg), sg->offset, sg->length, 313 __dma_sync(sg_page(sg), sg->offset, sg->length,
314 direction); 314 direction);
315 }
316} 315}
317 316
318static void mips_dma_sync_sg_for_device(struct device *dev, 317static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -320,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
320{ 319{
321 int i; 320 int i;
322 321
323 /* Make sure that gcc doesn't leave the empty loop body. */ 322 if (!plat_device_is_coherent(dev))
324 for (i = 0; i < nelems; i++, sg++) { 323 for (i = 0; i < nelems; i++, sg++)
325 if (!plat_device_is_coherent(dev))
326 __dma_sync(sg_page(sg), sg->offset, sg->length, 324 __dma_sync(sg_page(sg), sg->offset, sg->length,
327 direction); 325 direction);
328 }
329} 326}
330 327
331int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 328int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
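
The two scatterlist-sync hunks above hoist the loop-invariant test (cpu_needs_post_dma_flush() or !plat_device_is_coherent()) out of the per-segment loop, which also removes the need for the old "keep the loop body non-empty" comment. The transformation in miniature, with stand-in types and predicates:

#include <stdbool.h>
#include <stdio.h>

struct seg { unsigned long offset, length; };

static bool device_is_coherent(void) { return false; }	/* stand-in predicate */

static void sync_segment(const struct seg *s)
{
	printf("sync offset=%lu len=%lu\n", s->offset, s->length);
}

static void sync_sg_for_device(struct seg *sg, int nelems)
{
	/*
	 * Old shape: for each segment { if (!coherent) sync(); }
	 * New shape: test once, then loop only when work is needed.
	 */
	if (!device_is_coherent())
		for (int i = 0; i < nelems; i++, sg++)
			sync_segment(sg);
}

int main(void)
{
	struct seg sg[2] = { { 0, 64 }, { 64, 128 } };

	sync_sg_for_device(sg, 2);
	return 0;
}
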
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 218c2109a55d..cbd81d17793a 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -18,6 +18,7 @@
18 18
19#include <asm/bugs.h> 19#include <asm/bugs.h>
20#include <asm/cacheops.h> 20#include <asm/cacheops.h>
21#include <asm/cpu-type.h>
21#include <asm/inst.h> 22#include <asm/inst.h>
22#include <asm/io.h> 23#include <asm/io.h>
23#include <asm/page.h> 24#include <asm/page.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5d01392e3518..08d05aee8788 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -6,6 +6,7 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8 8
9#include <asm/cpu-type.h>
9#include <asm/mipsregs.h> 10#include <asm/mipsregs.h>
10#include <asm/bcache.h> 11#include <asm/bcache.h>
11#include <asm/cacheops.h> 12#include <asm/cacheops.h>
@@ -71,7 +72,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
71 unsigned int tmp; 72 unsigned int tmp;
72 73
73 /* Check the bypass bit (L2B) */ 74 /* Check the bypass bit (L2B) */
74 switch (c->cputype) { 75 switch (current_cpu_type()) {
75 case CPU_34K: 76 case CPU_34K:
76 case CPU_74K: 77 case CPU_74K:
77 case CPU_1004K: 78 case CPU_1004K:
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 00b26a67a06d..bb3a5f643e97 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17 17
18#include <asm/cpu.h> 18#include <asm/cpu.h>
19#include <asm/cpu-type.h>
19#include <asm/bootinfo.h> 20#include <asm/bootinfo.h>
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 821b45175dc1..9bb3a9363b06 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -30,6 +30,7 @@
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/cpu-type.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34#include <asm/war.h> 35#include <asm/war.h>
35#include <asm/uasm.h> 36#include <asm/uasm.h>
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 53aad4a35375..a18af5fce67e 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -27,6 +27,7 @@
27#include <linux/timex.h> 27#include <linux/timex.h>
28#include <linux/mc146818rtc.h> 28#include <linux/mc146818rtc.h>
29 29
30#include <asm/cpu.h>
30#include <asm/mipsregs.h> 31#include <asm/mipsregs.h>
31#include <asm/mipsmtregs.h> 32#include <asm/mipsmtregs.h>
32#include <asm/hardirq.h> 33#include <asm/hardirq.h>
@@ -76,7 +77,7 @@ static void __init estimate_frequencies(void)
76#endif 77#endif
77 78
78#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ) 79#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
79 unsigned int prid = read_c0_prid() & 0xffff00; 80 unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
80 81
81 /* 82 /*
82 * XXXKYMA: hardwire the CPU frequency to Host Freq/4 83 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
@@ -169,7 +170,7 @@ unsigned int get_c0_compare_int(void)
169 170
170void __init plat_time_init(void) 171void __init plat_time_init(void)
171{ 172{
172 unsigned int prid = read_c0_prid() & 0xffff00; 173 unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
173 unsigned int freq; 174 unsigned int freq;
174 175
175 estimate_frequencies(); 176 estimate_frequencies();
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index a43ea3cc0a3b..552d26c34386 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/init.h> 8#include <linux/init.h>
9 9
10#include <asm/cpu.h>
10#include <asm/setup.h> 11#include <asm/setup.h>
11#include <asm/time.h> 12#include <asm/time.h>
12#include <asm/irq.h> 13#include <asm/irq.h>
@@ -34,7 +35,7 @@ static void __iomem *status_reg = (void __iomem *)0xbf000410;
34 */ 35 */
35static unsigned int __init estimate_cpu_frequency(void) 36static unsigned int __init estimate_cpu_frequency(void)
36{ 37{
37 unsigned int prid = read_c0_prid() & 0xffff00; 38 unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
38 unsigned int tick = 0; 39 unsigned int tick = 0;
39 unsigned int freq; 40 unsigned int freq;
40 unsigned int orig; 41 unsigned int orig;
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
index ed3bf0e3f309..c7622c6e5f67 100644
--- a/arch/mips/netlogic/xlr/fmn-config.c
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -36,6 +36,7 @@
36#include <linux/irq.h> 36#include <linux/irq.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38 38
39#include <asm/cpu.h>
39#include <asm/mipsregs.h> 40#include <asm/mipsregs.h>
40#include <asm/netlogic/xlr/fmn.h> 41#include <asm/netlogic/xlr/fmn.h>
41#include <asm/netlogic/xlr/xlr.h> 42#include <asm/netlogic/xlr/xlr.h>
@@ -187,7 +188,7 @@ void xlr_board_info_setup(void)
187 int processor_id, num_core; 188 int processor_id, num_core;
188 189
189 num_core = hweight32(nlm_current_node()->coremask); 190 num_core = hweight32(nlm_current_node()->coremask);
190 processor_id = read_c0_prid() & 0xff00; 191 processor_id = read_c0_prid() & PRID_IMP_MASK;
191 192
192 setup_cpu_fmninfo(cpu, num_core); 193 setup_cpu_fmninfo(cpu, num_core);
193 switch (processor_id) { 194 switch (processor_id) {
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 5e5424753b56..4d1736fc1955 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -12,6 +12,7 @@
12#include <linux/oprofile.h> 12#include <linux/oprofile.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <asm/cpu-info.h> 14#include <asm/cpu-info.h>
15#include <asm/cpu-type.h>
15 16
16#include "op_impl.h" 17#include "op_impl.h"
17 18
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 44dd5aa2e36f..5ec2a7bae02c 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -39,6 +39,7 @@
39#include <linux/mm.h> 39#include <linux/mm.h>
40#include <linux/console.h> 40#include <linux/console.h>
41#include <linux/tty.h> 41#include <linux/tty.h>
42#include <linux/vt.h>
42 43
43#include <asm/sibyte/bcm1480_regs.h> 44#include <asm/sibyte/bcm1480_regs.h>
44#include <asm/sibyte/bcm1480_scd.h> 45#include <asm/sibyte/bcm1480_scd.h>
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index 05ed92c92b69..8e2e04f77870 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -22,6 +22,7 @@
22#include <linux/string.h> 22#include <linux/string.h>
23 23
24#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
25#include <asm/cpu.h>
25#include <asm/mipsregs.h> 26#include <asm/mipsregs.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/sibyte/sb1250.h> 28#include <asm/sibyte/sb1250.h>
@@ -119,7 +120,7 @@ void __init bcm1480_setup(void)
119 uint64_t sys_rev; 120 uint64_t sys_rev;
120 int plldiv; 121 int plldiv;
121 122
122 sb1_pass = read_c0_prid() & 0xff; 123 sb1_pass = read_c0_prid() & PRID_REV_MASK;
123 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); 124 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
124 soc_type = SYS_SOC_TYPE(sys_rev); 125 soc_type = SYS_SOC_TYPE(sys_rev);
125 part_type = G_SYS_PART(sys_rev); 126 part_type = G_SYS_PART(sys_rev);
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index a14bd4cb0bc0..3c02b2a77ae9 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -22,6 +22,7 @@
22#include <linux/string.h> 22#include <linux/string.h>
23 23
24#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
25#include <asm/cpu.h>
25#include <asm/mipsregs.h> 26#include <asm/mipsregs.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/sibyte/sb1250.h> 28#include <asm/sibyte/sb1250.h>
@@ -182,7 +183,7 @@ void __init sb1250_setup(void)
182 int plldiv; 183 int plldiv;
183 int bad_config = 0; 184 int bad_config = 0;
184 185
185 sb1_pass = read_c0_prid() & 0xff; 186 sb1_pass = read_c0_prid() & PRID_REV_MASK;
186 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); 187 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION));
187 soc_type = SYS_SOC_TYPE(sys_rev); 188 soc_type = SYS_SOC_TYPE(sys_rev);
188 soc_pass = G_SYS_REVISION(sys_rev); 189 soc_pass = G_SYS_REVISION(sys_rev);
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 5b09b3544edd..efad85c8c823 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -25,6 +25,7 @@
25#endif 25#endif
26 26
27#include <asm/bootinfo.h> 27#include <asm/bootinfo.h>
28#include <asm/cpu.h>
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/sni.h> 31#include <asm/sni.h>
@@ -173,7 +174,7 @@ void __init plat_mem_setup(void)
173 system_type = "RM300-Cxx"; 174 system_type = "RM300-Cxx";
174 break; 175 break;
175 case SNI_BRD_PCI_DESKTOP: 176 case SNI_BRD_PCI_DESKTOP:
176 switch (read_c0_prid() & 0xff00) { 177 switch (read_c0_prid() & PRID_IMP_MASK) {
177 case PRID_IMP_R4600: 178 case PRID_IMP_R4600:
178 case PRID_IMP_R4700: 179 case PRID_IMP_R4700:
179 system_type = "RM200-C20"; 180 system_type = "RM200-C20";
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 */ 16 */
17
18#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
19
20#ifndef _ASM_OPENRISC_PROM_H 17#ifndef _ASM_OPENRISC_PROM_H
21#define _ASM_OPENRISC_PROM_H 18#define _ASM_OPENRISC_PROM_H
22#ifdef __KERNEL__
23#ifndef __ASSEMBLY__
24 19
25#include <linux/types.h>
26#include <asm/irq.h>
27#include <linux/irqdomain.h>
28#include <linux/atomic.h>
29#include <linux/of_irq.h>
30#include <linux/of_fdt.h>
31#include <linux/of_address.h>
32#include <linux/proc_fs.h>
33#include <linux/platform_device.h>
34#define HAVE_ARCH_DEVTREE_FIXUPS 20#define HAVE_ARCH_DEVTREE_FIXUPS
35 21
36/* Other Prototypes */
37extern int early_uartlite_console(void);
38
39/* Parse the ibm,dma-window property of an OF node into the busno, phys and
40 * size parameters.
41 */
42void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
43 unsigned long *busno, unsigned long *phys, unsigned long *size);
44
45extern void kdump_move_device_tree(void);
46
47/* Get the MAC address */
48extern const void *of_get_mac_address(struct device_node *np);
49
50/**
51 * of_irq_map_pci - Resolve the interrupt for a PCI device
52 * @pdev: the device whose interrupt is to be resolved
53 * @out_irq: structure of_irq filled by this function
54 *
55 * This function resolves the PCI interrupt for a given PCI device. If a
56 * device-node exists for a given pci_dev, it will use normal OF tree
57 * walking. If not, it will implement standard swizzling and walk up the
58 * PCI tree until an device-node is found, at which point it will finish
59 * resolving using the OF tree walking.
60 */
61struct pci_dev;
62extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
63
64#endif /* __ASSEMBLY__ */
65#endif /* __KERNEL__ */
66#endif /* _ASM_OPENRISC_PROM_H */ 22#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index d10d27a720c0..00c0ed333a3d 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -182,6 +182,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
182 182
183 if (user_mode(regs)) 183 if (user_mode(regs))
184 flags |= FAULT_FLAG_USER; 184 flags |= FAULT_FLAG_USER;
185
186 acc_type = parisc_acctyp(code, regs->iir);
187
185 if (acc_type & VM_WRITE) 188 if (acc_type & VM_WRITE)
186 flags |= FAULT_FLAG_WRITE; 189 flags |= FAULT_FLAG_WRITE;
187retry: 190retry:
@@ -196,8 +199,6 @@ retry:
196 199
197good_area: 200good_area:
198 201
199 acc_type = parisc_acctyp(code,regs->iir);
200
201 if ((vma->vm_flags & acc_type) != acc_type) 202 if ((vma->vm_flags & acc_type) != acc_type)
202 goto bad_area; 203 goto bad_area;
203 204
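
The parisc fix above moves the parisc_acctyp() call ahead of the new FAULT_FLAG_WRITE logic, which had been testing acc_type before the good_area: label where it used to be computed. The compute-before-use ordering it restores, in a trivial sketch (the flag values here are placeholders):

#include <stdio.h>

#define VM_WRITE	 0x2
#define FAULT_FLAG_WRITE 0x1

static unsigned long classify_access(unsigned long code) { return code & VM_WRITE; }

int main(void)
{
	unsigned long code = 0x6, flags = 0;

	/* Compute the access type first ... */
	unsigned long acc_type = classify_access(code);

	/* ... so that deriving flags from it actually sees a valid value. */
	if (acc_type & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	printf("flags=%#lx\n", flags);
	return 0;
}
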
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c 74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c 75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
76 76
77src-plat-y := of.c 77src-plat-y := of.c epapr.c
78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ 78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
79 treeboot-walnut.c cuboot-acadia.c \ 79 treeboot-walnut.c cuboot-acadia.c \
80 cuboot-kilauea.c simpleboot.c \ 80 cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
97 prpmc2800.c 97 prpmc2800.c
98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c 98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c 99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c 100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
101 101
102src-wlib := $(sort $(src-wlib-y)) 102src-wlib := $(sort $(src-wlib-y))
103src-plat := $(sort $(src-plat-y)) 103src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
1extern void epapr_platform_init(unsigned long r3, unsigned long r4,
2 unsigned long r5, unsigned long r6,
3 unsigned long r7);
4
5void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
6 unsigned long r6, unsigned long r7)
7{
8 epapr_platform_init(r3, r4, r5, r6, r7);
9}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); 48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
49} 49}
50 50
51void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, 51void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
52 unsigned long r6, unsigned long r7) 52 unsigned long r6, unsigned long r7)
53{ 53{
54 epapr_magic = r6; 54 epapr_magic = r6;
55 ima_size = r7; 55 ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
26 26
27static unsigned long claim_base; 27static unsigned long claim_base;
28 28
29void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
30 unsigned long r6, unsigned long r7);
31
29static void *of_try_claim(unsigned long size) 32static void *of_try_claim(unsigned long size)
30{ 33{
31 unsigned long addr = 0; 34 unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
61 } 64 }
62} 65}
63 66
64void platform_init(unsigned long a1, unsigned long a2, void *promptr) 67static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
65{ 68{
66 platform_ops.image_hdr = of_image_hdr; 69 platform_ops.image_hdr = of_image_hdr;
67 platform_ops.malloc = of_try_claim; 70 platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
81 loader_info.initrd_size = a2; 84 loader_info.initrd_size = a2;
82 } 85 }
83} 86}
87
88void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
89 unsigned long r6, unsigned long r7)
90{
91 /* Detect OF vs. ePAPR boot */
92 if (r5)
93 of_platform_init(r3, r4, (void *)r5);
94 else
95 epapr_platform_init(r3, r4, r5, r6, r7);
96}
97
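
With the change above, platform_init() becomes a small dispatcher: Open Firmware supplies a client-interface pointer in the third register argument, so a non-zero r5 selects the OF path and a zero r5 falls back to ePAPR. A rough user-space sketch of that dispatch shape, with hypothetical stand-in routines and made-up argument values:

#include <stdio.h>

static void of_init(unsigned long initrd_addr, unsigned long initrd_size,
		    void *promptr)
{
	printf("OF boot: prom at %p, initrd %#lx+%#lx\n",
	       promptr, initrd_addr, initrd_size);
}

static void epapr_init(unsigned long r3, unsigned long r4, unsigned long r5,
		       unsigned long r6, unsigned long r7)
{
	printf("ePAPR boot: r6=%#lx r7=%#lx\n", r6, r7);
	(void)r3; (void)r4; (void)r5;
}

static void platform_entry(unsigned long r3, unsigned long r4, unsigned long r5,
			   unsigned long r6, unsigned long r7)
{
	if (r5)				/* OF passes a non-zero pointer here */
		of_init(r3, r4, (void *)r5);
	else				/* ePAPR leaves r5 zero */
		epapr_init(r3, r4, r5, r6, r7);
}

int main(void)
{
	platform_entry(0, 0, 0, 0x1234, 0x5678);	/* made-up ePAPR-style args */
	return 0;
}
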
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
148 148
149case "$platform" in 149case "$platform" in
150pseries) 150pseries)
151 platformo=$object/of.o 151 platformo="$object/of.o $object/epapr.o"
152 link_address='0x4000000' 152 link_address='0x4000000'
153 ;; 153 ;;
154maple) 154maple)
155 platformo=$object/of.o 155 platformo="$object/of.o $object/epapr.o"
156 link_address='0x400000' 156 link_address='0x400000'
157 ;; 157 ;;
158pmac|chrp) 158pmac|chrp)
159 platformo=$object/of.o 159 platformo="$object/of.o $object/epapr.o"
160 ;; 160 ;;
161coff) 161coff)
162 platformo="$object/crt0.o $object/of.o" 162 platformo="$object/crt0.o $object/of.o $object/epapr.o"
163 lds=$object/zImage.coff.lds 163 lds=$object/zImage.coff.lds
164 link_address='0x500000' 164 link_address='0x500000'
165 pie= 165 pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
253 platformo="$object/treeboot-iss4xx.o" 253 platformo="$object/treeboot-iss4xx.o"
254 ;; 254 ;;
255epapr) 255epapr)
256 platformo="$object/epapr.o $object/epapr-wrapper.o"
256 link_address='0x20000000' 257 link_address='0x20000000'
257 pie=-pie 258 pie=-pie
258 ;; 259 ;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
69 69
70extern void irq_ctx_init(void); 70extern void irq_ctx_init(void);
71extern void call_do_softirq(struct thread_info *tp); 71extern void call_do_softirq(struct thread_info *tp);
72extern int call_handle_irq(int irq, void *p1, 72extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
73 struct thread_info *tp, void *func);
74extern void do_IRQ(struct pt_regs *regs); 73extern void do_IRQ(struct pt_regs *regs);
74extern void __do_irq(struct pt_regs *regs);
75 75
76int irq_choose_cpu(const struct cpumask *mask); 76int irq_choose_cpu(const struct cpumask *mask);
77 77
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
149 149
150struct thread_struct { 150struct thread_struct {
151 unsigned long ksp; /* Kernel stack pointer */ 151 unsigned long ksp; /* Kernel stack pointer */
152 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
153
154#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC64
155 unsigned long ksp_vsid; 153 unsigned long ksp_vsid;
156#endif 154#endif
@@ -162,6 +160,7 @@ struct thread_struct {
162#endif 160#endif
163#ifdef CONFIG_PPC32 161#ifdef CONFIG_PPC32
164 void *pgdir; /* root of page-table tree */ 162 void *pgdir; /* root of page-table tree */
163 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
165#endif 164#endif
166#ifdef CONFIG_PPC_ADV_DEBUG_REGS 165#ifdef CONFIG_PPC_ADV_DEBUG_REGS
167 /* 166 /*
@@ -321,7 +320,6 @@ struct thread_struct {
321#else 320#else
322#define INIT_THREAD { \ 321#define INIT_THREAD { \
323 .ksp = INIT_SP, \ 322 .ksp = INIT_SP, \
324 .ksp_limit = INIT_SP_LIMIT, \
325 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 323 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
326 .fs = KERNEL_DS, \ 324 .fs = KERNEL_DS, \
327 .fpr = {{0}}, \ 325 .fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); 80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
81#else 81#else
82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
83 DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
84 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
83#endif /* CONFIG_PPC64 */ 85#endif /* CONFIG_PPC64 */
84 86
85 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 87 DEFINE(KSP, offsetof(struct thread_struct, ksp));
86 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
87 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 88 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
88#ifdef CONFIG_BOOKE 89#ifdef CONFIG_BOOKE
89 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); 90 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0adab06ce5c0..572bb5b95f35 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
661 /* number of bytes needed for the bitmap */ 661 /* number of bytes needed for the bitmap */
662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); 662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
663 663
664 page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); 664 page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
665 if (!page) 665 if (!page)
666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz); 666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
667 tbl->it_map = page_address(page); 667 tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..57d286a78f86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
441} 441}
442#endif 442#endif
443 443
444static inline void handle_one_irq(unsigned int irq)
445{
446 struct thread_info *curtp, *irqtp;
447 unsigned long saved_sp_limit;
448 struct irq_desc *desc;
449
450 desc = irq_to_desc(irq);
451 if (!desc)
452 return;
453
454 /* Switch to the irq stack to handle this */
455 curtp = current_thread_info();
456 irqtp = hardirq_ctx[smp_processor_id()];
457
458 if (curtp == irqtp) {
459 /* We're already on the irq stack, just handle it */
460 desc->handle_irq(irq, desc);
461 return;
462 }
463
464 saved_sp_limit = current->thread.ksp_limit;
465
466 irqtp->task = curtp->task;
467 irqtp->flags = 0;
468
469 /* Copy the softirq bits in preempt_count so that the
470 * softirq checks work in the hardirq context. */
471 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
472 (curtp->preempt_count & SOFTIRQ_MASK);
473
474 current->thread.ksp_limit = (unsigned long)irqtp +
475 _ALIGN_UP(sizeof(struct thread_info), 16);
476
477 call_handle_irq(irq, desc, irqtp, desc->handle_irq);
478 current->thread.ksp_limit = saved_sp_limit;
479 irqtp->task = NULL;
480
481 /* Set any flag that may have been set on the
482 * alternate stack
483 */
484 if (irqtp->flags)
485 set_bits(irqtp->flags, &curtp->flags);
486}
487
488static inline void check_stack_overflow(void) 444static inline void check_stack_overflow(void)
489{ 445{
490#ifdef CONFIG_DEBUG_STACKOVERFLOW 446#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
501#endif 457#endif
502} 458}
503 459
504void do_IRQ(struct pt_regs *regs) 460void __do_irq(struct pt_regs *regs)
505{ 461{
506 struct pt_regs *old_regs = set_irq_regs(regs); 462 struct irq_desc *desc;
507 unsigned int irq; 463 unsigned int irq;
508 464
509 irq_enter(); 465 irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
519 */ 475 */
520 irq = ppc_md.get_irq(); 476 irq = ppc_md.get_irq();
521 477
522 /* We can hard enable interrupts now */ 478 /* We can hard enable interrupts now to allow perf interrupts */
523 may_hard_irq_enable(); 479 may_hard_irq_enable();
524 480
525 /* And finally process it */ 481 /* And finally process it */
526 if (irq != NO_IRQ) 482 if (unlikely(irq == NO_IRQ))
527 handle_one_irq(irq);
528 else
529 __get_cpu_var(irq_stat).spurious_irqs++; 483 __get_cpu_var(irq_stat).spurious_irqs++;
484 else {
485 desc = irq_to_desc(irq);
486 if (likely(desc))
487 desc->handle_irq(irq, desc);
488 }
530 489
531 trace_irq_exit(regs); 490 trace_irq_exit(regs);
532 491
533 irq_exit(); 492 irq_exit();
493}
494
495void do_IRQ(struct pt_regs *regs)
496{
497 struct pt_regs *old_regs = set_irq_regs(regs);
498 struct thread_info *curtp, *irqtp;
499
500 /* Switch to the irq stack to handle this */
501 curtp = current_thread_info();
502 irqtp = hardirq_ctx[raw_smp_processor_id()];
503
504 /* Already there ? */
505 if (unlikely(curtp == irqtp)) {
506 __do_irq(regs);
507 set_irq_regs(old_regs);
508 return;
509 }
510
511 /* Prepare the thread_info in the irq stack */
512 irqtp->task = curtp->task;
513 irqtp->flags = 0;
514
515 /* Copy the preempt_count so that the [soft]irq checks work. */
516 irqtp->preempt_count = curtp->preempt_count;
517
518 /* Switch stack and call */
519 call_do_irq(regs, irqtp);
520
521 /* Restore stack limit */
522 irqtp->task = NULL;
523
524 /* Copy back updates to the thread_info */
525 if (irqtp->flags)
526 set_bits(irqtp->flags, &curtp->flags);
527
534 set_irq_regs(old_regs); 528 set_irq_regs(old_regs);
535} 529}
536 530
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
592 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 586 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
593 tp = softirq_ctx[i]; 587 tp = softirq_ctx[i];
594 tp->cpu = i; 588 tp->cpu = i;
595 tp->preempt_count = 0;
596 589
597 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 590 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
598 tp = hardirq_ctx[i]; 591 tp = hardirq_ctx[i];
599 tp->cpu = i; 592 tp->cpu = i;
600 tp->preempt_count = HARDIRQ_OFFSET;
601 } 593 }
602} 594}
603 595
604static inline void do_softirq_onstack(void) 596static inline void do_softirq_onstack(void)
605{ 597{
606 struct thread_info *curtp, *irqtp; 598 struct thread_info *curtp, *irqtp;
607 unsigned long saved_sp_limit = current->thread.ksp_limit;
608 599
609 curtp = current_thread_info(); 600 curtp = current_thread_info();
610 irqtp = softirq_ctx[smp_processor_id()]; 601 irqtp = softirq_ctx[smp_processor_id()];
611 irqtp->task = curtp->task; 602 irqtp->task = curtp->task;
612 irqtp->flags = 0; 603 irqtp->flags = 0;
613 current->thread.ksp_limit = (unsigned long)irqtp +
614 _ALIGN_UP(sizeof(struct thread_info), 16);
615 call_do_softirq(irqtp); 604 call_do_softirq(irqtp);
616 current->thread.ksp_limit = saved_sp_limit;
617 irqtp->task = NULL; 605 irqtp->task = NULL;
618 606
619 /* Set any flag that may have been set on the 607 /* Set any flag that may have been set on the
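
The irq.c rework above splits the old handle_one_irq() logic in two: __do_irq() only processes the interrupt, while do_IRQ() decides whether a stack switch is needed at all and calls __do_irq() directly when execution is already on the irq stack. A toy user-space sketch of that wrapper/worker split, using a plain flag in place of the real thread_info comparison:

#include <stdio.h>
#include <stdbool.h>

static bool on_irq_stack;	/* stand-in for the curtp == irqtp test */

static void irq_worker(int irq)
{
	printf("handling irq %d (on_irq_stack=%d)\n", irq, on_irq_stack);
}

static void irq_entry(int irq)
{
	if (on_irq_stack) {		/* already in irq context: no switch */
		irq_worker(irq);
		return;
	}
	on_irq_stack = true;		/* "switch" to the irq context */
	irq_worker(irq);
	on_irq_stack = false;		/* and switch back */
}

int main(void)
{
	irq_entry(7);
	return 0;
}
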
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
36 36
37 .text 37 .text
38 38
39/*
40 * We store the saved ksp_limit in the unused part
41 * of the STACK_FRAME_OVERHEAD
42 */
39_GLOBAL(call_do_softirq) 43_GLOBAL(call_do_softirq)
40 mflr r0 44 mflr r0
41 stw r0,4(r1) 45 stw r0,4(r1)
46 lwz r10,THREAD+KSP_LIMIT(r2)
47 addi r11,r3,THREAD_INFO_GAP
42 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) 48 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
43 mr r1,r3 49 mr r1,r3
50 stw r10,8(r1)
51 stw r11,THREAD+KSP_LIMIT(r2)
44 bl __do_softirq 52 bl __do_softirq
53 lwz r10,8(r1)
45 lwz r1,0(r1) 54 lwz r1,0(r1)
46 lwz r0,4(r1) 55 lwz r0,4(r1)
56 stw r10,THREAD+KSP_LIMIT(r2)
47 mtlr r0 57 mtlr r0
48 blr 58 blr
49 59
50_GLOBAL(call_handle_irq) 60_GLOBAL(call_do_irq)
51 mflr r0 61 mflr r0
52 stw r0,4(r1) 62 stw r0,4(r1)
53 mtctr r6 63 lwz r10,THREAD+KSP_LIMIT(r2)
54 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 64 addi r11,r3,THREAD_INFO_GAP
55 mr r1,r5 65 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
56 bctrl 66 mr r1,r4
67 stw r10,8(r1)
68 stw r11,THREAD+KSP_LIMIT(r2)
69 bl __do_irq
70 lwz r10,8(r1)
57 lwz r1,0(r1) 71 lwz r1,0(r1)
58 lwz r0,4(r1) 72 lwz r0,4(r1)
73 stw r10,THREAD+KSP_LIMIT(r2)
59 mtlr r0 74 mtlr r0
60 blr 75 blr
61 76
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
40 mtlr r0 40 mtlr r0
41 blr 41 blr
42 42
43_GLOBAL(call_handle_irq) 43_GLOBAL(call_do_irq)
44 ld r8,0(r6)
45 mflr r0 44 mflr r0
46 std r0,16(r1) 45 std r0,16(r1)
47 mtctr r8 46 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
48 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 47 mr r1,r4
49 mr r1,r5 48 bl .__do_irq
50 bctrl
51 ld r1,0(r1) 49 ld r1,0(r1)
52 ld r0,16(r1) 50 ld r0,16(r1)
53 mtlr r0 51 mtlr r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1000 kregs = (struct pt_regs *) sp; 1000 kregs = (struct pt_regs *) sp;
1001 sp -= STACK_FRAME_OVERHEAD; 1001 sp -= STACK_FRAME_OVERHEAD;
1002 p->thread.ksp = sp; 1002 p->thread.ksp = sp;
1003#ifdef CONFIG_PPC32
1003 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 1004 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1004 _ALIGN_UP(sizeof(struct thread_info), 16); 1005 _ALIGN_UP(sizeof(struct thread_info), 16);
1005 1006#endif
1006#ifdef CONFIG_HAVE_HW_BREAKPOINT 1007#ifdef CONFIG_HAVE_HW_BREAKPOINT
1007 p->thread.ptrace_bps[0] = NULL; 1008 p->thread.ptrace_bps[0] = NULL;
1008#endif 1009#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
196 196
197static cell_t __initdata regbuf[1024]; 197static cell_t __initdata regbuf[1024];
198 198
199static bool rtas_has_query_cpu_stopped;
200
199 201
200/* 202/*
201 * Error results ... some OF calls will return "-1" on error, some 203 * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
1574 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1576 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1575 &val, sizeof(val)); 1577 &val, sizeof(val));
1576 1578
1579 /* Check if it supports "query-cpu-stopped-state" */
1580 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1581 &val, sizeof(val)) != PROM_ERROR)
1582 rtas_has_query_cpu_stopped = true;
1583
1577#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__) 1584#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1578 /* PowerVN takeover hack */ 1585 /* PowerVN takeover hack */
1579 prom_rtas_data = base; 1586 prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
1815 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1822 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1816 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1823 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1817 1824
1825 /*
1826 * On pseries, if RTAS supports "query-cpu-stopped-state",
1827 * we skip this stage, the CPUs will be started by the
1828 * kernel using RTAS.
1829 */
1830 if ((of_platform == PLATFORM_PSERIES ||
1831 of_platform == PLATFORM_PSERIES_LPAR) &&
1832 rtas_has_query_cpu_stopped) {
1833 prom_printf("prom_hold_cpus: skipped\n");
1834 return;
1835 }
1836
1818 prom_debug("prom_hold_cpus: start...\n"); 1837 prom_debug("prom_hold_cpus: start...\n");
1819 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1838 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1820 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); 1839 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3011 * On non-powermacs, put all CPUs in spin-loops. 3030 * On non-powermacs, put all CPUs in spin-loops.
3012 * 3031 *
3013 * PowerMacs use a different mechanism to spin CPUs 3032 * PowerMacs use a different mechanism to spin CPUs
3033 *
 3034 * (This must be done after instantiating RTAS)
3014 */ 3035 */
3015 if (of_platform != PLATFORM_POWERMAC && 3036 if (of_platform != PLATFORM_POWERMAC &&
3016 of_platform != PLATFORM_OPAL) 3037 of_platform != PLATFORM_OPAL)
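
The prom_init.c change above probes once for the RTAS "query-cpu-stopped-state" service, caches the answer in rtas_has_query_cpu_stopped, and later uses that flag to skip the legacy prom_hold_cpus() spin-loop setup on pseries. A small user-space sketch of that probe-once, branch-later pattern, with hypothetical stand-ins for the firmware calls:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

static bool has_query_cpu_stopped;

/* stand-in for prom_getprop(): pretend only this one service exists */
static bool firmware_has(const char *service)
{
	return strcmp(service, "query-cpu-stopped-state") == 0;
}

static void probe_firmware(void)
{
	has_query_cpu_stopped = firmware_has("query-cpu-stopped-state");
}

static void hold_cpus(void)
{
	if (has_query_cpu_stopped) {
		printf("hold_cpus: skipped, firmware can start CPUs later\n");
		return;
	}
	printf("hold_cpus: parking secondary CPUs in a spin loop\n");
}

int main(void)
{
	probe_firmware();
	hold_cpus();
	return 0;
}
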
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 27a90b99ef67..b4e667663d9b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
17#include <asm/machdep.h> 17#include <asm/machdep.h>
18#include <asm/smp.h> 18#include <asm/smp.h>
19#include <asm/pmc.h> 19#include <asm/pmc.h>
20#include <asm/firmware.h>
20 21
21#include "cacheinfo.h" 22#include "cacheinfo.h"
22 23
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
179SYSFS_PMCSETUP(dscr, SPRN_DSCR); 180SYSFS_PMCSETUP(dscr, SPRN_DSCR);
180SYSFS_PMCSETUP(pir, SPRN_PIR); 181SYSFS_PMCSETUP(pir, SPRN_PIR);
181 182
183/*
 184 Let's only enable read for phyp resources and
185 enable write when needed with a separate function.
 186 Let's be conservative and default to pseries.
187*/
182static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 188static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
183static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); 189static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
184static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); 190static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
185static DEVICE_ATTR(purr, 0600, show_purr, store_purr); 191static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
186static DEVICE_ATTR(pir, 0400, show_pir, NULL); 192static DEVICE_ATTR(pir, 0400, show_pir, NULL);
187 193
188unsigned long dscr_default = 0; 194unsigned long dscr_default = 0;
189EXPORT_SYMBOL(dscr_default); 195EXPORT_SYMBOL(dscr_default);
190 196
197static void add_write_permission_dev_attr(struct device_attribute *attr)
198{
199 attr->attr.mode |= 0200;
200}
201
191static ssize_t show_dscr_default(struct device *dev, 202static ssize_t show_dscr_default(struct device *dev,
192 struct device_attribute *attr, char *buf) 203 struct device_attribute *attr, char *buf)
193{ 204{
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
394 if (cpu_has_feature(CPU_FTR_MMCRA)) 405 if (cpu_has_feature(CPU_FTR_MMCRA))
395 device_create_file(s, &dev_attr_mmcra); 406 device_create_file(s, &dev_attr_mmcra);
396 407
397 if (cpu_has_feature(CPU_FTR_PURR)) 408 if (cpu_has_feature(CPU_FTR_PURR)) {
409 if (!firmware_has_feature(FW_FEATURE_LPAR))
410 add_write_permission_dev_attr(&dev_attr_purr);
398 device_create_file(s, &dev_attr_purr); 411 device_create_file(s, &dev_attr_purr);
412 }
399 413
400 if (cpu_has_feature(CPU_FTR_SPURR)) 414 if (cpu_has_feature(CPU_FTR_SPURR))
401 device_create_file(s, &dev_attr_spurr); 415 device_create_file(s, &dev_attr_spurr);
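
The sysfs.c hunk above starts the PURR attribute read-only (0400) and only ORs in the owner-write bit when the kernel is not running under an LPAR hypervisor, before the attribute is registered. A cut-down user-space sketch of that conditional permission widening, with a hypothetical attribute type:

#include <stdio.h>
#include <stdbool.h>

struct attr {
	const char *name;
	unsigned int mode;	/* octal permission bits, as in sysfs */
};

static struct attr purr_attr = { "purr", 0400 };	/* read-only default */

static void add_write_permission(struct attr *a)
{
	a->mode |= 0200;	/* owner write */
}

int main(void)
{
	bool running_under_lpar = false;	/* pretend bare metal */

	if (!running_under_lpar)
		add_write_permission(&purr_attr);

	printf("%s mode = %o\n", purr_attr.name, purr_attr.mode);
	return 0;
}
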
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7b60b9851469..cd809eaa8b5c 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
79 TABORT(R3) 79 TABORT(R3)
80 blr 80 blr
81 81
82 .section ".toc","aw"
83DSCR_DEFAULT:
84 .tc dscr_default[TC],dscr_default
85
86 .section ".text"
82 87
83/* void tm_reclaim(struct thread_struct *thread, 88/* void tm_reclaim(struct thread_struct *thread,
84 * unsigned long orig_msr, 89 * unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
123 mr r15, r14 128 mr r15, r14
124 ori r15, r15, MSR_FP 129 ori r15, r15, MSR_FP
125 li r16, MSR_RI 130 li r16, MSR_RI
131 ori r16, r16, MSR_EE /* IRQs hard off */
126 andc r15, r15, r16 132 andc r15, r15, r16
127 oris r15, r15, MSR_VEC@h 133 oris r15, r15, MSR_VEC@h
128#ifdef CONFIG_VSX 134#ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
187 std r1, PACATMSCRATCH(r13) 193 std r1, PACATMSCRATCH(r13)
188 ld r1, PACAR1(r13) 194 ld r1, PACAR1(r13)
189 195
196 /* Store the PPR in r11 and reset to decent value */
197 std r11, GPR11(r1) /* Temporary stash */
198 mfspr r11, SPRN_PPR
199 HMT_MEDIUM
200
190 /* Now get some more GPRS free */ 201 /* Now get some more GPRS free */
191 std r7, GPR7(r1) /* Temporary stash */ 202 std r7, GPR7(r1) /* Temporary stash */
192 std r12, GPR12(r1) /* '' '' '' */ 203 std r12, GPR12(r1) /* '' '' '' */
193 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ 204 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
194 205
206 std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
207
195 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ 208 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
196 209
197 /* Make r7 look like an exception frame so that we 210 /* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
203 SAVE_GPR(0, r7) /* user r0 */ 216 SAVE_GPR(0, r7) /* user r0 */
204 SAVE_GPR(2, r7) /* user r2 */ 217 SAVE_GPR(2, r7) /* user r2 */
205 SAVE_4GPRS(3, r7) /* user r3-r6 */ 218 SAVE_4GPRS(3, r7) /* user r3-r6 */
206 SAVE_4GPRS(8, r7) /* user r8-r11 */ 219 SAVE_GPR(8, r7) /* user r8 */
220 SAVE_GPR(9, r7) /* user r9 */
221 SAVE_GPR(10, r7) /* user r10 */
207 ld r3, PACATMSCRATCH(r13) /* user r1 */ 222 ld r3, PACATMSCRATCH(r13) /* user r1 */
208 ld r4, GPR7(r1) /* user r7 */ 223 ld r4, GPR7(r1) /* user r7 */
209 ld r5, GPR12(r1) /* user r12 */ 224 ld r5, GPR11(r1) /* user r11 */
210 GET_SCRATCH0(6) /* user r13 */ 225 ld r6, GPR12(r1) /* user r12 */
226 GET_SCRATCH0(8) /* user r13 */
211 std r3, GPR1(r7) 227 std r3, GPR1(r7)
212 std r4, GPR7(r7) 228 std r4, GPR7(r7)
213 std r5, GPR12(r7) 229 std r5, GPR11(r7)
214 std r6, GPR13(r7) 230 std r6, GPR12(r7)
231 std r8, GPR13(r7)
215 232
216 SAVE_NVGPRS(r7) /* user r14-r31 */ 233 SAVE_NVGPRS(r7) /* user r14-r31 */
217 234
@@ -234,14 +251,12 @@ dont_backup_fp:
234 std r6, _XER(r7) 251 std r6, _XER(r7)
235 252
236 253
237 /* ******************** TAR, PPR, DSCR ********** */ 254 /* ******************** TAR, DSCR ********** */
238 mfspr r3, SPRN_TAR 255 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR 256 mfspr r4, SPRN_DSCR
240 mfspr r5, SPRN_DSCR
241 257
242 std r3, THREAD_TM_TAR(r12) 258 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12) 259 std r4, THREAD_TM_DSCR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245 260
246 /* MSR and flags: We don't change CRs, and we don't need to alter 261 /* MSR and flags: We don't change CRs, and we don't need to alter
247 * MSR. 262 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
258 std r3, THREAD_TM_TFHAR(r12) 273 std r3, THREAD_TM_TFHAR(r12)
259 std r4, THREAD_TM_TFIAR(r12) 274 std r4, THREAD_TM_TFIAR(r12)
260 275
261 /* AMR and PPR are checkpointed too, but are unsupported by Linux. */ 276 /* AMR is checkpointed too, but is unsupported by Linux. */
262 277
263 /* Restore original MSR/IRQ state & clear TM mode */ 278 /* Restore original MSR/IRQ state & clear TM mode */
264 ld r14, TM_FRAME_L0(r1) /* Orig MSR */ 279 ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
274 mtcr r4 289 mtcr r4
275 mtlr r0 290 mtlr r0
276 ld r2, 40(r1) 291 ld r2, 40(r1)
292
293 /* Load system default DSCR */
294 ld r4, DSCR_DEFAULT@toc(r2)
295 ld r0, 0(r4)
296 mtspr SPRN_DSCR, r0
297
277 blr 298 blr
278 299
279 300
@@ -358,25 +379,24 @@ dont_restore_fp:
358 379
359restore_gprs: 380restore_gprs:
360 381
361 /* ******************** TAR, PPR, DSCR ********** */ 382 /* ******************** CR,LR,CCR,MSR ********** */
362 ld r4, THREAD_TM_TAR(r3) 383 ld r4, _CTR(r7)
363 ld r5, THREAD_TM_PPR(r3) 384 ld r5, _LINK(r7)
364 ld r6, THREAD_TM_DSCR(r3) 385 ld r6, _CCR(r7)
386 ld r8, _XER(r7)
365 387
366 mtspr SPRN_TAR, r4 388 mtctr r4
367 mtspr SPRN_PPR, r5 389 mtlr r5
368 mtspr SPRN_DSCR, r6 390 mtcr r6
391 mtxer r8
369 392
370 /* ******************** CR,LR,CCR,MSR ********** */ 393 /* ******************** TAR ******************** */
371 ld r3, _CTR(r7) 394 ld r4, THREAD_TM_TAR(r3)
372 ld r4, _LINK(r7) 395 mtspr SPRN_TAR, r4
373 ld r5, _CCR(r7)
374 ld r6, _XER(r7)
375 396
376 mtctr r3 397 /* Load up the PPR and DSCR in GPRs only at this stage */
377 mtlr r4 398 ld r5, THREAD_TM_DSCR(r3)
378 mtcr r5 399 ld r6, THREAD_TM_PPR(r3)
379 mtxer r6
380 400
381 /* Clear the MSR RI since we are about to change R1. EE is already off 401 /* Clear the MSR RI since we are about to change R1. EE is already off
382 */ 402 */
@@ -384,19 +404,26 @@ restore_gprs:
384 mtmsrd r4, 1 404 mtmsrd r4, 1
385 405
386 REST_4GPRS(0, r7) /* GPR0-3 */ 406 REST_4GPRS(0, r7) /* GPR0-3 */
387 REST_GPR(4, r7) /* GPR4-6 */ 407 REST_GPR(4, r7) /* GPR4 */
388 REST_GPR(5, r7)
389 REST_GPR(6, r7)
390 REST_4GPRS(8, r7) /* GPR8-11 */ 408 REST_4GPRS(8, r7) /* GPR8-11 */
391 REST_2GPRS(12, r7) /* GPR12-13 */ 409 REST_2GPRS(12, r7) /* GPR12-13 */
392 410
393 REST_NVGPRS(r7) /* GPR14-31 */ 411 REST_NVGPRS(r7) /* GPR14-31 */
394 412
395 ld r7, GPR7(r7) /* GPR7 */ 413 /* Load up PPR and DSCR here so we don't run with user values for long
414 */
415 mtspr SPRN_DSCR, r5
416 mtspr SPRN_PPR, r6
417
418 REST_GPR(5, r7) /* GPR5-7 */
419 REST_GPR(6, r7)
420 ld r7, GPR7(r7)
396 421
397 /* Commit register state as checkpointed state: */ 422 /* Commit register state as checkpointed state: */
398 TRECHKPT 423 TRECHKPT
399 424
425 HMT_MEDIUM
426
400 /* Our transactional state has now changed. 427 /* Our transactional state has now changed.
401 * 428 *
402 * Now just get out of here. Transactional (current) state will be 429 * Now just get out of here. Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
419 mtcr r4 446 mtcr r4
420 mtlr r0 447 mtlr r0
421 ld r2, 40(r1) 448 ld r2, 40(r1)
449
450 /* Load system default DSCR */
451 ld r4, DSCR_DEFAULT@toc(r2)
452 ld r0, 0(r4)
453 mtspr SPRN_DSCR, r0
454
422 blr 455 blr
423 456
424 /* ****************************************************************** */ 457 /* ****************************************************************** */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 78a350670de3..d38cc08b16c7 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1530 const char *cp; 1530 const char *cp;
1531 1531
1532 dn = dev->of_node; 1532 dn = dev->of_node;
1533 if (!dn) 1533 if (!dn) {
1534 return -ENODEV; 1534 strcat(buf, "\n");
1535 return strlen(buf);
1536 }
1535 cp = of_get_property(dn, "compatible", NULL); 1537 cp = of_get_property(dn, "compatible", NULL);
1536 if (!cp) 1538 if (!cp) {
1537 return -ENODEV; 1539 strcat(buf, "\n");
1540 return strlen(buf);
1541 }
1538 1542
1539 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); 1543 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1540} 1544}
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
226 blr 226 blr
227 227
228 228
229 .macro source 229 .macro srcnr
230100: 230100:
231 .section __ex_table,"a" 231 .section __ex_table,"a"
232 .align 3 232 .align 3
233 .llong 100b,.Lsrc_error 233 .llong 100b,.Lsrc_error_nr
234 .previous 234 .previous
235 .endm 235 .endm
236 236
237 .macro dest 237 .macro source
238150:
239 .section __ex_table,"a"
240 .align 3
241 .llong 150b,.Lsrc_error
242 .previous
243 .endm
244
245 .macro dstnr
238200: 246200:
239 .section __ex_table,"a" 247 .section __ex_table,"a"
240 .align 3 248 .align 3
241 .llong 200b,.Ldest_error 249 .llong 200b,.Ldest_error_nr
250 .previous
251 .endm
252
253 .macro dest
254250:
255 .section __ex_table,"a"
256 .align 3
257 .llong 250b,.Ldest_error
242 .previous 258 .previous
243 .endm 259 .endm
244 260
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
269 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ 285 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
270 beq .Lcopy_aligned 286 beq .Lcopy_aligned
271 287
272 li r7,4 288 li r9,4
273 sub r6,r7,r6 289 sub r6,r9,r6
274 mtctr r6 290 mtctr r6
275 291
2761: 2921:
277source; lhz r6,0(r3) /* align to doubleword */ 293srcnr; lhz r6,0(r3) /* align to doubleword */
278 subi r5,r5,2 294 subi r5,r5,2
279 addi r3,r3,2 295 addi r3,r3,2
280 adde r0,r0,r6 296 adde r0,r0,r6
281dest; sth r6,0(r4) 297dstnr; sth r6,0(r4)
282 addi r4,r4,2 298 addi r4,r4,2
283 bdnz 1b 299 bdnz 1b
284 300
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
392 408
393 mtctr r6 409 mtctr r6
3943: 4103:
395source; ld r6,0(r3) 411srcnr; ld r6,0(r3)
396 addi r3,r3,8 412 addi r3,r3,8
397 adde r0,r0,r6 413 adde r0,r0,r6
398dest; std r6,0(r4) 414dstnr; std r6,0(r4)
399 addi r4,r4,8 415 addi r4,r4,8
400 bdnz 3b 416 bdnz 3b
401 417
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
405 srdi. r6,r5,2 421 srdi. r6,r5,2
406 beq .Lcopy_tail_halfword 422 beq .Lcopy_tail_halfword
407 423
408source; lwz r6,0(r3) 424srcnr; lwz r6,0(r3)
409 addi r3,r3,4 425 addi r3,r3,4
410 adde r0,r0,r6 426 adde r0,r0,r6
411dest; stw r6,0(r4) 427dstnr; stw r6,0(r4)
412 addi r4,r4,4 428 addi r4,r4,4
413 subi r5,r5,4 429 subi r5,r5,4
414 430
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
416 srdi. r6,r5,1 432 srdi. r6,r5,1
417 beq .Lcopy_tail_byte 433 beq .Lcopy_tail_byte
418 434
419source; lhz r6,0(r3) 435srcnr; lhz r6,0(r3)
420 addi r3,r3,2 436 addi r3,r3,2
421 adde r0,r0,r6 437 adde r0,r0,r6
422dest; sth r6,0(r4) 438dstnr; sth r6,0(r4)
423 addi r4,r4,2 439 addi r4,r4,2
424 subi r5,r5,2 440 subi r5,r5,2
425 441
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
427 andi. r6,r5,1 443 andi. r6,r5,1
428 beq .Lcopy_finish 444 beq .Lcopy_finish
429 445
430source; lbz r6,0(r3) 446srcnr; lbz r6,0(r3)
431 sldi r9,r6,8 /* Pad the byte out to 16 bits */ 447 sldi r9,r6,8 /* Pad the byte out to 16 bits */
432 adde r0,r0,r9 448 adde r0,r0,r9
433dest; stb r6,0(r4) 449dstnr; stb r6,0(r4)
434 450
435.Lcopy_finish: 451.Lcopy_finish:
436 addze r0,r0 /* add in final carry */ 452 addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
440 blr 456 blr
441 457
442.Lsrc_error: 458.Lsrc_error:
459 ld r14,STK_REG(R14)(r1)
460 ld r15,STK_REG(R15)(r1)
461 ld r16,STK_REG(R16)(r1)
462 addi r1,r1,STACKFRAMESIZE
463.Lsrc_error_nr:
443 cmpdi 0,r7,0 464 cmpdi 0,r7,0
444 beqlr 465 beqlr
445 li r6,-EFAULT 466 li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
447 blr 468 blr
448 469
449.Ldest_error: 470.Ldest_error:
471 ld r14,STK_REG(R14)(r1)
472 ld r15,STK_REG(R15)(r1)
473 ld r16,STK_REG(R16)(r1)
474 addi r1,r1,STACKFRAMESIZE
475.Ldest_error_nr:
450 cmpdi 0,r8,0 476 cmpdi 0,r8,0
451 beqlr 477 beqlr
452 li r6,-EFAULT 478 li r6,-EFAULT
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1505 */ 1505 */
1506 if ((ra == 1) && !(regs->msr & MSR_PR) \ 1506 if ((ra == 1) && !(regs->msr & MSR_PR) \
1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) { 1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1508#ifdef CONFIG_PPC32
1508 /* 1509 /*
1509 * Check if we will touch kernel sack overflow 1510 * Check if we will touch kernel sack overflow
1510 */ 1511 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1513 err = -EINVAL; 1514 err = -EINVAL;
1514 break; 1515 break;
1515 } 1516 }
1516 1517#endif /* CONFIG_PPC32 */
1517 /* 1518 /*
1518 * Check if we already set since that means we'll 1519 * Check if we already set since that means we'll
1519 * lose the previous value. 1520 * lose the previous value.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0cd9e4c6837..8ed035d2edb5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
300{ 300{
301} 301}
302 302
303void register_page_bootmem_memmap(unsigned long section_nr,
304 struct page *start_page, unsigned long size)
305{
306}
303#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 307#endif /* CONFIG_SPARSEMEM_VMEMMAP */
304 308
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1cf9c5b67f24..3fa93dc7fe75 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -297,12 +297,21 @@ void __init paging_init(void)
297} 297}
298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */ 298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
299 299
300static void __init register_page_bootmem_info(void)
301{
302 int i;
303
304 for_each_online_node(i)
305 register_page_bootmem_info_node(NODE_DATA(i));
306}
307
300void __init mem_init(void) 308void __init mem_init(void)
301{ 309{
302#ifdef CONFIG_SWIOTLB 310#ifdef CONFIG_SWIOTLB
303 swiotlb_init(0); 311 swiotlb_init(0);
304#endif 312#endif
305 313
314 register_page_bootmem_info();
306 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 315 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
307 set_max_mapnr(max_pfn); 316 set_max_mapnr(max_pfn);
308 free_all_bootmem(); 317 free_all_bootmem();
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 2ee4a707f0df..a3f7abd2f13f 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -199,6 +199,7 @@
199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) 199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) 200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) 201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
202#define MMCR1_FAB_SHIFT 36
202#define MMCR1_DC_QUAL_SHIFT 47 203#define MMCR1_DC_QUAL_SHIFT 47
203#define MMCR1_IC_QUAL_SHIFT 46 204#define MMCR1_IC_QUAL_SHIFT 46
204 205
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
388 * the threshold bits are used for the match value. 389 * the threshold bits are used for the match value.
389 */ 390 */
390 if (event_is_fab_match(event[i])) { 391 if (event_is_fab_match(event[i])) {
391 mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) & 392 mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
392 EVENT_THR_CTL_MASK; 393 EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
393 } else { 394 } else {
394 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; 395 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
395 mmcra |= val << MMCRA_THR_CTL_SHIFT; 396 mmcra |= val << MMCRA_THR_CTL_SHIFT;
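
The power8-pmu.c fix above masks the threshold-control field out of the event code and then shifts the whole masked value up by MMCR1_FAB_SHIFT, instead of OR-ing it into MMCR1 at bit 0. A runnable sketch of that mask-then-place step; the shift and mask constants here are illustrative, not the real event encoding:

#include <stdio.h>
#include <stdint.h>

#define THR_CTL_SHIFT	32		/* illustrative source position */
#define THR_CTL_MASK	0xffULL		/* illustrative field width */
#define FAB_SHIFT	36		/* destination slot, like MMCR1_FAB_SHIFT */

int main(void)
{
	uint64_t event = 0xabULL << THR_CTL_SHIFT;	/* made-up event code */
	uint64_t mmcr1 = 0;

	/* mask the field out of the event, then place it at its register slot */
	mmcr1 |= ((event >> THR_CTL_SHIFT) & THR_CTL_MASK) << FAB_SHIFT;

	printf("mmcr1 = 0x%016llx\n", (unsigned long long)mmcr1);
	return 0;
}
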
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
233 233
234 alloc_bootmem_cpumask_var(&of_spin_mask); 234 alloc_bootmem_cpumask_var(&of_spin_mask);
235 235
236 /* Mark threads which are still spinning in hold loops. */ 236 /*
237 if (cpu_has_feature(CPU_FTR_SMT)) { 237 * Mark threads which are still spinning in hold loops
238 for_each_present_cpu(i) { 238 *
239 if (cpu_thread_in_core(i) == 0) 239 * We know prom_init will not have started them if RTAS supports
240 cpumask_set_cpu(i, of_spin_mask); 240 * query-cpu-stopped-state.
241 } 241 */
242 } else { 242 if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
243 cpumask_copy(of_spin_mask, cpu_present_mask); 243 if (cpu_has_feature(CPU_FTR_SMT)) {
244 for_each_present_cpu(i) {
245 if (cpu_thread_in_core(i) == 0)
246 cpumask_set_cpu(i, of_spin_mask);
247 }
248 } else
249 cpumask_copy(of_spin_mask, cpu_present_mask);
250
251 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
244 } 252 }
245 253
246 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
247
248 /* Non-lpar has additional take/give timebase */ 254 /* Non-lpar has additional take/give timebase */
249 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { 255 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
250 smp_ops->give_timebase = rtas_give_timebase; 256 smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
93 select ARCH_INLINE_WRITE_UNLOCK_IRQ 93 select ARCH_INLINE_WRITE_UNLOCK_IRQ
94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
96 select ARCH_USE_CMPXCHG_LOCKREF
96 select ARCH_WANT_IPC_PARSE_VERSION 97 select ARCH_WANT_IPC_PARSE_VERSION
97 select BUILDTIME_EXTABLE_SORT 98 select BUILDTIME_EXTABLE_SORT
98 select CLONE_BACKWARDS2 99 select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
102 select GENERIC_TIME_VSYSCALL_OLD 103 select GENERIC_TIME_VSYSCALL_OLD
103 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 104 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
104 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 105 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
105 select HAVE_ARCH_MUTEX_CPU_RELAX
106 select HAVE_ARCH_SECCOMP_FILTER 106 select HAVE_ARCH_SECCOMP_FILTER
107 select HAVE_ARCH_TRACEHOOK 107 select HAVE_ARCH_TRACEHOOK
108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
7 */ 7 */
8 8
9#include <asm-generic/mutex-dec.h> 9#include <asm-generic/mutex-dec.h>
10
11#define arch_mutex_cpu_relax() barrier()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
198 barrier(); 198 barrier();
199} 199}
200 200
201#define arch_mutex_cpu_relax() barrier()
202
201static inline void psw_set_key(unsigned int key) 203static inline void psw_set_key(unsigned int key)
202{ 204{
203 asm volatile("spka 0(%0)" : : "d" (key)); 205 asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44extern int arch_spin_trylock_retry(arch_spinlock_t *); 44extern int arch_spin_trylock_retry(arch_spinlock_t *);
45extern void arch_spin_relax(arch_spinlock_t *lock); 45extern void arch_spin_relax(arch_spinlock_t *lock);
46 46
47static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
48{
49 return lock.owner_cpu == 0;
50}
51
47static inline void arch_spin_lock(arch_spinlock_t *lp) 52static inline void arch_spin_lock(arch_spinlock_t *lp)
48{ 53{
49 int old; 54 int old;
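
The s390 hunk above adds arch_spin_value_unlocked(), which lets generic code (the cmpxchg-based lockref path selected by ARCH_USE_CMPXCHG_LOCKREF) test a copied spinlock value for "no owner recorded" without touching the live lock. A stand-alone sketch of that value-based check, with a cut-down hypothetical lock type:

#include <stdio.h>
#include <stdbool.h>

typedef struct {
	unsigned int owner_cpu;		/* 0 means nobody holds the lock */
} arch_spinlock_t;

static bool arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner_cpu == 0;	/* operates on a snapshot, not a pointer */
}

int main(void)
{
	arch_spinlock_t unlocked = { 0 };
	arch_spinlock_t locked   = { 3 };	/* pretend some CPU holds it */

	printf("unlocked: %d, locked: %d\n",
	       arch_spin_value_unlocked(unlocked),
	       arch_spin_value_unlocked(locked));
	return 0;
}
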
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index a1be70db75fe..305f7ee1f382 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -2,6 +2,7 @@ menu "Machine selection"
2 2
3config SCORE 3config SCORE
4 def_bool y 4 def_bool y
5 select HAVE_GENERIC_HARDIRQS
5 select GENERIC_IRQ_SHOW 6 select GENERIC_IRQ_SHOW
6 select GENERIC_IOMAP 7 select GENERIC_IOMAP
7 select GENERIC_ATOMIC64 8 select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
110source "crypto/Kconfig" 111source "crypto/Kconfig"
111 112
112source "lib/Kconfig" 113source "lib/Kconfig"
114
115config NO_IOMEM
116 def_bool y
diff --git a/arch/score/Makefile b/arch/score/Makefile
index 974aefe86123..9e3e060290e0 100644
--- a/arch/score/Makefile
+++ b/arch/score/Makefile
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
20# 20#
21KBUILD_AFLAGS += $(cflags-y) 21KBUILD_AFLAGS += $(cflags-y)
22KBUILD_CFLAGS += $(cflags-y) 22KBUILD_CFLAGS += $(cflags-y)
23KBUILD_AFLAGS_MODULE += -mlong-calls 23KBUILD_AFLAGS_MODULE +=
24KBUILD_CFLAGS_MODULE += -mlong-calls 24KBUILD_CFLAGS_MODULE +=
25LDFLAGS += --oformat elf32-littlescore 25LDFLAGS += --oformat elf32-littlescore
26LDFLAGS_vmlinux += -G0 -static -nostdlib 26LDFLAGS_vmlinux += -G0 -static -nostdlib
27 27
diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h
index f909ac3144a4..961bd64015a8 100644
--- a/arch/score/include/asm/checksum.h
+++ b/arch/score/include/asm/checksum.h
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
184 __wsum sum) 184 __wsum sum)
185{ 185{
186 __asm__ __volatile__( 186 __asm__ __volatile__(
187 ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" 187 ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
188 ".set\tnoat\n\t" 188 "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
189 "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" 189 "cmp.c\t%5, %0\n\t"
190 "sltu\t$1, %0, %5\n\t" 190 "bleu 1f\n\t"
191 "addu\t%0, $1\n\t" 191 "addi\t%0, 0x1\n\t"
192 "addu\t%0, %6\t\t\t# csum\n\t" 192 "1:add\t%0, %0, %6\t\t\t# csum\n\t"
193 "sltu\t$1, %0, %6\n\t" 193 "cmp.c\t%6, %0\n\t"
194 "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" 194 "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
195 "addu\t%0, $1\n\t" 195 "bleu 1f\n\t"
196 "addu\t%0, %1\n\t" 196 "addi\t%0, 0x1\n\t"
197 "sltu\t$1, %0, %1\n\t" 197 "1:add\t%0, %0, %1\n\t"
198 "lw\t%1, 4(%2)\n\t" 198 "cmp.c\t%1, %0\n\t"
199 "addu\t%0, $1\n\t" 199 "1:lw\t%1, [%2, 4]\n\t"
200 "addu\t%0, %1\n\t" 200 "bleu 1f\n\t"
201 "sltu\t$1, %0, %1\n\t" 201 "addi\t%0, 0x1\n\t"
202 "lw\t%1, 8(%2)\n\t" 202 "1:add\t%0, %0, %1\n\t"
203 "addu\t%0, $1\n\t" 203 "cmp.c\t%1, %0\n\t"
204 "addu\t%0, %1\n\t" 204 "lw\t%1, [%2,8]\n\t"
205 "sltu\t$1, %0, %1\n\t" 205 "bleu 1f\n\t"
206 "lw\t%1, 12(%2)\n\t" 206 "addi\t%0, 0x1\n\t"
207 "addu\t%0, $1\n\t" 207 "1:add\t%0, %0, %1\n\t"
208 "addu\t%0, %1\n\t" 208 "cmp.c\t%1, %0\n\t"
209 "sltu\t$1, %0, %1\n\t" 209 "lw\t%1, [%2, 12]\n\t"
210 "lw\t%1, 0(%3)\n\t" 210 "bleu 1f\n\t"
211 "addu\t%0, $1\n\t" 211 "addi\t%0, 0x1\n\t"
212 "addu\t%0, %1\n\t" 212 "1:add\t%0, %0,%1\n\t"
213 "sltu\t$1, %0, %1\n\t" 213 "cmp.c\t%1, %0\n\t"
214 "lw\t%1, 4(%3)\n\t" 214 "lw\t%1, [%3, 0]\n\t"
215 "addu\t%0, $1\n\t" 215 "bleu 1f\n\t"
216 "addu\t%0, %1\n\t" 216 "addi\t%0, 0x1\n\t"
217 "sltu\t$1, %0, %1\n\t" 217 "1:add\t%0, %0, %1\n\t"
218 "lw\t%1, 8(%3)\n\t" 218 "cmp.c\t%1, %0\n\t"
219 "addu\t%0, $1\n\t" 219 "lw\t%1, [%3, 4]\n\t"
220 "addu\t%0, %1\n\t" 220 "bleu 1f\n\t"
221 "sltu\t$1, %0, %1\n\t" 221 "addi\t%0, 0x1\n\t"
222 "lw\t%1, 12(%3)\n\t" 222 "1:add\t%0, %0, %1\n\t"
223 "addu\t%0, $1\n\t" 223 "cmp.c\t%1, %0\n\t"
224 "addu\t%0, %1\n\t" 224 "lw\t%1, [%3, 8]\n\t"
225 "sltu\t$1, %0, %1\n\t" 225 "bleu 1f\n\t"
226 "addu\t%0, $1\t\t\t# Add final carry\n\t" 226 "addi\t%0, 0x1\n\t"
227 ".set\tnoat\n\t" 227 "1:add\t%0, %0, %1\n\t"
228 ".set\tnoreorder" 228 "cmp.c\t%1, %0\n\t"
229 "lw\t%1, [%3, 12]\n\t"
230 "bleu 1f\n\t"
231 "addi\t%0, 0x1\n\t"
232 "1:add\t%0, %0, %1\n\t"
233 "cmp.c\t%1, %0\n\t"
234 "bleu 1f\n\t"
235 "addi\t%0, 0x1\n\t"
236 "1:\n\t"
237 ".set\toptimize"
229 : "=r" (sum), "=r" (proto) 238 : "=r" (sum), "=r" (proto)
230 : "r" (saddr), "r" (daddr), 239 : "r" (saddr), "r" (daddr),
231 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); 240 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h
index fbbfd7132e3b..574c8827abe2 100644
--- a/arch/score/include/asm/io.h
+++ b/arch/score/include/asm/io.h
@@ -5,5 +5,4 @@
5 5
6#define virt_to_bus virt_to_phys 6#define virt_to_bus virt_to_phys
7#define bus_to_virt phys_to_virt 7#define bus_to_virt phys_to_virt
8
9#endif /* _ASM_SCORE_IO_H */ 8#endif /* _ASM_SCORE_IO_H */
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 059a61b7071b..716b3fd1d863 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -2,7 +2,7 @@
2#define _ASM_SCORE_PGALLOC_H 2#define _ASM_SCORE_PGALLOC_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5#include <linux/highmem.h>
6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7 pte_t *pte) 7 pte_t *pte)
8{ 8{
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 7234ed09b7b7..befb87d30a89 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -264,7 +264,7 @@ resume_kernel:
264 disable_irq 264 disable_irq
265 lw r8, [r28, TI_PRE_COUNT] 265 lw r8, [r28, TI_PRE_COUNT]
266 cmpz.c r8 266 cmpz.c r8
267 bne r8, restore_all 267 bne restore_all
268need_resched: 268need_resched:
269 lw r8, [r28, TI_FLAGS] 269 lw r8, [r28, TI_FLAGS]
270 andri.c r9, r8, _TIF_NEED_RESCHED 270 andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
415 sw r9, [r0, PT_EPC] 415 sw r9, [r0, PT_EPC]
416 416
417 cmpi.c r27, __NR_syscalls # check syscall number 417 cmpi.c r27, __NR_syscalls # check syscall number
418 bgeu illegal_syscall 418 bcs illegal_syscall
419 419
420 slli r8, r27, 2 # get syscall routine 420 slli r8, r27, 2 # get syscall routine
421 la r11, sys_call_table 421 la r11, sys_call_table
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index f4c6d02421d3..a1519ad3d49d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
78 p->thread.reg0 = (unsigned long) childregs; 78 p->thread.reg0 = (unsigned long) childregs;
79 if (unlikely(p->flags & PF_KTHREAD)) { 79 if (unlikely(p->flags & PF_KTHREAD)) {
80 memset(childregs, 0, sizeof(struct pt_regs)); 80 memset(childregs, 0, sizeof(struct pt_regs));
81 p->thread->reg12 = usp; 81 p->thread.reg12 = usp;
82 p->thread->reg13 = arg; 82 p->thread.reg13 = arg;
83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread; 83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
84 } else { 84 } else {
85 *childregs = *current_pt_regs(); 85 *childregs = *current_pt_regs();
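
The score copy_thread() fix above switches from p->thread->reg12 to p->thread.reg12: thread is a struct embedded in the task structure, not a pointer, so its members are reached with '.' once p itself has been dereferenced. A tiny sketch of that distinction with cut-down hypothetical types:

#include <stdio.h>

struct thread_struct {
	unsigned long reg12, reg13;
};

struct task_struct {
	struct thread_struct thread;	/* embedded, not a pointer */
};

int main(void)
{
	struct task_struct task;
	struct task_struct *p = &task;

	/* p is a pointer, thread is embedded: '->' once, then '.' */
	p->thread.reg12 = 0x1000;
	p->thread.reg13 = 42;

	printf("reg12=%#lx reg13=%lu\n", p->thread.reg12, p->thread.reg13);
	return 0;
}
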
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2137ad667438..78c4fdb91bc5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -506,12 +506,17 @@ config SUN_OPENPROMFS
506 Only choose N if you know in advance that you will not need to modify 506 Only choose N if you know in advance that you will not need to modify
507 OpenPROM settings on the running system. 507 OpenPROM settings on the running system.
508 508
509# Makefile helper 509# Makefile helpers
510config SPARC64_PCI 510config SPARC64_PCI
511 bool 511 bool
512 default y 512 default y
513 depends on SPARC64 && PCI 513 depends on SPARC64 && PCI
514 514
515config SPARC64_PCI_MSI
516 bool
517 default y
518 depends on SPARC64_PCI && PCI_MSI
519
515endmenu 520endmenu
516 521
517menu "Executable file formats" 522menu "Executable file formats"
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index e204f902e6c9..7c90c50c200d 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -254,7 +254,7 @@ static int sun_fd_request_irq(void)
254 once = 1; 254 once = 1;
255 255
256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq, 256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
257 IRQF_DISABLED, "floppy", NULL); 257 0, "floppy", NULL);
258 258
259 return ((error == 0) ? 0 : -1); 259 return ((error == 0) ? 0 : -1);
260 } 260 }
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index d432fb20358e..d15cc1794b0e 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,3 +1,4 @@
1
1# 2#
2# Makefile for the linux kernel. 3# Makefile for the linux kernel.
3# 4#
@@ -99,7 +100,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
99obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o 100obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
100obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o 101obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
101obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o 102obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
102obj-$(CONFIG_PCI_MSI) += pci_msi.o 103obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
103 104
104obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o 105obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
105 106
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 62d6b153ffa2..dff60abbea01 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -849,9 +849,8 @@ void ldom_reboot(const char *boot_command)
849 if (boot_command && strlen(boot_command)) { 849 if (boot_command && strlen(boot_command)) {
850 unsigned long len; 850 unsigned long len;
851 851
852 strcpy(full_boot_str, "boot "); 852 snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
853 strlcpy(full_boot_str + strlen("boot "), boot_command, 853 boot_command);
854 sizeof(full_boot_str + strlen("boot ")));
855 len = strlen(full_boot_str); 854 len = strlen(full_boot_str);
856 855
857 if (reboot_data_supported) { 856 if (reboot_data_supported) {
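
The ds.c change above replaces a strcpy()/strlcpy() pair, whose size argument was sizeof applied to a pointer expression rather than to the buffer, with a single bounded snprintf() that builds "boot <command>" in one step. A minimal sketch of the bounded-formatting idiom:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char full_boot_str[256];
	const char *boot_command = "disk1:a vmlinux";	/* made-up command */

	/* one bounded call builds the whole string and NUL-terminates it */
	snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", boot_command);

	printf("%s (len=%zu)\n", full_boot_str, strlen(full_boot_str));
	return 0;
}

In the removed code the limit passed to strlcpy() was sizeof(full_boot_str + strlen("boot ")), i.e. the size of a pointer, so anything beyond a handful of characters of the boot command was silently dropped.
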
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 54df554b82d9..e01d75d40329 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1249,12 +1249,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); 1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1251 1251
1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED, 1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
1253 lp->rx_irq_name, lp); 1253 lp->rx_irq_name, lp);
1254 if (err) 1254 if (err)
1255 return err; 1255 return err;
1256 1256
1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED, 1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
1258 lp->tx_irq_name, lp); 1258 lp->tx_irq_name, lp);
1259 if (err) { 1259 if (err) {
1260 free_irq(lp->cfg.rx_irq, lp); 1260 free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 8a7cc663b3f8..d45a2c48f185 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -361,7 +361,7 @@ config CMDLINE_OVERRIDE
361 361
362config VMALLOC_RESERVE 362config VMALLOC_RESERVE
363 hex 363 hex
364 default 0x1000000 364 default 0x2000000
365 365
366config HARDWALL 366config HARDWALL
367 bool "Hardwall support to allow access to user dynamic network" 367 bool "Hardwall support to allow access to user dynamic network"
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
index 4f8f3d619c4a..e19325c4c431 100644
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -21,7 +21,7 @@ struct alloc_buffer_stacks_param {
21 unsigned int flags; 21 unsigned int flags;
22}; 22};
23 23
24int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, 24int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
25 unsigned int count, unsigned int first, 25 unsigned int count, unsigned int first,
26 unsigned int flags) 26 unsigned int flags)
27{ 27{
@@ -45,7 +45,7 @@ struct init_buffer_stack_aux_param {
45 unsigned int buffer_size_enum; 45 unsigned int buffer_size_enum;
46}; 46};
47 47
48int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, 48int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
49 void *mem_va, size_t mem_size, 49 void *mem_va, size_t mem_size,
50 unsigned int mem_flags, unsigned int stack, 50 unsigned int mem_flags, unsigned int stack,
51 unsigned int buffer_size_enum) 51 unsigned int buffer_size_enum)
@@ -80,7 +80,7 @@ struct alloc_notif_rings_param {
80 unsigned int flags; 80 unsigned int flags;
81}; 81};
82 82
83int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, 83int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
84 unsigned int count, unsigned int first, 84 unsigned int count, unsigned int first,
85 unsigned int flags) 85 unsigned int flags)
86{ 86{
@@ -102,7 +102,7 @@ struct init_notif_ring_aux_param {
102 unsigned int ring; 102 unsigned int ring;
103}; 103};
104 104
105int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 105int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
106 size_t mem_size, unsigned int mem_flags, 106 size_t mem_size, unsigned int mem_flags,
107 unsigned int ring) 107 unsigned int ring)
108{ 108{
@@ -133,7 +133,7 @@ struct request_notif_ring_interrupt_param {
133 unsigned int ring; 133 unsigned int ring;
134}; 134};
135 135
136int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, 136int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
137 int inter_x, int inter_y, 137 int inter_x, int inter_y,
138 int inter_ipi, int inter_event, 138 int inter_ipi, int inter_event,
139 unsigned int ring) 139 unsigned int ring)
@@ -158,7 +158,7 @@ struct enable_notif_ring_interrupt_param {
158 unsigned int ring; 158 unsigned int ring;
159}; 159};
160 160
161int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, 161int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
162 unsigned int ring) 162 unsigned int ring)
163{ 163{
164 struct enable_notif_ring_interrupt_param temp; 164 struct enable_notif_ring_interrupt_param temp;
@@ -179,7 +179,7 @@ struct alloc_notif_groups_param {
179 unsigned int flags; 179 unsigned int flags;
180}; 180};
181 181
182int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, 182int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
183 unsigned int count, unsigned int first, 183 unsigned int count, unsigned int first,
184 unsigned int flags) 184 unsigned int flags)
185{ 185{
@@ -201,7 +201,7 @@ struct init_notif_group_param {
201 gxio_mpipe_notif_group_bits_t bits; 201 gxio_mpipe_notif_group_bits_t bits;
202}; 202};
203 203
204int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, 204int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
205 unsigned int group, 205 unsigned int group,
206 gxio_mpipe_notif_group_bits_t bits) 206 gxio_mpipe_notif_group_bits_t bits)
207{ 207{
@@ -223,7 +223,7 @@ struct alloc_buckets_param {
223 unsigned int flags; 223 unsigned int flags;
224}; 224};
225 225
226int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, 226int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
227 unsigned int first, unsigned int flags) 227 unsigned int first, unsigned int flags)
228{ 228{
229 struct alloc_buckets_param temp; 229 struct alloc_buckets_param temp;
@@ -244,7 +244,7 @@ struct init_bucket_param {
244 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info; 244 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
245}; 245};
246 246
247int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, 247int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
248 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info) 248 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
249{ 249{
250 struct init_bucket_param temp; 250 struct init_bucket_param temp;
@@ -265,7 +265,7 @@ struct alloc_edma_rings_param {
265 unsigned int flags; 265 unsigned int flags;
266}; 266};
267 267
268int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, 268int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
269 unsigned int count, unsigned int first, 269 unsigned int count, unsigned int first,
270 unsigned int flags) 270 unsigned int flags)
271{ 271{
@@ -288,7 +288,7 @@ struct init_edma_ring_aux_param {
288 unsigned int channel; 288 unsigned int channel;
289}; 289};
290 290
291int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 291int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
292 size_t mem_size, unsigned int mem_flags, 292 size_t mem_size, unsigned int mem_flags,
293 unsigned int ring, unsigned int channel) 293 unsigned int ring, unsigned int channel)
294{ 294{
@@ -315,7 +315,7 @@ int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
315EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux); 315EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
316 316
317 317
318int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, 318int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
319 size_t blob_size) 319 size_t blob_size)
320{ 320{
321 const void *params = blob; 321 const void *params = blob;
@@ -332,7 +332,7 @@ struct register_client_memory_param {
332 unsigned int flags; 332 unsigned int flags;
333}; 333};
334 334
335int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, 335int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
336 unsigned int iotlb, HV_PTE pte, 336 unsigned int iotlb, HV_PTE pte,
337 unsigned int flags) 337 unsigned int flags)
338{ 338{
@@ -355,7 +355,7 @@ struct link_open_aux_param {
355 unsigned int flags; 355 unsigned int flags;
356}; 356};
357 357
358int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, 358int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
359 _gxio_mpipe_link_name_t name, unsigned int flags) 359 _gxio_mpipe_link_name_t name, unsigned int flags)
360{ 360{
361 struct link_open_aux_param temp; 361 struct link_open_aux_param temp;
@@ -374,7 +374,7 @@ struct link_close_aux_param {
374 int mac; 374 int mac;
375}; 375};
376 376
377int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac) 377int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac)
378{ 378{
379 struct link_close_aux_param temp; 379 struct link_close_aux_param temp;
380 struct link_close_aux_param *params = &temp; 380 struct link_close_aux_param *params = &temp;
@@ -393,7 +393,7 @@ struct link_set_attr_aux_param {
393 int64_t val; 393 int64_t val;
394}; 394};
395 395
396int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, 396int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
397 uint32_t attr, int64_t val) 397 uint32_t attr, int64_t val)
398{ 398{
399 struct link_set_attr_aux_param temp; 399 struct link_set_attr_aux_param temp;
@@ -415,8 +415,8 @@ struct get_timestamp_aux_param {
415 uint64_t cycles; 415 uint64_t cycles;
416}; 416};
417 417
418int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, 418int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
419 uint64_t * nsec, uint64_t * cycles) 419 uint64_t *nsec, uint64_t *cycles)
420{ 420{
421 int __result; 421 int __result;
422 struct get_timestamp_aux_param temp; 422 struct get_timestamp_aux_param temp;
@@ -440,7 +440,7 @@ struct set_timestamp_aux_param {
440 uint64_t cycles; 440 uint64_t cycles;
441}; 441};
442 442
443int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, 443int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
444 uint64_t nsec, uint64_t cycles) 444 uint64_t nsec, uint64_t cycles)
445{ 445{
446 struct set_timestamp_aux_param temp; 446 struct set_timestamp_aux_param temp;
@@ -460,8 +460,7 @@ struct adjust_timestamp_aux_param {
460 int64_t nsec; 460 int64_t nsec;
461}; 461};
462 462
463int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, 463int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec)
464 int64_t nsec)
465{ 464{
466 struct adjust_timestamp_aux_param temp; 465 struct adjust_timestamp_aux_param temp;
467 struct adjust_timestamp_aux_param *params = &temp; 466 struct adjust_timestamp_aux_param *params = &temp;
@@ -475,25 +474,6 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
475 474
476EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); 475EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
477 476
478struct adjust_timestamp_freq_param {
479 int32_t ppb;
480};
481
482int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
483 int32_t ppb)
484{
485 struct adjust_timestamp_freq_param temp;
486 struct adjust_timestamp_freq_param *params = &temp;
487
488 params->ppb = ppb;
489
490 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
491 sizeof(*params),
492 GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
493}
494
495EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
496
497struct config_edma_ring_blks_param { 477struct config_edma_ring_blks_param {
498 unsigned int ering; 478 unsigned int ering;
499 unsigned int max_blks; 479 unsigned int max_blks;
@@ -501,7 +481,7 @@ struct config_edma_ring_blks_param {
501 unsigned int db; 481 unsigned int db;
502}; 482};
503 483
504int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context, 484int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
505 unsigned int ering, unsigned int max_blks, 485 unsigned int ering, unsigned int max_blks,
506 unsigned int min_snf_blks, unsigned int db) 486 unsigned int min_snf_blks, unsigned int db)
507{ 487{
@@ -520,11 +500,29 @@ int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
520 500
521EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks); 501EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
522 502
503struct adjust_timestamp_freq_param {
504 int32_t ppb;
505};
506
507int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb)
508{
509 struct adjust_timestamp_freq_param temp;
510 struct adjust_timestamp_freq_param *params = &temp;
511
512 params->ppb = ppb;
513
514 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
515 sizeof(*params),
516 GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
517}
518
519EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
520
523struct arm_pollfd_param { 521struct arm_pollfd_param {
524 union iorpc_pollfd pollfd; 522 union iorpc_pollfd pollfd;
525}; 523};
526 524
527int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) 525int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
528{ 526{
529 struct arm_pollfd_param temp; 527 struct arm_pollfd_param temp;
530 struct arm_pollfd_param *params = &temp; 528 struct arm_pollfd_param *params = &temp;
@@ -541,7 +539,7 @@ struct close_pollfd_param {
541 union iorpc_pollfd pollfd; 539 union iorpc_pollfd pollfd;
542}; 540};
543 541
544int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) 542int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)
545{ 543{
546 struct close_pollfd_param temp; 544 struct close_pollfd_param temp;
547 struct close_pollfd_param *params = &temp; 545 struct close_pollfd_param *params = &temp;
@@ -558,7 +556,7 @@ struct get_mmio_base_param {
558 HV_PTE base; 556 HV_PTE base;
559}; 557};
560 558
561int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base) 559int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base)
562{ 560{
563 int __result; 561 int __result;
564 struct get_mmio_base_param temp; 562 struct get_mmio_base_param temp;
@@ -579,7 +577,7 @@ struct check_mmio_offset_param {
579 unsigned long size; 577 unsigned long size;
580}; 578};
581 579
582int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, 580int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
583 unsigned long offset, unsigned long size) 581 unsigned long offset, unsigned long size)
584{ 582{
585 struct check_mmio_offset_param temp; 583 struct check_mmio_offset_param temp;
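Every wrapper in iorpc_mpipe.c follows the same marshalling shape: pack the arguments into a small on-stack param struct, then forward it to the hypervisor with hv_dev_pwrite() (or hv_dev_pread() when values come back), keyed by a GXIO_MPIPE_OP_* opcode. A minimal sketch of that shape, using a hypothetical opcode and parameter names since the real wrappers are machine-generated:

#include <gxio/iorpc_mpipe.h>        /* gxio_mpipe_context_t and the iorpc plumbing */

/* Hypothetical parameter block; real ones mirror the RPC argument list. */
struct example_set_foo_param {
        unsigned int foo;
};

/* Hypothetical wrapper: marshal one argument and issue the RPC. */
static int example_mpipe_set_foo(gxio_mpipe_context_t *context, unsigned int foo)
{
        struct example_set_foo_param temp;
        struct example_set_foo_param *params = &temp;

        params->foo = foo;        /* copy the arguments into the param block */

        /* Write the block to the per-context hypervisor fd, tagged with an
         * opcode; GXIO_MPIPE_OP_EXAMPLE_SET_FOO is made up for illustration. */
        return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
                             sizeof(*params), GXIO_MPIPE_OP_EXAMPLE_SET_FOO);
}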
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index 64883aabeb9c..77019c6e9b4a 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -15,12 +15,11 @@
15/* This file is machine-generated; DO NOT EDIT! */ 15/* This file is machine-generated; DO NOT EDIT! */
16#include "gxio/iorpc_mpipe_info.h" 16#include "gxio/iorpc_mpipe_info.h"
17 17
18
19struct instance_aux_param { 18struct instance_aux_param {
20 _gxio_mpipe_link_name_t name; 19 _gxio_mpipe_link_name_t name;
21}; 20};
22 21
23int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, 22int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
24 _gxio_mpipe_link_name_t name) 23 _gxio_mpipe_link_name_t name)
25{ 24{
26 struct instance_aux_param temp; 25 struct instance_aux_param temp;
@@ -39,10 +38,10 @@ struct enumerate_aux_param {
39 _gxio_mpipe_link_mac_t mac; 38 _gxio_mpipe_link_mac_t mac;
40}; 39};
41 40
42int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, 41int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
43 unsigned int idx, 42 unsigned int idx,
44 _gxio_mpipe_link_name_t * name, 43 _gxio_mpipe_link_name_t *name,
45 _gxio_mpipe_link_mac_t * mac) 44 _gxio_mpipe_link_mac_t *mac)
46{ 45{
47 int __result; 46 int __result;
48 struct enumerate_aux_param temp; 47 struct enumerate_aux_param temp;
@@ -50,7 +49,7 @@ int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
50 49
51 __result = 50 __result =
52 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), 51 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
53 (((uint64_t) idx << 32) | 52 (((uint64_t)idx << 32) |
54 GXIO_MPIPE_INFO_OP_ENUMERATE_AUX)); 53 GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
55 *name = params->name; 54 *name = params->name;
56 *mac = params->mac; 55 *mac = params->mac;
@@ -64,7 +63,7 @@ struct get_mmio_base_param {
64 HV_PTE base; 63 HV_PTE base;
65}; 64};
66 65
67int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, 66int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
68 HV_PTE *base) 67 HV_PTE *base)
69{ 68{
70 int __result; 69 int __result;
@@ -86,7 +85,7 @@ struct check_mmio_offset_param {
86 unsigned long size; 85 unsigned long size;
87}; 86};
88 87
89int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, 88int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
90 unsigned long offset, unsigned long size) 89 unsigned long offset, unsigned long size)
91{ 90{
92 struct check_mmio_offset_param temp; 91 struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
index da6e18e049c3..1d3cedb9aeb4 100644
--- a/arch/tile/gxio/iorpc_trio.c
+++ b/arch/tile/gxio/iorpc_trio.c
@@ -21,7 +21,7 @@ struct alloc_asids_param {
21 unsigned int flags; 21 unsigned int flags;
22}; 22};
23 23
24int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, 24int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
25 unsigned int first, unsigned int flags) 25 unsigned int first, unsigned int flags)
26{ 26{
27 struct alloc_asids_param temp; 27 struct alloc_asids_param temp;
@@ -44,7 +44,7 @@ struct alloc_memory_maps_param {
44 unsigned int flags; 44 unsigned int flags;
45}; 45};
46 46
47int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, 47int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
48 unsigned int count, unsigned int first, 48 unsigned int count, unsigned int first,
49 unsigned int flags) 49 unsigned int flags)
50{ 50{
@@ -67,7 +67,7 @@ struct alloc_scatter_queues_param {
67 unsigned int flags; 67 unsigned int flags;
68}; 68};
69 69
70int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, 70int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
71 unsigned int count, unsigned int first, 71 unsigned int count, unsigned int first,
72 unsigned int flags) 72 unsigned int flags)
73{ 73{
@@ -91,7 +91,7 @@ struct alloc_pio_regions_param {
91 unsigned int flags; 91 unsigned int flags;
92}; 92};
93 93
94int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, 94int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
95 unsigned int count, unsigned int first, 95 unsigned int count, unsigned int first,
96 unsigned int flags) 96 unsigned int flags)
97{ 97{
@@ -115,7 +115,7 @@ struct init_pio_region_aux_param {
115 unsigned int flags; 115 unsigned int flags;
116}; 116};
117 117
118int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, 118int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
119 unsigned int pio_region, unsigned int mac, 119 unsigned int pio_region, unsigned int mac,
120 uint32_t bus_address_hi, unsigned int flags) 120 uint32_t bus_address_hi, unsigned int flags)
121{ 121{
@@ -145,7 +145,7 @@ struct init_memory_map_mmu_aux_param {
145 unsigned int order_mode; 145 unsigned int order_mode;
146}; 146};
147 147
148int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, 148int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
149 unsigned int map, unsigned long va, 149 unsigned int map, unsigned long va,
150 uint64_t size, unsigned int asid, 150 uint64_t size, unsigned int asid,
151 unsigned int mac, uint64_t bus_address, 151 unsigned int mac, uint64_t bus_address,
@@ -175,7 +175,7 @@ struct get_port_property_param {
175 struct pcie_trio_ports_property trio_ports; 175 struct pcie_trio_ports_property trio_ports;
176}; 176};
177 177
178int gxio_trio_get_port_property(gxio_trio_context_t * context, 178int gxio_trio_get_port_property(gxio_trio_context_t *context,
179 struct pcie_trio_ports_property *trio_ports) 179 struct pcie_trio_ports_property *trio_ports)
180{ 180{
181 int __result; 181 int __result;
@@ -198,7 +198,7 @@ struct config_legacy_intr_param {
198 unsigned int intx; 198 unsigned int intx;
199}; 199};
200 200
201int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, 201int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
202 int inter_y, int inter_ipi, int inter_event, 202 int inter_y, int inter_ipi, int inter_event,
203 unsigned int mac, unsigned int intx) 203 unsigned int mac, unsigned int intx)
204{ 204{
@@ -227,7 +227,7 @@ struct config_msi_intr_param {
227 unsigned int asid; 227 unsigned int asid;
228}; 228};
229 229
230int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, 230int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
231 int inter_y, int inter_ipi, int inter_event, 231 int inter_y, int inter_ipi, int inter_event,
232 unsigned int mac, unsigned int mem_map, 232 unsigned int mac, unsigned int mem_map,
233 uint64_t mem_map_base, uint64_t mem_map_limit, 233 uint64_t mem_map_base, uint64_t mem_map_limit,
@@ -259,7 +259,7 @@ struct set_mps_mrs_param {
259 unsigned int mac; 259 unsigned int mac;
260}; 260};
261 261
262int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, 262int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
263 uint16_t mrs, unsigned int mac) 263 uint16_t mrs, unsigned int mac)
264{ 264{
265 struct set_mps_mrs_param temp; 265 struct set_mps_mrs_param temp;
@@ -279,7 +279,7 @@ struct force_rc_link_up_param {
279 unsigned int mac; 279 unsigned int mac;
280}; 280};
281 281
282int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac) 282int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac)
283{ 283{
284 struct force_rc_link_up_param temp; 284 struct force_rc_link_up_param temp;
285 struct force_rc_link_up_param *params = &temp; 285 struct force_rc_link_up_param *params = &temp;
@@ -296,7 +296,7 @@ struct force_ep_link_up_param {
296 unsigned int mac; 296 unsigned int mac;
297}; 297};
298 298
299int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac) 299int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac)
300{ 300{
301 struct force_ep_link_up_param temp; 301 struct force_ep_link_up_param temp;
302 struct force_ep_link_up_param *params = &temp; 302 struct force_ep_link_up_param *params = &temp;
@@ -313,7 +313,7 @@ struct get_mmio_base_param {
313 HV_PTE base; 313 HV_PTE base;
314}; 314};
315 315
316int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base) 316int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base)
317{ 317{
318 int __result; 318 int __result;
319 struct get_mmio_base_param temp; 319 struct get_mmio_base_param temp;
@@ -334,7 +334,7 @@ struct check_mmio_offset_param {
334 unsigned long size; 334 unsigned long size;
335}; 335};
336 336
337int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, 337int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
338 unsigned long offset, unsigned long size) 338 unsigned long offset, unsigned long size)
339{ 339{
340 struct check_mmio_offset_param temp; 340 struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c
index cf3c3cc12204..9c820073bfc0 100644
--- a/arch/tile/gxio/iorpc_usb_host.c
+++ b/arch/tile/gxio/iorpc_usb_host.c
@@ -19,7 +19,7 @@ struct cfg_interrupt_param {
19 union iorpc_interrupt interrupt; 19 union iorpc_interrupt interrupt;
20}; 20};
21 21
22int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, 22int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
23 int inter_y, int inter_ipi, int inter_event) 23 int inter_y, int inter_ipi, int inter_event)
24{ 24{
25 struct cfg_interrupt_param temp; 25 struct cfg_interrupt_param temp;
@@ -41,7 +41,7 @@ struct register_client_memory_param {
41 unsigned int flags; 41 unsigned int flags;
42}; 42};
43 43
44int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, 44int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
45 HV_PTE pte, unsigned int flags) 45 HV_PTE pte, unsigned int flags)
46{ 46{
47 struct register_client_memory_param temp; 47 struct register_client_memory_param temp;
@@ -61,7 +61,7 @@ struct get_mmio_base_param {
61 HV_PTE base; 61 HV_PTE base;
62}; 62};
63 63
64int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base) 64int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base)
65{ 65{
66 int __result; 66 int __result;
67 struct get_mmio_base_param temp; 67 struct get_mmio_base_param temp;
@@ -82,7 +82,7 @@ struct check_mmio_offset_param {
82 unsigned long size; 82 unsigned long size;
83}; 83};
84 84
85int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, 85int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
86 unsigned long offset, unsigned long size) 86 unsigned long offset, unsigned long size)
87{ 87{
88 struct check_mmio_offset_param temp; 88 struct check_mmio_offset_param temp;
diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c
index 66b002f54ecc..785afad7922e 100644
--- a/arch/tile/gxio/usb_host.c
+++ b/arch/tile/gxio/usb_host.c
@@ -26,7 +26,7 @@
26#include <gxio/kiorpc.h> 26#include <gxio/kiorpc.h>
27#include <gxio/usb_host.h> 27#include <gxio/usb_host.h>
28 28
29int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, 29int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
30 int is_ehci) 30 int is_ehci)
31{ 31{
32 char file[32]; 32 char file[32];
@@ -63,7 +63,7 @@ int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
63 63
64EXPORT_SYMBOL_GPL(gxio_usb_host_init); 64EXPORT_SYMBOL_GPL(gxio_usb_host_init);
65 65
66int gxio_usb_host_destroy(gxio_usb_host_context_t * context) 66int gxio_usb_host_destroy(gxio_usb_host_context_t *context)
67{ 67{
68 iounmap((void __force __iomem *)(context->mmio_base)); 68 iounmap((void __force __iomem *)(context->mmio_base));
69 hv_dev_close(context->fd); 69 hv_dev_close(context->fd);
@@ -76,14 +76,14 @@ int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
76 76
77EXPORT_SYMBOL_GPL(gxio_usb_host_destroy); 77EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
78 78
79void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context) 79void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)
80{ 80{
81 return context->mmio_base; 81 return context->mmio_base;
82} 82}
83 83
84EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start); 84EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
85 85
86size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context) 86size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)
87{ 87{
88 return HV_USB_HOST_MMIO_SIZE; 88 return HV_USB_HOST_MMIO_SIZE;
89} 89}
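The four exported entry points above (init, get_reg_start, get_reg_len, destroy) form a plain open/query/close lifecycle around one USB shim. A hypothetical caller, sketched only to show the intended call order:

#include <gxio/usb_host.h>

static int example_usb_bringup(void)
{
        gxio_usb_host_context_t ctx;
        void *regs;
        size_t len;
        int ret;

        /* Open shim 0 in EHCI mode; returns 0 or a GXIO_ERR_xxx code. */
        ret = gxio_usb_host_init(&ctx, 0, /* is_ehci = */ 1);
        if (ret != 0)
                return ret;

        /* MMIO window mapped by init: base pointer and HV_USB_HOST_MMIO_SIZE. */
        regs = gxio_usb_host_get_reg_start(&ctx);
        len = gxio_usb_host_get_reg_len(&ctx);
        (void)regs;
        (void)len;

        /* Unmap the registers and close the hypervisor fd. */
        return gxio_usb_host_destroy(&ctx);
}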
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
index 8a33912fd6cc..904538e754d8 100644
--- a/arch/tile/include/arch/mpipe.h
+++ b/arch/tile/include/arch/mpipe.h
@@ -176,7 +176,18 @@ typedef union
176 */ 176 */
177 uint_reg_t stack_idx : 5; 177 uint_reg_t stack_idx : 5;
178 /* Reserved. */ 178 /* Reserved. */
179 uint_reg_t __reserved_2 : 5; 179 uint_reg_t __reserved_2 : 3;
180 /*
181 * Instance ID. For devices that support automatic buffer return between
182 * mPIPE instances, this field indicates the buffer owner. If the INST
183 * field does not match the mPIPE's instance number when a packet is
184 * egressed, buffers with HWB set will be returned to the other mPIPE
185 * instance. Note that not all devices support multi-mPIPE buffer
186 * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
187 * whether the INST field in the buffer descriptor is populated by iDMA
188 * hardware. This field is ignored on writes.
189 */
190 uint_reg_t inst : 2;
180 /* 191 /*
181 * Reads as one to indicate that this is a hardware managed buffer. 192 * Reads as one to indicate that this is a hardware managed buffer.
182 * Ignored on writes since all buffers on a given stack are the same size. 193 * Ignored on writes since all buffers on a given stack are the same size.
@@ -205,7 +216,8 @@ typedef union
205 uint_reg_t c : 2; 216 uint_reg_t c : 2;
206 uint_reg_t size : 3; 217 uint_reg_t size : 3;
207 uint_reg_t hwb : 1; 218 uint_reg_t hwb : 1;
208 uint_reg_t __reserved_2 : 5; 219 uint_reg_t inst : 2;
220 uint_reg_t __reserved_2 : 3;
209 uint_reg_t stack_idx : 5; 221 uint_reg_t stack_idx : 5;
210 uint_reg_t __reserved_1 : 6; 222 uint_reg_t __reserved_1 : 6;
211 int_reg_t va : 35; 223 int_reg_t va : 35;
@@ -231,9 +243,9 @@ typedef union
231 /* Reserved. */ 243 /* Reserved. */
232 uint_reg_t __reserved_0 : 3; 244 uint_reg_t __reserved_0 : 3;
233 /* eDMA ring being accessed */ 245 /* eDMA ring being accessed */
234 uint_reg_t ring : 5; 246 uint_reg_t ring : 6;
235 /* Reserved. */ 247 /* Reserved. */
236 uint_reg_t __reserved_1 : 18; 248 uint_reg_t __reserved_1 : 17;
237 /* 249 /*
238 * This field of the address selects the region (address space) to be 250 * This field of the address selects the region (address space) to be
239 * accessed. For the egress DMA post region, this field must be 5. 251 * accessed. For the egress DMA post region, this field must be 5.
@@ -250,8 +262,8 @@ typedef union
250 uint_reg_t svc_dom : 5; 262 uint_reg_t svc_dom : 5;
251 uint_reg_t __reserved_2 : 6; 263 uint_reg_t __reserved_2 : 6;
252 uint_reg_t region : 3; 264 uint_reg_t region : 3;
253 uint_reg_t __reserved_1 : 18; 265 uint_reg_t __reserved_1 : 17;
254 uint_reg_t ring : 5; 266 uint_reg_t ring : 6;
255 uint_reg_t __reserved_0 : 3; 267 uint_reg_t __reserved_0 : 3;
256#endif 268#endif
257 }; 269 };
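The bit budget of these words is unchanged by the edits above: the 5 formerly reserved descriptor bits split into a 3-bit reserved field plus the new 2-bit INST field, and the post-region address widens RING from 5 to 6 bits by taking one bit from the adjacent reserved field. A throwaway compile-time check of that arithmetic, illustrative only:

/* Width bookkeeping for the layout change above. */
_Static_assert(3 + 2 == 5, "INST bits come out of the old reserved field");
_Static_assert(6 + 17 == 5 + 18, "wider RING field comes out of the old reserved field");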
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
index 410a0400e055..84022ac5fe82 100644
--- a/arch/tile/include/arch/mpipe_constants.h
+++ b/arch/tile/include/arch/mpipe_constants.h
@@ -16,13 +16,13 @@
16#ifndef __ARCH_MPIPE_CONSTANTS_H__ 16#ifndef __ARCH_MPIPE_CONSTANTS_H__
17#define __ARCH_MPIPE_CONSTANTS_H__ 17#define __ARCH_MPIPE_CONSTANTS_H__
18 18
19#define MPIPE_NUM_CLASSIFIERS 10 19#define MPIPE_NUM_CLASSIFIERS 16
20#define MPIPE_CLS_MHZ 1200 20#define MPIPE_CLS_MHZ 1200
21 21
22#define MPIPE_NUM_EDMA_RINGS 32 22#define MPIPE_NUM_EDMA_RINGS 64
23 23
24#define MPIPE_NUM_SGMII_MACS 16 24#define MPIPE_NUM_SGMII_MACS 16
25#define MPIPE_NUM_XAUI_MACS 4 25#define MPIPE_NUM_XAUI_MACS 16
26#define MPIPE_NUM_LOOPBACK_CHANNELS 4 26#define MPIPE_NUM_LOOPBACK_CHANNELS 4
27#define MPIPE_NUM_NON_LB_CHANNELS 28 27#define MPIPE_NUM_NON_LB_CHANNELS 28
28 28
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
index f2e9e122818d..13b3c4300e50 100644
--- a/arch/tile/include/arch/mpipe_shm.h
+++ b/arch/tile/include/arch/mpipe_shm.h
@@ -44,8 +44,14 @@ typedef union
44 * descriptors toggles each time the ring tail pointer wraps. 44 * descriptors toggles each time the ring tail pointer wraps.
45 */ 45 */
46 uint_reg_t gen : 1; 46 uint_reg_t gen : 1;
47 /**
48 * For devices with EDMA reorder support, this field allows the
49 * descriptor to select the egress FIFO. The associated DMA ring must
50 * have ALLOW_EFIFO_SEL enabled.
51 */
52 uint_reg_t efifo_sel : 6;
47 /** Reserved. Must be zero. */ 53 /** Reserved. Must be zero. */
48 uint_reg_t r0 : 7; 54 uint_reg_t r0 : 1;
49 /** Checksum generation enabled for this transfer. */ 55 /** Checksum generation enabled for this transfer. */
50 uint_reg_t csum : 1; 56 uint_reg_t csum : 1;
51 /** 57 /**
@@ -110,7 +116,8 @@ typedef union
110 uint_reg_t notif : 1; 116 uint_reg_t notif : 1;
111 uint_reg_t ns : 1; 117 uint_reg_t ns : 1;
112 uint_reg_t csum : 1; 118 uint_reg_t csum : 1;
113 uint_reg_t r0 : 7; 119 uint_reg_t r0 : 1;
120 uint_reg_t efifo_sel : 6;
114 uint_reg_t gen : 1; 121 uint_reg_t gen : 1;
115#endif 122#endif
116 123
@@ -126,14 +133,16 @@ typedef union
126 /** Reserved. */ 133 /** Reserved. */
127 uint_reg_t __reserved_1 : 3; 134 uint_reg_t __reserved_1 : 3;
128 /** 135 /**
129 * Instance ID. For devices that support more than one mPIPE instance, 136 * Instance ID. For devices that support automatic buffer return between
130 * this field indicates the buffer owner. If the INST field does not 137 * mPIPE instances, this field indicates the buffer owner. If the INST
131 * match the mPIPE's instance number when a packet is egressed, buffers 138 * field does not match the mPIPE's instance number when a packet is
132 * with HWB set will be returned to the other mPIPE instance. 139 * egressed, buffers with HWB set will be returned to the other mPIPE
140 * instance. Note that not all devices support multi-mPIPE buffer
141 * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
142 * whether the INST field in the buffer descriptor is populated by iDMA
143 * hardware.
133 */ 144 */
134 uint_reg_t inst : 1; 145 uint_reg_t inst : 2;
135 /** Reserved. */
136 uint_reg_t __reserved_2 : 1;
137 /** 146 /**
138 * Always set to one by hardware in iDMA packet descriptors. For eDMA, 147 * Always set to one by hardware in iDMA packet descriptors. For eDMA,
139 * indicates whether the buffer will be released to the buffer stack 148 * indicates whether the buffer will be released to the buffer stack
@@ -166,8 +175,7 @@ typedef union
166 uint_reg_t c : 2; 175 uint_reg_t c : 2;
167 uint_reg_t size : 3; 176 uint_reg_t size : 3;
168 uint_reg_t hwb : 1; 177 uint_reg_t hwb : 1;
169 uint_reg_t __reserved_2 : 1; 178 uint_reg_t inst : 2;
170 uint_reg_t inst : 1;
171 uint_reg_t __reserved_1 : 3; 179 uint_reg_t __reserved_1 : 3;
172 uint_reg_t stack_idx : 5; 180 uint_reg_t stack_idx : 5;
173 uint_reg_t __reserved_0 : 6; 181 uint_reg_t __reserved_0 : 6;
@@ -408,7 +416,10 @@ typedef union
408 /** 416 /**
409 * Sequence number applied when packet is distributed. Classifier 417 * Sequence number applied when packet is distributed. Classifier
410 * selects which sequence number is to be applied by writing the 13-bit 418 * selects which sequence number is to be applied by writing the 13-bit
411 * SQN-selector into this field. 419 * SQN-selector into this field. For devices that support EXT_SQN (as
420 * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to
421 * 32-bits via the IDMA_CTL.EXT_SQN register. In this case the
422 * PACKET_SQN will be reduced to 32 bits.
412 */ 423 */
413 uint_reg_t gp_sqn : 16; 424 uint_reg_t gp_sqn : 16;
414 /** 425 /**
@@ -451,14 +462,16 @@ typedef union
451 /** Reserved. */ 462 /** Reserved. */
452 uint_reg_t __reserved_5 : 3; 463 uint_reg_t __reserved_5 : 3;
453 /** 464 /**
454 * Instance ID. For devices that support more than one mPIPE instance, 465 * Instance ID. For devices that support automatic buffer return between
455 * this field indicates the buffer owner. If the INST field does not 466 * mPIPE instances, this field indicates the buffer owner. If the INST
456 * match the mPIPE's instance number when a packet is egressed, buffers 467 * field does not match the mPIPE's instance number when a packet is
457 * with HWB set will be returned to the other mPIPE instance. 468 * egressed, buffers with HWB set will be returned to the other mPIPE
469 * instance. Note that not all devices support multi-mPIPE buffer
470 * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates
471 * whether the INST field in the buffer descriptor is populated by iDMA
472 * hardware.
458 */ 473 */
459 uint_reg_t inst : 1; 474 uint_reg_t inst : 2;
460 /** Reserved. */
461 uint_reg_t __reserved_6 : 1;
462 /** 475 /**
463 * Always set to one by hardware in iDMA packet descriptors. For eDMA, 476 * Always set to one by hardware in iDMA packet descriptors. For eDMA,
464 * indicates whether the buffer will be released to the buffer stack 477 * indicates whether the buffer will be released to the buffer stack
@@ -491,8 +504,7 @@ typedef union
491 uint_reg_t c : 2; 504 uint_reg_t c : 2;
492 uint_reg_t size : 3; 505 uint_reg_t size : 3;
493 uint_reg_t hwb : 1; 506 uint_reg_t hwb : 1;
494 uint_reg_t __reserved_6 : 1; 507 uint_reg_t inst : 2;
495 uint_reg_t inst : 1;
496 uint_reg_t __reserved_5 : 3; 508 uint_reg_t __reserved_5 : 3;
497 uint_reg_t stack_idx : 5; 509 uint_reg_t stack_idx : 5;
498 uint_reg_t __reserved_4 : 6; 510 uint_reg_t __reserved_4 : 6;
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h
index 628b045436b8..85647e91a458 100644
--- a/arch/tile/include/arch/trio_constants.h
+++ b/arch/tile/include/arch/trio_constants.h
@@ -16,21 +16,21 @@
16#ifndef __ARCH_TRIO_CONSTANTS_H__ 16#ifndef __ARCH_TRIO_CONSTANTS_H__
17#define __ARCH_TRIO_CONSTANTS_H__ 17#define __ARCH_TRIO_CONSTANTS_H__
18 18
19#define TRIO_NUM_ASIDS 16 19#define TRIO_NUM_ASIDS 32
20#define TRIO_NUM_TLBS_PER_ASID 16 20#define TRIO_NUM_TLBS_PER_ASID 16
21 21
22#define TRIO_NUM_TPIO_REGIONS 8 22#define TRIO_NUM_TPIO_REGIONS 8
23#define TRIO_LOG2_NUM_TPIO_REGIONS 3 23#define TRIO_LOG2_NUM_TPIO_REGIONS 3
24 24
25#define TRIO_NUM_MAP_MEM_REGIONS 16 25#define TRIO_NUM_MAP_MEM_REGIONS 32
26#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4 26#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5
27#define TRIO_NUM_MAP_SQ_REGIONS 8 27#define TRIO_NUM_MAP_SQ_REGIONS 8
28#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3 28#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3
29 29
30#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6 30#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6
31 31
32#define TRIO_NUM_PUSH_DMA_RINGS 32 32#define TRIO_NUM_PUSH_DMA_RINGS 64
33 33
34#define TRIO_NUM_PULL_DMA_RINGS 32 34#define TRIO_NUM_PULL_DMA_RINGS 64
35 35
36#endif /* __ARCH_TRIO_CONSTANTS_H__ */ 36#endif /* __ARCH_TRIO_CONSTANTS_H__ */
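The doubled TRIO resource counts above keep their log2 companions consistent (32 = 1 << 5 memory-map regions, with the DMA ring counts simply doubling). A quick compile-time consistency check, assuming the header is reachable on the include path:

#include <arch/trio_constants.h>

_Static_assert((1 << TRIO_LOG2_NUM_MAP_MEM_REGIONS) == TRIO_NUM_MAP_MEM_REGIONS,
               "log2 constant must track the memory-map region count");
_Static_assert((1 << TRIO_LOG2_NUM_MAP_SQ_REGIONS) == TRIO_NUM_MAP_SQ_REGIONS,
               "log2 constant must track the scatter-queue region count");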
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 6346888f7bdc..672768008618 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -182,10 +182,9 @@ static inline __attribute_const__ int get_order(unsigned long size)
182 182
183#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1))) 183#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
184#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */ 184#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
185#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */ 185#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
186#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */ 186#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
187#define _VMALLOC_START FIXADDR_TOP 187#define _VMALLOC_START FIXADDR_TOP
188#define HUGE_VMAP_BASE (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
189#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */ 188#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
190#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */ 189#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
191#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) 190#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 63142ab3b3dd..d26a42279036 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -55,17 +55,9 @@
55#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK) 55#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)
56 56
57#ifdef CONFIG_HIGHMEM 57#ifdef CONFIG_HIGHMEM
58# define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) 58# define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1))
59#else 59#else
60# define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1)) 60# define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1))
61#endif
62
63#ifdef CONFIG_HUGEVMAP
64#define HUGE_VMAP_END __VMAPPING_END
65#define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
66#define _VMALLOC_END HUGE_VMAP_BASE
67#else
68#define _VMALLOC_END __VMAPPING_END
69#endif 61#endif
70 62
71/* 63/*
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 3421177f7370..2c8a9cd102d3 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -52,12 +52,10 @@
52 * memory allocation code). The vmalloc code puts in an internal 52 * memory allocation code). The vmalloc code puts in an internal
53 * guard page between each allocation. 53 * guard page between each allocation.
54 */ 54 */
55#define _VMALLOC_END HUGE_VMAP_BASE 55#define _VMALLOC_END MEM_SV_START
56#define VMALLOC_END _VMALLOC_END 56#define VMALLOC_END _VMALLOC_END
57#define VMALLOC_START _VMALLOC_START 57#define VMALLOC_START _VMALLOC_START
58 58
59#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
60
61#ifndef __ASSEMBLY__ 59#ifndef __ASSEMBLY__
62 60
63/* We have no pud since we are a three-level page table. */ 61/* We have no pud since we are a three-level page table. */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index fdd07f88cfd7..4cda03de734f 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -56,89 +56,89 @@
56#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 56#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
57#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 57#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
58 58
59int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, 59int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
60 unsigned int count, unsigned int first, 60 unsigned int count, unsigned int first,
61 unsigned int flags); 61 unsigned int flags);
62 62
63int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, 63int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
64 void *mem_va, size_t mem_size, 64 void *mem_va, size_t mem_size,
65 unsigned int mem_flags, unsigned int stack, 65 unsigned int mem_flags, unsigned int stack,
66 unsigned int buffer_size_enum); 66 unsigned int buffer_size_enum);
67 67
68 68
69int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, 69int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
70 unsigned int count, unsigned int first, 70 unsigned int count, unsigned int first,
71 unsigned int flags); 71 unsigned int flags);
72 72
73int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 73int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
74 size_t mem_size, unsigned int mem_flags, 74 size_t mem_size, unsigned int mem_flags,
75 unsigned int ring); 75 unsigned int ring);
76 76
77int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, 77int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
78 int inter_x, int inter_y, 78 int inter_x, int inter_y,
79 int inter_ipi, int inter_event, 79 int inter_ipi, int inter_event,
80 unsigned int ring); 80 unsigned int ring);
81 81
82int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, 82int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
83 unsigned int ring); 83 unsigned int ring);
84 84
85int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, 85int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
86 unsigned int count, unsigned int first, 86 unsigned int count, unsigned int first,
87 unsigned int flags); 87 unsigned int flags);
88 88
89int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, 89int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
90 unsigned int group, 90 unsigned int group,
91 gxio_mpipe_notif_group_bits_t bits); 91 gxio_mpipe_notif_group_bits_t bits);
92 92
93int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, 93int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
94 unsigned int first, unsigned int flags); 94 unsigned int first, unsigned int flags);
95 95
96int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, 96int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
97 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info); 97 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
98 98
99int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, 99int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
100 unsigned int count, unsigned int first, 100 unsigned int count, unsigned int first,
101 unsigned int flags); 101 unsigned int flags);
102 102
103int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 103int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
104 size_t mem_size, unsigned int mem_flags, 104 size_t mem_size, unsigned int mem_flags,
105 unsigned int ring, unsigned int channel); 105 unsigned int ring, unsigned int channel);
106 106
107 107
108int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, 108int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
109 size_t blob_size); 109 size_t blob_size);
110 110
111int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, 111int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
112 unsigned int iotlb, HV_PTE pte, 112 unsigned int iotlb, HV_PTE pte,
113 unsigned int flags); 113 unsigned int flags);
114 114
115int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, 115int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
116 _gxio_mpipe_link_name_t name, unsigned int flags); 116 _gxio_mpipe_link_name_t name, unsigned int flags);
117 117
118int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); 118int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac);
119 119
120int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, 120int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
121 uint32_t attr, int64_t val); 121 uint32_t attr, int64_t val);
122 122
123int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, 123int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
124 uint64_t * nsec, uint64_t * cycles); 124 uint64_t *nsec, uint64_t *cycles);
125 125
126int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, 126int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
127 uint64_t nsec, uint64_t cycles); 127 uint64_t nsec, uint64_t cycles);
128 128
129int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, 129int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,
130 int64_t nsec); 130 int64_t nsec);
131 131
132int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, 132int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
133 int32_t ppb); 133 int32_t ppb);
134 134
135int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 135int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
136 136
137int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 137int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
138 138
139int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base); 139int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base);
140 140
141int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, 141int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
142 unsigned long offset, unsigned long size); 142 unsigned long offset, unsigned long size);
143 143
144#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */ 144#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 476c5e5ca22c..f0b04284468b 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -33,18 +33,18 @@
33#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 33#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
34 34
35 35
36int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, 36int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
37 _gxio_mpipe_link_name_t name); 37 _gxio_mpipe_link_name_t name);
38 38
39int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, 39int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
40 unsigned int idx, 40 unsigned int idx,
41 _gxio_mpipe_link_name_t * name, 41 _gxio_mpipe_link_name_t *name,
42 _gxio_mpipe_link_mac_t * mac); 42 _gxio_mpipe_link_mac_t *mac);
43 43
44int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, 44int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
45 HV_PTE *base); 45 HV_PTE *base);
46 46
47int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, 47int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
48 unsigned long offset, unsigned long size); 48 unsigned long offset, unsigned long size);
49 49
50#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */ 50#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index d95b96fd6c93..376a4f771167 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -46,59 +46,59 @@
46#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 46#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
47#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 47#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
48 48
49int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, 49int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
50 unsigned int first, unsigned int flags); 50 unsigned int first, unsigned int flags);
51 51
52 52
53int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, 53int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
54 unsigned int count, unsigned int first, 54 unsigned int count, unsigned int first,
55 unsigned int flags); 55 unsigned int flags);
56 56
57 57
58int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, 58int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
59 unsigned int count, unsigned int first, 59 unsigned int count, unsigned int first,
60 unsigned int flags); 60 unsigned int flags);
61 61
62int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, 62int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
63 unsigned int count, unsigned int first, 63 unsigned int count, unsigned int first,
64 unsigned int flags); 64 unsigned int flags);
65 65
66int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, 66int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
67 unsigned int pio_region, unsigned int mac, 67 unsigned int pio_region, unsigned int mac,
68 uint32_t bus_address_hi, unsigned int flags); 68 uint32_t bus_address_hi, unsigned int flags);
69 69
70 70
71int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, 71int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
72 unsigned int map, unsigned long va, 72 unsigned int map, unsigned long va,
73 uint64_t size, unsigned int asid, 73 uint64_t size, unsigned int asid,
74 unsigned int mac, uint64_t bus_address, 74 unsigned int mac, uint64_t bus_address,
75 unsigned int node, 75 unsigned int node,
76 unsigned int order_mode); 76 unsigned int order_mode);
77 77
78int gxio_trio_get_port_property(gxio_trio_context_t * context, 78int gxio_trio_get_port_property(gxio_trio_context_t *context,
79 struct pcie_trio_ports_property *trio_ports); 79 struct pcie_trio_ports_property *trio_ports);
80 80
81int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, 81int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
82 int inter_y, int inter_ipi, int inter_event, 82 int inter_y, int inter_ipi, int inter_event,
83 unsigned int mac, unsigned int intx); 83 unsigned int mac, unsigned int intx);
84 84
85int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, 85int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
86 int inter_y, int inter_ipi, int inter_event, 86 int inter_y, int inter_ipi, int inter_event,
87 unsigned int mac, unsigned int mem_map, 87 unsigned int mac, unsigned int mem_map,
88 uint64_t mem_map_base, uint64_t mem_map_limit, 88 uint64_t mem_map_base, uint64_t mem_map_limit,
89 unsigned int asid); 89 unsigned int asid);
90 90
91 91
92int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, 92int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
93 uint16_t mrs, unsigned int mac); 93 uint16_t mrs, unsigned int mac);
94 94
95int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac); 95int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac);
96 96
97int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac); 97int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac);
98 98
99int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base); 99int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base);
100 100
101int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, 101int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
102 unsigned long offset, unsigned long size); 102 unsigned long offset, unsigned long size);
103 103
104#endif /* !__GXIO_TRIO_LINUX_RPC_H__ */ 104#endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h
index 8622e7d126ad..79962a97de8e 100644
--- a/arch/tile/include/gxio/iorpc_usb_host.h
+++ b/arch/tile/include/gxio/iorpc_usb_host.h
@@ -31,16 +31,16 @@
31#define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 31#define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
32#define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 32#define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
33 33
34int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, 34int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
35 int inter_y, int inter_ipi, int inter_event); 35 int inter_y, int inter_ipi, int inter_event);
36 36
37int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, 37int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
38 HV_PTE pte, unsigned int flags); 38 HV_PTE pte, unsigned int flags);
39 39
40int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, 40int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,
41 HV_PTE *base); 41 HV_PTE *base);
42 42
43int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, 43int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
44 unsigned long offset, unsigned long size); 44 unsigned long offset, unsigned long size);
45 45
46#endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */ 46#endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h
index 5eedec0e988e..93c9636d2dd7 100644
--- a/arch/tile/include/gxio/usb_host.h
+++ b/arch/tile/include/gxio/usb_host.h
@@ -53,7 +53,7 @@ typedef struct {
53 * @return Zero if the context was successfully initialized, else a 53 * @return Zero if the context was successfully initialized, else a
54 * GXIO_ERR_xxx error code. 54 * GXIO_ERR_xxx error code.
55 */ 55 */
56extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, 56extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
57 int is_ehci); 57 int is_ehci);
58 58
59/* Destroy a USB context. 59/* Destroy a USB context.
@@ -68,20 +68,20 @@ extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
68 * @return Zero if the context was successfully destroyed, else a 68 * @return Zero if the context was successfully destroyed, else a
69 * GXIO_ERR_xxx error code. 69 * GXIO_ERR_xxx error code.
70 */ 70 */
71extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context); 71extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);
72 72
73/* Retrieve the address of the shim's MMIO registers. 73/* Retrieve the address of the shim's MMIO registers.
74 * 74 *
75 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 75 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
76 * @return The address of the shim's MMIO registers. 76 * @return The address of the shim's MMIO registers.
77 */ 77 */
78extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context); 78extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);
79 79
80/* Retrieve the length of the shim's MMIO registers. 80/* Retrieve the length of the shim's MMIO registers.
81 * 81 *
82 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 82 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
83 * @return The length of the shim's MMIO registers. 83 * @return The length of the shim's MMIO registers.
84 */ 84 */
85extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context); 85extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
86 86
87#endif /* _GXIO_USB_H_ */ 87#endif /* _GXIO_USB_H_ */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index ed378416b86a..49120843ff96 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -84,7 +84,7 @@ COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
84{ 84{
85 return sys_llseek(fd, offset_high, offset_low, result, origin); 85 return sys_llseek(fd, offset_high, offset_low, result, origin);
86} 86}
87 87
88/* Provide the compat syscall number to call mapping. */ 88/* Provide the compat syscall number to call mapping. */
89#undef __SYSCALL 89#undef __SYSCALL
90#define __SYSCALL(nr, call) [nr] = (call), 90#define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S
deleted file mode 100644
index f465d1eda20f..000000000000
--- a/arch/tile/kernel/futex_64.S
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Atomically access user memory, but use MMU to avoid propagating
15 * kernel exceptions.
16 */
17
18#include <linux/linkage.h>
19#include <asm/errno.h>
20#include <asm/futex.h>
21#include <asm/page.h>
22#include <asm/processor.h>
23
24/*
25 * Provide a set of atomic memory operations supporting <asm/futex.h>.
26 *
27 * r0: user address to manipulate
28 * r1: new value to write, or for cmpxchg, old value to compare against
29 * r2: (cmpxchg only) new value to write
30 *
31 * Return __get_user struct, r0 with value, r1 with error.
32 */
33#define FUTEX_OP(name, ...) \
34STD_ENTRY(futex_##name) \
35 __VA_ARGS__; \
36 { \
37 move r1, zero; \
38 jrp lr \
39 }; \
40 STD_ENDPROC(futex_##name); \
41 .pushsection __ex_table,"a"; \
42 .quad 1b, get_user_fault; \
43 .popsection
44
45 .pushsection .fixup,"ax"
46get_user_fault:
47 { movei r1, -EFAULT; jrp lr }
48 ENDPROC(get_user_fault)
49 .popsection
50
51FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2)
52FUTEX_OP(set, 1: exch4 r0, r0, r1)
53FUTEX_OP(add, 1: fetchadd4 r0, r0, r1)
54FUTEX_OP(or, 1: fetchor4 r0, r0, r1)
55FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 4c34caea9dd3..74c91729a62a 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1268,8 +1268,7 @@ static void __init validate_va(void)
1268 if ((long)VMALLOC_START >= 0) 1268 if ((long)VMALLOC_START >= 0)
1269 early_panic( 1269 early_panic(
1270 "Linux VMALLOC region below the 2GB line (%#lx)!\n" 1270 "Linux VMALLOC region below the 2GB line (%#lx)!\n"
1271 "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" 1271 "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",
1272 "or smaller VMALLOC_RESERVE.\n",
1273 VMALLOC_START); 1272 VMALLOC_START);
1274#endif 1273#endif
1275} 1274}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index b425fb6a480d..b030b4e78845 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -551,8 +551,8 @@ static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
551/* 551/*
552 * This function generates unalign fixup JIT. 552 * This function generates unalign fixup JIT.
553 * 553 *
554 * We fist find unalign load/store instruction's destination, source 554 * We first find unalign load/store instruction's destination, source
555 * reguisters: ra, rb and rd. and 3 scratch registers by calling 555 * registers: ra, rb and rd. and 3 scratch registers by calling
556 * find_regs(...). 3 scratch clobbers should not alias with any register 556 * find_regs(...). 3 scratch clobbers should not alias with any register
557 * used in the fault bundle. Then analyze the fault bundle to determine 557 * used in the fault bundle. Then analyze the fault bundle to determine
558 * if it's a load or store, operand width, branch or address increment etc. 558 * if it's a load or store, operand width, branch or address increment etc.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 4c288f199453..6c0571216a9d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -149,8 +149,6 @@ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
149 pmd_k = vmalloc_sync_one(pgd, address); 149 pmd_k = vmalloc_sync_one(pgd, address);
150 if (!pmd_k) 150 if (!pmd_k)
151 return -1; 151 return -1;
152 if (pmd_huge(*pmd_k))
153 return 0; /* support TILE huge_vmap() API */
154 pte_k = pte_offset_kernel(pmd_k, address); 152 pte_k = pte_offset_kernel(pmd_k, address);
155 if (!pte_present(*pte_k)) 153 if (!pte_present(*pte_k))
156 return -1; 154 return -1;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 4e316deb92fd..0fa1acfac79a 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -828,10 +828,6 @@ void __init mem_init(void)
828 printk(KERN_DEBUG " PKMAP %#lx - %#lx\n", 828 printk(KERN_DEBUG " PKMAP %#lx - %#lx\n",
829 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1); 829 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
830#endif 830#endif
831#ifdef CONFIG_HUGEVMAP
832 printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n",
833 HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
834#endif
835 printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n", 831 printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n",
836 _VMALLOC_START, _VMALLOC_END - 1); 832 _VMALLOC_START, _VMALLOC_END - 1);
837#ifdef __tilegx__ 833#ifdef __tilegx__
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2deaddf3e01f..4fd9ec0b58ed 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -127,8 +127,7 @@ void shatter_huge_page(unsigned long addr)
127 } 127 }
128 128
129 /* Shatter the huge page into the preallocated L2 page table. */ 129 /* Shatter the huge page into the preallocated L2 page table. */
130 pmd_populate_kernel(&init_mm, pmd, 130 pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));
131 get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
132 131
133#ifdef __PAGETABLE_PMD_FOLDED 132#ifdef __PAGETABLE_PMD_FOLDED
134 /* Walk every pgd on the system and update the pmd there. */ 133 /* Walk every pgd on the system and update the pmd there. */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e241a1930c98..ee2fb9d37745 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -481,11 +481,12 @@ config X86_INTEL_LPSS
481 bool "Intel Low Power Subsystem Support" 481 bool "Intel Low Power Subsystem Support"
482 depends on ACPI 482 depends on ACPI
483 select COMMON_CLK 483 select COMMON_CLK
484 select PINCTRL
484 ---help--- 485 ---help---
485 Select to build support for Intel Low Power Subsystem such as 486 Select to build support for Intel Low Power Subsystem such as
486 found on Intel Lynxpoint PCH. Selecting this option enables 487 found on Intel Lynxpoint PCH. Selecting this option enables
487 things like clock tree (common clock framework) which are needed 488 things like clock tree (common clock framework) and pincontrol
488 by the LPSS peripheral drivers. 489 which are needed by the LPSS peripheral drivers.
489 490
490config X86_RDC321X 491config X86_RDC321X
491 bool "RDC R-321x SoC" 492 bool "RDC R-321x SoC"
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; 79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
80} 80}
81 81
82static inline unsigned long mfn_to_pfn(unsigned long mfn) 82static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
83{ 83{
84 unsigned long pfn; 84 unsigned long pfn;
85 int ret = 0; 85 int ret;
86 86
87 if (xen_feature(XENFEAT_auto_translated_physmap)) 87 if (xen_feature(XENFEAT_auto_translated_physmap))
88 return mfn; 88 return mfn;
89 89
90 if (unlikely(mfn >= machine_to_phys_nr)) { 90 if (unlikely(mfn >= machine_to_phys_nr))
91 pfn = ~0; 91 return ~0;
92 goto try_override; 92
93 }
94 pfn = 0;
95 /* 93 /*
96 * The array access can fail (e.g., device space beyond end of RAM). 94 * The array access can fail (e.g., device space beyond end of RAM).
97 * In such cases it doesn't matter what we return (we return garbage), 95 * In such cases it doesn't matter what we return (we return garbage),
98 * but we must handle the fault without crashing! 96 * but we must handle the fault without crashing!
99 */ 97 */
100 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 98 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
101try_override:
102 /* ret might be < 0 if there are no entries in the m2p for mfn */
103 if (ret < 0) 99 if (ret < 0)
104 pfn = ~0; 100 return ~0;
105 else if (get_phys_to_machine(pfn) != mfn) 101
102 return pfn;
103}
104
105static inline unsigned long mfn_to_pfn(unsigned long mfn)
106{
107 unsigned long pfn;
108
109 if (xen_feature(XENFEAT_auto_translated_physmap))
110 return mfn;
111
112 pfn = mfn_to_pfn_no_overrides(mfn);
113 if (get_phys_to_machine(pfn) != mfn) {
106 /* 114 /*
107 * If this appears to be a foreign mfn (because the pfn 115 * If this appears to be a foreign mfn (because the pfn
108 * doesn't map back to the mfn), then check the local override 116 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
111 * m2p_find_override_pfn returns ~0 if it doesn't find anything. 119 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
112 */ 120 */
113 pfn = m2p_find_override_pfn(mfn, ~0); 121 pfn = m2p_find_override_pfn(mfn, ~0);
122 }
114 123
115 /* 124 /*
116 * pfn is ~0 if there are no entries in the m2p for mfn or if the 125 * pfn is ~0 if there are no entries in the m2p for mfn or if the
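The xen/page.h hunk splits the lookup in two: mfn_to_pfn_no_overrides() only consults the raw machine-to-physical array and returns ~0 on any failure, while mfn_to_pfn() falls back to the per-domain override table whenever the result does not round-trip through the physical-to-machine map. A toy, self-contained model of that flow (table contents and the 8-entry size are invented for the sketch; the override lookup is stubbed out):

#include <stdio.h>

#define INVALID (~0u)

static unsigned m2p[8] = { 4, 5, INVALID, 7, 0, 0, 0, 0 }; /* machine -> physical */
static unsigned p2m[8] = { 0, 0, 0, 0, 2, 1, 0, 3 };       /* physical -> machine */

static unsigned override_pfn_for(unsigned mfn) { (void)mfn; return INVALID; }

/* Raw table lookup, no override handling: may return stale garbage. */
static unsigned mfn_to_pfn_no_overrides(unsigned mfn)
{
    if (mfn >= 8)
        return INVALID;
    return m2p[mfn];
}

static unsigned mfn_to_pfn(unsigned mfn)
{
    unsigned pfn = mfn_to_pfn_no_overrides(mfn);

    /* Doesn't round-trip: possibly a foreign frame, ask the override table. */
    if (pfn >= 8 || p2m[pfn] != mfn)
        pfn = override_pfn_for(mfn);
    return pfn;
}

int main(void)
{
    printf("mfn 1 -> pfn %u\n", mfn_to_pfn(1)); /* round-trips: 5 */
    printf("mfn 0 -> pfn %u\n", mfn_to_pfn(0)); /* stale entry: falls to override */
    return 0;
}

The p2m.c hunk further down switches m2p_add_override()/m2p_remove_override() to the same raw helper, so they test the plain m2p entry rather than an override-adjusted one.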
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..897783b3302a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
1506 err = amd_pmu_init(); 1506 err = amd_pmu_init();
1507 break; 1507 break;
1508 default: 1508 default:
1509 return 0; 1509 err = -ENOTSUPP;
1510 } 1510 }
1511 if (err != 0) { 1511 if (err != 0) {
1512 pr_cont("no PMU driver, software events only.\n"); 1512 pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
1883 1883
1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1885{ 1885{
1886 userpg->cap_usr_time = 0; 1886 userpg->cap_user_time = 0;
1887 userpg->cap_usr_time_zero = 0; 1887 userpg->cap_user_time_zero = 0;
1888 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1889 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1890 1890
1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1895 return; 1895 return;
1896 1896
1897 userpg->cap_usr_time = 1; 1897 userpg->cap_user_time = 1;
1898 userpg->time_mult = this_cpu_read(cyc2ns); 1898 userpg->time_mult = this_cpu_read(cyc2ns);
1899 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1899 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901 1901
1902 if (sched_clock_stable && !check_tsc_disabled()) { 1902 if (sched_clock_stable && !check_tsc_disabled()) {
1903 userpg->cap_usr_time_zero = 1; 1903 userpg->cap_user_time_zero = 1;
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset); 1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 } 1905 }
1906} 1906}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c62d88396ad5..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -899,8 +899,8 @@ static __initconst const u64 atom_hw_cache_event_ids
899static struct extra_reg intel_slm_extra_regs[] __read_mostly = 899static struct extra_reg intel_slm_extra_regs[] __read_mostly =
900{ 900{
901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0), 902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1), 903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
904 EVENT_EXTRA_END 904 EVENT_EXTRA_END
905}; 905};
906 906
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
2325 break; 2325 break;
2326 2326
2327 case 55: /* Atom 22nm "Silvermont" */ 2327 case 55: /* Atom 22nm "Silvermont" */
2328 case 77: /* Avoton "Silvermont" */
2328 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2329 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2329 sizeof(hw_cache_event_ids)); 2330 sizeof(hw_cache_event_ids));
2330 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 2331 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 63438aad177f..ab3ba1c1b7dd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -584,6 +584,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
584 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 584 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
585 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 585 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
586 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 586 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
587 INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
587 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ 588 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
588 EVENT_CONSTRAINT_END 589 EVENT_CONSTRAINT_END
589}; 590};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2706 box->hrtimer.function = uncore_pmu_hrtimer; 2706 box->hrtimer.function = uncore_pmu_hrtimer;
2707} 2707}
2708 2708
2709struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) 2709static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2710{ 2710{
2711 struct intel_uncore_box *box; 2711 struct intel_uncore_box *box;
2712 int i, size; 2712 int i, size;
2713 2713
2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); 2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715 2715
2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); 2716 box = kzalloc_node(size, GFP_KERNEL, node);
2717 if (!box) 2717 if (!box)
2718 return NULL; 2718 return NULL;
2719 2719
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3031 struct intel_uncore_box *fake_box; 3031 struct intel_uncore_box *fake_box;
3032 int ret = -EINVAL, n; 3032 int ret = -EINVAL, n;
3033 3033
3034 fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); 3034 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3035 if (!fake_box) 3035 if (!fake_box)
3036 return -ENOMEM; 3036 return -ENOMEM;
3037 3037
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
3294 } 3294 }
3295 3295
3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; 3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297 box = uncore_alloc_box(type, 0); 3297 box = uncore_alloc_box(type, NUMA_NO_NODE);
3298 if (!box) 3298 if (!box)
3299 return -ENOMEM; 3299 return -ENOMEM;
3300 3300
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
3499 if (pmu->func_id < 0) 3499 if (pmu->func_id < 0)
3500 pmu->func_id = j; 3500 pmu->func_id = j;
3501 3501
3502 box = uncore_alloc_box(type, cpu); 3502 box = uncore_alloc_box(type, cpu_to_node(cpu));
3503 if (!box) 3503 if (!box)
3504 return -ENOMEM; 3504 return -ENOMEM;
3505 3505
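In the uncore hunks above the allocator now takes a NUMA node rather than a CPU, so call sites with no meaningful CPU (group validation, PCI probe) can pass NUMA_NO_NODE instead of faking one, while the hotplug path passes cpu_to_node(cpu). A userspace sketch of the calling convention only; NODE_ANY stands in for NUMA_NO_NODE and plain calloc() for kzalloc_node():

#include <stdio.h>
#include <stdlib.h>

#define NODE_ANY (-1)   /* stand-in for NUMA_NO_NODE */

/* Allocate a zeroed box with an optional node hint; the hint is only
 * printed here, whereas kzalloc_node() would use it for placement. */
static void *alloc_box(size_t size, int node)
{
    void *box = calloc(1, size);

    if (box)
        printf("allocated %zu bytes (node hint %d)\n", size, node);
    return box;
}

int main(void)
{
    void *real = alloc_box(128, 0);        /* cpu_to_node(cpu) at the call site */
    void *fake = alloc_box(128, NODE_ANY); /* validation/probe: no preferred node */

    free(real);
    free(fake);
    return 0;
}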
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1b69951a81e2..b077f4cc225a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -487,21 +487,6 @@ ENDPROC(native_usergs_sysret64)
487 TRACE_IRQS_OFF 487 TRACE_IRQS_OFF
488 .endm 488 .endm
489 489
490ENTRY(save_rest)
491 PARTIAL_FRAME 1 (REST_SKIP+8)
492 movq 5*8+16(%rsp), %r11 /* save return address */
493 movq_cfi rbx, RBX+16
494 movq_cfi rbp, RBP+16
495 movq_cfi r12, R12+16
496 movq_cfi r13, R13+16
497 movq_cfi r14, R14+16
498 movq_cfi r15, R15+16
499 movq %r11, 8(%rsp) /* return address */
500 FIXUP_TOP_OF_STACK %r11, 16
501 ret
502 CFI_ENDPROC
503END(save_rest)
504
505/* save complete stack frame */ 490/* save complete stack frame */
506 .pushsection .kprobes.text, "ax" 491 .pushsection .kprobes.text, "ax"
507ENTRY(save_paranoid) 492ENTRY(save_paranoid)
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
216 /* need to apply patch? */ 216 /* need to apply patch? */
217 if (rev >= mc_amd->hdr.patch_id) { 217 if (rev >= mc_amd->hdr.patch_id) {
218 c->microcode = rev; 218 c->microcode = rev;
219 uci->cpu_sig.rev = rev;
219 return 0; 220 return 0;
220 } 221 }
221 222
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..e643e744e4d8 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
352 }, 352 },
353 { /* Handle problems with rebooting on the Precision M6600. */ 353 { /* Handle problems with rebooting on the Precision M6600. */
354 .callback = set_pci_reboot, 354 .callback = set_pci_reboot,
355 .ident = "Dell OptiPlex 990", 355 .ident = "Dell Precision M6600",
356 .matches = { 356 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 357 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
358 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), 358 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
359 }, 359 },
360 }, 360 },
361 { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
362 .callback = set_pci_reboot,
363 .ident = "Dell PowerEdge C6100",
364 .matches = {
365 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
366 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
367 },
368 },
369 { /* Some C6100 machines were shipped with vendor being 'Dell'. */
370 .callback = set_pci_reboot,
371 .ident = "Dell PowerEdge C6100",
372 .matches = {
373 DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
374 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
375 },
376 },
361 { } 377 { }
362}; 378};
363 379
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index aecc98a93d1b..6cacab671f9b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -653,6 +653,7 @@ static void announce_cpu(int cpu, int apicid)
653{ 653{
654 static int current_node = -1; 654 static int current_node = -1;
655 int node = early_cpu_to_node(cpu); 655 int node = early_cpu_to_node(cpu);
656 int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
656 657
657 if (system_state == SYSTEM_BOOTING) { 658 if (system_state == SYSTEM_BOOTING) {
658 if (node != current_node) { 659 if (node != current_node) {
@@ -661,7 +662,7 @@ static void announce_cpu(int cpu, int apicid)
661 current_node = node; 662 current_node = node;
662 pr_info("Booting Node %3d, Processors ", node); 663 pr_info("Booting Node %3d, Processors ", node);
663 } 664 }
664 pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : ""); 665 pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : "");
665 return; 666 return;
666 } else 667 } else
667 pr_info("Booting Node %d Processor %d APIC 0x%x\n", 668 pr_info("Booting Node %d Processor %d APIC 0x%x\n",
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 22513e96b012..86179d409893 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
72 * the part that is occupied by the framebuffer */ 72 * the part that is occupied by the framebuffer */
73 len = mode->height * mode->stride; 73 len = mode->height * mode->stride;
74 len = PAGE_ALIGN(len); 74 len = PAGE_ALIGN(len);
75 if (len > si->lfb_size << 16) { 75 if (len > (u64)si->lfb_size << 16) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 /* setup IORESOURCE_MEM as framebuffer memory */ 80 /* setup IORESOURCE_MEM as framebuffer memory */
81 memset(&res, 0, sizeof(res)); 81 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM; 82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 83 res.name = simplefb_resname;
84 res.start = si->lfb_base; 84 res.start = si->lfb_base;
85 res.end = si->lfb_base + len - 1; 85 res.end = si->lfb_base + len - 1;
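The create_simplefb() check casts lfb_size to u64 before shifting, because lfb_size is a 32-bit count of 64 KiB units: doing the << 16 in 32 bits wraps for framebuffers of 4 GiB or more and makes the size check reject a valid configuration. A short demonstration (the 4 GiB figure is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int lfb_size = 0x10000;   /* hypothetical: 65536 x 64 KiB = 4 GiB of VRAM */
    uint64_t len = 0x100000;           /* 1 MiB visible framebuffer */

    unsigned int wrapped = lfb_size << 16;          /* 32-bit shift wraps to 0 */
    uint64_t     correct = (uint64_t)lfb_size << 16;

    printf("32-bit shift: %#x   -> len > size? %s (spurious -EINVAL)\n",
           wrapped, len > wrapped ? "yes" : "no");
    printf("64-bit shift: %#llx -> len > size? %s\n",
           (unsigned long long)correct, len > correct ? "yes" : "no");
    return 0;
}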
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2bc1e81045b0..ddc3f3d2afdb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2025,6 +2025,17 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2025 return rc; 2025 return rc;
2026} 2026}
2027 2027
2028static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2029{
2030 int rc;
2031
2032 rc = em_ret_far(ctxt);
2033 if (rc != X86EMUL_CONTINUE)
2034 return rc;
2035 rsp_increment(ctxt, ctxt->src.val);
2036 return X86EMUL_CONTINUE;
2037}
2038
2028static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2039static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2029{ 2040{
2030 /* Save real source value, then compare EAX against destination. */ 2041 /* Save real source value, then compare EAX against destination. */
@@ -3763,7 +3774,8 @@ static const struct opcode opcode_table[256] = {
3763 G(ByteOp, group11), G(0, group11), 3774 G(ByteOp, group11), G(0, group11),
3764 /* 0xC8 - 0xCF */ 3775 /* 0xC8 - 0xCF */
3765 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 3776 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
3766 N, I(ImplicitOps | Stack, em_ret_far), 3777 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
3778 I(ImplicitOps | Stack, em_ret_far),
3767 D(ImplicitOps), DI(SrcImmByte, intn), 3779 D(ImplicitOps), DI(SrcImmByte, intn),
3768 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 3780 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3769 /* 0xD0 - 0xD7 */ 3781 /* 0xD0 - 0xD7 */
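The new em_ret_far_imm() emulates the far-return-with-immediate form (opcode 0xCA): it performs the ordinary far return and then advances the stack pointer by the immediate to drop the callee's arguments, which is the rsp_increment() call after em_ret_far(). A toy 16-bit stack model of the difference between the two forms:

#include <stdio.h>
#include <stdint.h>

static uint16_t stack[16] = { 0x1111, 0x2222, 0xBEEF, 0x0010 }; /* IP, CS, two arg words */
static int sp;                                                  /* grows upward in this toy */

static uint16_t pop16(void) { return stack[sp++]; }

/* RETF: pop new IP, then new CS. */
static void ret_far(uint16_t *ip, uint16_t *cs)
{
    *ip = pop16();
    *cs = pop16();
}

/* RETF imm16: same, then discard imm16 bytes of arguments. */
static void ret_far_imm(uint16_t *ip, uint16_t *cs, uint16_t imm)
{
    ret_far(ip, cs);
    sp += imm / sizeof(uint16_t);   /* rsp_increment(ctxt, ctxt->src.val) */
}

int main(void)
{
    uint16_t ip, cs;

    ret_far_imm(&ip, &cs, 4);
    printf("ip=%#x cs=%#x sp=%d (args skipped)\n", ip, cs, sp);
    return 0;
}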
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 043330159179..ad75d77999d0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -99,6 +99,7 @@ struct guest_walker {
99 pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; 99 pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
100 gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; 100 gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
101 pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; 101 pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
102 bool pte_writable[PT_MAX_FULL_LEVELS];
102 unsigned pt_access; 103 unsigned pt_access;
103 unsigned pte_access; 104 unsigned pte_access;
104 gfn_t gfn; 105 gfn_t gfn;
@@ -235,6 +236,22 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
235 if (pte == orig_pte) 236 if (pte == orig_pte)
236 continue; 237 continue;
237 238
239 /*
240 * If the slot is read-only, simply do not process the accessed
241 * and dirty bits. This is the correct thing to do if the slot
242 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
243 * are only supported if the accessed and dirty bits are already
244 * set in the ROM (so that MMIO writes are never needed).
245 *
246 * Note that NPT does not allow this at all and faults, since
247 * it always wants nested page table entries for the guest
248 * page tables to be writable. And EPT works but will simply
249 * overwrite the read-only memory to set the accessed and dirty
250 * bits.
251 */
252 if (unlikely(!walker->pte_writable[level - 1]))
253 continue;
254
238 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); 255 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
239 if (ret) 256 if (ret)
240 return ret; 257 return ret;
@@ -309,7 +326,8 @@ retry_walk:
309 goto error; 326 goto error;
310 real_gfn = gpa_to_gfn(real_gfn); 327 real_gfn = gpa_to_gfn(real_gfn);
311 328
312 host_addr = gfn_to_hva(vcpu->kvm, real_gfn); 329 host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
330 &walker->pte_writable[walker->level - 1]);
313 if (unlikely(kvm_is_error_hva(host_addr))) 331 if (unlikely(kvm_is_error_hva(host_addr)))
314 goto error; 332 goto error;
315 333
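The guest_walker now records, per level, whether the host mapping backing each guest page-table page is writable (via gfn_to_hva_prot()), and update_accessed_dirty_bits() skips its cmpxchg write-back for levels backed by a read-only slot, as the new comment explains. A toy model of just that skip logic (field names borrowed, everything else invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LEVELS   4
#define ACCESSED 0x20u

struct walker {
    uint64_t pte[LEVELS];
    bool     pte_writable[LEVELS];
};

static void update_accessed_bits(struct walker *w)
{
    for (int level = LEVELS; level >= 1; level--) {
        if (!w->pte_writable[level - 1])
            continue;                      /* backing slot is read-only: skip */
        w->pte[level - 1] |= ACCESSED;     /* stands in for cmpxchg_gpte() */
    }
}

int main(void)
{
    struct walker w = {
        .pte          = { 0x1000, 0x2000, 0x3000, 0x4000 },
        .pte_writable = { true, true, false, true },   /* level 3 lives in ROM */
    };

    update_accessed_bits(&w);
    for (int i = 0; i < LEVELS; i++)
        printf("level %d: pte=%#llx writable=%d\n",
               i + 1, (unsigned long long)w.pte[i], w.pte_writable[i]);
    return 0;
}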
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1f1da43ff2a2..3b8e7459dd4d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5339,6 +5339,17 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
5339 return 0; 5339 return 0;
5340 } 5340 }
5341 5341
5342 /*
5343 * EPT violation happened while executing iret from NMI,
5344 * "blocked by NMI" bit has to be set before next VM entry.
5345 * There are errata that may cause this bit to not be set:
5346 * AAK134, BY25.
5347 */
5348 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5349 cpu_has_virtual_nmis() &&
5350 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5351 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5352
5342 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5353 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5343 trace_kvm_page_fault(gpa, exit_qualification); 5354 trace_kvm_page_fault(gpa, exit_qualification);
5344 5355
@@ -7766,6 +7777,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7766 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 7777 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7767 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 7778 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7768 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 7779 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7780 __clear_bit(VCPU_EXREG_PDPTR,
7781 (unsigned long *)&vcpu->arch.regs_avail);
7782 __clear_bit(VCPU_EXREG_PDPTR,
7783 (unsigned long *)&vcpu->arch.regs_dirty);
7769 } 7784 }
7770 7785
7771 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5596c7bdd327..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) 700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
701 return -ENODEV; 701 return -ENODEV;
702 702
703 if (start > end || !addr) 703 if (start > end)
704 return -EINVAL; 704 return -EINVAL;
705 705
706 mutex_lock(&pci_mmcfg_lock); 706 mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
716 return -EEXIST; 716 return -EEXIST;
717 } 717 }
718 718
719 if (!addr) {
720 mutex_unlock(&pci_mmcfg_lock);
721 return -EINVAL;
722 }
723
719 rc = -EBUSY; 724 rc = -EBUSY;
720 cfg = pci_mmconfig_alloc(seg, start, end, addr); 725 cfg = pci_mmconfig_alloc(seg, start, end, addr);
721 if (cfg == NULL) { 726 if (cfg == NULL) {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
912 912
913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
914 md = p; 914 md = p;
915 if (!(md->attribute & EFI_MEMORY_RUNTIME) && 915 if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
916 md->type != EFI_BOOT_SERVICES_CODE && 916#ifdef CONFIG_X86_64
917 md->type != EFI_BOOT_SERVICES_DATA) 917 if (md->type != EFI_BOOT_SERVICES_CODE &&
918 continue; 918 md->type != EFI_BOOT_SERVICES_DATA)
919#endif
920 continue;
921 }
919 922
920 size = md->num_pages << EFI_PAGE_SHIFT; 923 size = md->num_pages << EFI_PAGE_SHIFT;
921 end = md->phys_addr + size; 924 end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
879 unsigned long uninitialized_var(address); 879 unsigned long uninitialized_var(address);
880 unsigned level; 880 unsigned level;
881 pte_t *ptep = NULL; 881 pte_t *ptep = NULL;
882 int ret = 0;
883 882
884 pfn = page_to_pfn(page); 883 pfn = page_to_pfn(page);
885 if (!PageHighMem(page)) { 884 if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
926 * frontend pages while they are being shared with the backend, 925 * frontend pages while they are being shared with the backend,
927 * because mfn_to_pfn (that ends up being called by GUPF) will 926 * because mfn_to_pfn (that ends up being called by GUPF) will
928 * return the backend pfn rather than the frontend pfn. */ 927 * return the backend pfn rather than the frontend pfn. */
929 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 928 pfn = mfn_to_pfn_no_overrides(mfn);
930 if (ret == 0 && get_phys_to_machine(pfn) == mfn) 929 if (get_phys_to_machine(pfn) == mfn)
931 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 930 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
932 931
933 return 0; 932 return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
942 unsigned long uninitialized_var(address); 941 unsigned long uninitialized_var(address);
943 unsigned level; 942 unsigned level;
944 pte_t *ptep = NULL; 943 pte_t *ptep = NULL;
945 int ret = 0;
946 944
947 pfn = page_to_pfn(page); 945 pfn = page_to_pfn(page);
948 mfn = get_phys_to_machine(pfn); 946 mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
1029 * the original pfn causes mfn_to_pfn(mfn) to return the frontend 1027 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
1030 * pfn again. */ 1028 * pfn again. */
1031 mfn &= ~FOREIGN_FRAME_BIT; 1029 mfn &= ~FOREIGN_FRAME_BIT;
1032 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 1030 pfn = mfn_to_pfn_no_overrides(mfn);
1033 if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 1031 if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
1034 m2p_find_override(mfn) == NULL) 1032 m2p_find_override(mfn) == NULL)
1035 set_phys_to_machine(pfn, mfn); 1033 set_phys_to_machine(pfn, mfn);
1036 1034
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
259} 259}
260 260
261 261
262/*
263 * Our init of PV spinlocks is split in two init functions due to us
264 * using paravirt patching and jump labels patching and having to do
265 * all of this before SMP code is invoked.
266 *
267 * The paravirt patching needs to be done _before_ the alternative asm code
268 * is started, otherwise we would not patch the core kernel code.
269 */
262void __init xen_init_spinlocks(void) 270void __init xen_init_spinlocks(void)
263{ 271{
264 272
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
267 return; 275 return;
268 } 276 }
269 277
270 static_key_slow_inc(&paravirt_ticketlocks_enabled);
271
272 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); 278 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
273 pv_lock_ops.unlock_kick = xen_unlock_kick; 279 pv_lock_ops.unlock_kick = xen_unlock_kick;
274} 280}
275 281
282/*
283 * While the jump_label init code needs to happend _after_ the jump labels are
284 * enabled and before SMP is started. Hence we use pre-SMP initcall level
285 * init. We cannot do it in xen_init_spinlocks as that is done before
286 * jump labels are activated.
287 */
288static __init int xen_init_spinlocks_jump(void)
289{
290 if (!xen_pvspin)
291 return 0;
292
293 static_key_slow_inc(&paravirt_ticketlocks_enabled);
294 return 0;
295}
296early_initcall(xen_init_spinlocks_jump);
297
276static __init int xen_parse_nopvspin(char *arg) 298static __init int xen_parse_nopvspin(char *arg)
277{ 299{
278 xen_pvspin = false; 300 xen_pvspin = false;
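The spinlock.c change splits initialization in two because, as the added comments state, the pv_lock_ops hooks must be installed before alternative/paravirt patching runs, while static_key_slow_inc() can only be called once the jump-label machinery is ready, hence the early_initcall(). A compact sketch of that two-phase pattern (flags replace the real hooks and key):

#include <stdbool.h>
#include <stdio.h>

static bool xen_pvspin = true;
static bool hooks_installed;     /* stands in for the pv_lock_ops assignments */
static bool ticketlocks_enabled; /* stands in for the static key */

/* Phase 1: runs very early, before "patching" and before SMP. */
static void xen_init_spinlocks(void)
{
    if (!xen_pvspin)
        return;
    hooks_installed = true;
}

/* Phase 2: runs from an early initcall, once jump labels work. */
static int xen_init_spinlocks_jump(void)
{
    if (!xen_pvspin)
        return 0;
    ticketlocks_enabled = true;
    return 0;
}

int main(void)
{
    xen_init_spinlocks();
    xen_init_spinlocks_jump();
    printf("hooks=%d key=%d\n", hooks_installed, ticketlocks_enabled);
    return 0;
}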
diff --git a/block/Kconfig b/block/Kconfig
index 7f38e40fee08..2429515c05c2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING
99 99
100 See Documentation/cgroups/blkio-controller.txt for more information. 100 See Documentation/cgroups/blkio-controller.txt for more information.
101 101
102config CMDLINE_PARSER 102config BLK_CMDLINE_PARSER
103 bool "Block device command line partition parser" 103 bool "Block device command line partition parser"
104 default n 104 default n
105 ---help--- 105 ---help---
106 Parsing command line, get the partitions information. 106 Enabling this option allows you to specify the partition layout from
107 the kernel boot args. This is typically of use for embedded devices
108 which don't otherwise have any standardized method for listing the
109 partitions on a block device.
110
111 See Documentation/block/cmdline-partition.txt for more information.
107 112
108menu "Partition Types" 113menu "Partition Types"
109 114
diff --git a/block/Makefile b/block/Makefile
index 4fa4be544ece..671a83d063a5 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
18 18
19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o 19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o 20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
21obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o 21obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e90c7c164c83..4e491d9b5292 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
235 blkg->online = true; 235 blkg->online = true;
236 spin_unlock(&blkcg->lock); 236 spin_unlock(&blkcg->lock);
237 237
238 if (!ret) 238 if (!ret) {
239 if (blkcg == &blkcg_root) {
240 q->root_blkg = blkg;
241 q->root_rl.blkg = blkg;
242 }
239 return blkg; 243 return blkg;
244 }
240 245
241 /* @blkg failed fully initialized, use the usual release path */ 246 /* @blkg failed fully initialized, use the usual release path */
242 blkg_put(blkg); 247 blkg_put(blkg);
@@ -335,6 +340,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
335 rcu_assign_pointer(blkcg->blkg_hint, NULL); 340 rcu_assign_pointer(blkcg->blkg_hint, NULL);
336 341
337 /* 342 /*
343 * If root blkg is destroyed. Just clear the pointer since root_rl
344 * does not take reference on root blkg.
345 */
346 if (blkcg == &blkcg_root) {
347 blkg->q->root_blkg = NULL;
348 blkg->q->root_rl.blkg = NULL;
349 }
350
351 /*
338 * Put the reference taken at the time of creation so that when all 352 * Put the reference taken at the time of creation so that when all
339 * queues are gone, group can be destroyed. 353 * queues are gone, group can be destroyed.
340 */ 354 */
@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
360 blkg_destroy(blkg); 374 blkg_destroy(blkg);
361 spin_unlock(&blkcg->lock); 375 spin_unlock(&blkcg->lock);
362 } 376 }
363
364 /*
365 * root blkg is destroyed. Just clear the pointer since
366 * root_rl does not take reference on root blkg.
367 */
368 q->root_blkg = NULL;
369 q->root_rl.blkg = NULL;
370} 377}
371 378
372/* 379/*
@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
970 ret = PTR_ERR(blkg); 977 ret = PTR_ERR(blkg);
971 goto out_unlock; 978 goto out_unlock;
972 } 979 }
973 q->root_blkg = blkg;
974 q->root_rl.blkg = blkg;
975 980
976 list_for_each_entry(blkg, &q->blkg_list, q_node) 981 list_for_each_entry(blkg, &q->blkg_list, q_node)
977 cnt++; 982 cnt++;
diff --git a/block/blk-core.c b/block/blk-core.c
index c04505358342..0a00e4ecf87c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1549,11 +1549,9 @@ get_rq:
1549 if (plug) { 1549 if (plug) {
1550 /* 1550 /*
1551 * If this is the first request added after a plug, fire 1551 * If this is the first request added after a plug, fire
1552 * of a plug trace. If others have been added before, check 1552 * of a plug trace.
1553 * if we have multiple devices in this plug. If so, make a
1554 * note to sort the list before dispatch.
1555 */ 1553 */
1556 if (list_empty(&plug->list)) 1554 if (!request_count)
1557 trace_block_plug(q); 1555 trace_block_plug(q);
1558 else { 1556 else {
1559 if (request_count >= BLK_MAX_REQUEST_COUNT) { 1557 if (request_count >= BLK_MAX_REQUEST_COUNT) {
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e70621396129..ae4f27d7944e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
68 spin_lock_irq(q->queue_lock); 68 spin_lock_irq(q->queue_lock);
69 69
70 if (unlikely(blk_queue_dying(q))) { 70 if (unlikely(blk_queue_dying(q))) {
71 rq->cmd_flags |= REQ_QUIET;
71 rq->errors = -ENXIO; 72 rq->errors = -ENXIO;
72 if (rq->end_io) 73 __blk_end_request_all(rq, rq->errors);
73 rq->end_io(rq, rq->errors);
74 spin_unlock_irq(q->queue_lock); 74 spin_unlock_irq(q->queue_lock);
75 return; 75 return;
76 } 76 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index dabb9d02cf9a..434944cbd761 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1803 1803
1804 if (samples) { 1804 if (samples) {
1805 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); 1805 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1806 do_div(v, samples); 1806 v = div64_u64(v, samples);
1807 } 1807 }
1808 __blkg_prfill_u64(sf, pd, v); 1808 __blkg_prfill_u64(sf, pd, v);
1809 return 0; 1809 return 0;
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4358 if (!eq) 4358 if (!eq)
4359 return -ENOMEM; 4359 return -ENOMEM;
4360 4360
4361 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 4361 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4362 if (!cfqd) { 4362 if (!cfqd) {
4363 kobject_put(&eq->kobj); 4363 kobject_put(&eq->kobj);
4364 return -ENOMEM; 4364 return -ENOMEM;
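In the first cfq hunk the average is now computed with div64_u64() because both the sum and the sample count are u64, and do_div() only accepts a 32-bit divisor (plain 64-bit '/' is not available on 32-bit kernels). A userspace sketch of the same calculation, where ordinary division is fine:

#include <stdint.h>
#include <stdio.h>

/* sum / samples with both operands 64-bit; in kernel code this is
 * div64_u64(sum, samples) so it also builds on 32-bit architectures. */
static uint64_t avg_queue_size(uint64_t sum, uint64_t samples)
{
    if (!samples)
        return 0;
    return sum / samples;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)avg_queue_size(10ULL << 32, 5));
    return 0;
}

The remaining hunk in this file and the next few (deadline, elevator, genhd) are the mechanical kmalloc_node(... | __GFP_ZERO) to kzalloc_node() conversion.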
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 20614a332362..9ef66406c625 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
346 if (!eq) 346 if (!eq)
347 return -ENOMEM; 347 return -ENOMEM;
348 348
349 dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node); 349 dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
350 if (!dd) { 350 if (!dd) {
351 kobject_put(&eq->kobj); 351 kobject_put(&eq->kobj);
352 return -ENOMEM; 352 return -ENOMEM;
diff --git a/block/elevator.c b/block/elevator.c
index 668394d18588..2bcbd8cc14d4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
155{ 155{
156 struct elevator_queue *eq; 156 struct elevator_queue *eq;
157 157
158 eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); 158 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
159 if (unlikely(!eq)) 159 if (unlikely(!eq))
160 goto err; 160 goto err;
161 161
diff --git a/block/genhd.c b/block/genhd.c
index dadf42b454a3..791f41943132 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
1252{ 1252{
1253 struct gendisk *disk; 1253 struct gendisk *disk;
1254 1254
1255 disk = kmalloc_node(sizeof(struct gendisk), 1255 disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
1256 GFP_KERNEL | __GFP_ZERO, node_id);
1257 if (disk) { 1256 if (disk) {
1258 if (!init_part_stats(&disk->part0)) { 1257 if (!init_part_stats(&disk->part0)) {
1259 kfree(disk); 1258 kfree(disk);
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 87a32086535d..9b29a996c311 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -263,7 +263,7 @@ config SYSV68_PARTITION
263 263
264config CMDLINE_PARTITION 264config CMDLINE_PARTITION
265 bool "Command line partition support" if PARTITION_ADVANCED 265 bool "Command line partition support" if PARTITION_ADVANCED
266 select CMDLINE_PARSER 266 select BLK_CMDLINE_PARSER
267 help 267 help
268 Say Y here if you would read the partitions table from bootargs. 268 Say Y here if you want to read the partition table from bootargs.
269 The format for the command line is just like mtdparts. 269 The format for the command line is just like mtdparts.
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index 56cf4ffad51e..5141b563adf1 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -2,15 +2,15 @@
2 * Copyright (C) 2013 HUAWEI 2 * Copyright (C) 2013 HUAWEI
3 * Author: Cai Zhiyong <caizhiyong@huawei.com> 3 * Author: Cai Zhiyong <caizhiyong@huawei.com>
4 * 4 *
5 * Read block device partition table from command line. 5 * Read block device partition table from the command line.
6 * The partition used for fixed block device (eMMC) embedded device. 6 * Typically used for fixed block (eMMC) embedded devices.
7 * It is no MBR, save storage space. Bootloader can be easily accessed 7 * It has no MBR, so saves storage space. Bootloader can be easily accessed
8 * by absolute address of data on the block device. 8 * by absolute address of data on the block device.
9 * Users can easily change the partition. 9 * Users can easily change the partition.
10 * 10 *
11 * The format for the command line is just like mtdparts. 11 * The format for the command line is just like mtdparts.
12 * 12 *
13 * Verbose config please reference "Documentation/block/cmdline-partition.txt" 13 * For further information, see "Documentation/block/cmdline-partition.txt"
14 * 14 *
15 */ 15 */
16 16
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
39#include <linux/ipmi.h> 39#include <linux/ipmi.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/pnp.h> 41#include <linux/pnp.h>
42#include <linux/spinlock.h>
42 43
43MODULE_AUTHOR("Zhao Yakui"); 44MODULE_AUTHOR("Zhao Yakui");
44MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); 45MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
57 struct list_head head; 58 struct list_head head;
58 /* the IPMI request message list */ 59 /* the IPMI request message list */
59 struct list_head tx_msg_list; 60 struct list_head tx_msg_list;
60 struct mutex tx_msg_lock; 61 spinlock_t tx_msg_lock;
61 acpi_handle handle; 62 acpi_handle handle;
62 struct pnp_dev *pnp_dev; 63 struct pnp_dev *pnp_dev;
63 ipmi_user_t user_interface; 64 ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
147 struct kernel_ipmi_msg *msg; 148 struct kernel_ipmi_msg *msg;
148 struct acpi_ipmi_buffer *buffer; 149 struct acpi_ipmi_buffer *buffer;
149 struct acpi_ipmi_device *device; 150 struct acpi_ipmi_device *device;
151 unsigned long flags;
150 152
151 msg = &tx_msg->tx_message; 153 msg = &tx_msg->tx_message;
152 /* 154 /*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
177 179
178 /* Get the msgid */ 180 /* Get the msgid */
179 device = tx_msg->device; 181 device = tx_msg->device;
180 mutex_lock(&device->tx_msg_lock); 182 spin_lock_irqsave(&device->tx_msg_lock, flags);
181 device->curr_msgid++; 183 device->curr_msgid++;
182 tx_msg->tx_msgid = device->curr_msgid; 184 tx_msg->tx_msgid = device->curr_msgid;
183 mutex_unlock(&device->tx_msg_lock); 185 spin_unlock_irqrestore(&device->tx_msg_lock, flags);
184} 186}
185 187
186static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, 188static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
242 int msg_found = 0; 244 int msg_found = 0;
243 struct acpi_ipmi_msg *tx_msg; 245 struct acpi_ipmi_msg *tx_msg;
244 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; 246 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
247 unsigned long flags;
245 248
246 if (msg->user != ipmi_device->user_interface) { 249 if (msg->user != ipmi_device->user_interface) {
247 dev_warn(&pnp_dev->dev, "Unexpected response is returned. " 250 dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
250 ipmi_free_recv_msg(msg); 253 ipmi_free_recv_msg(msg);
251 return; 254 return;
252 } 255 }
253 mutex_lock(&ipmi_device->tx_msg_lock); 256 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
254 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { 257 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
255 if (msg->msgid == tx_msg->tx_msgid) { 258 if (msg->msgid == tx_msg->tx_msgid) {
256 msg_found = 1; 259 msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
258 } 261 }
259 } 262 }
260 263
261 mutex_unlock(&ipmi_device->tx_msg_lock); 264 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
262 if (!msg_found) { 265 if (!msg_found) {
263 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " 266 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
264 "returned.\n", msg->msgid); 267 "returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
378 struct acpi_ipmi_device *ipmi_device = handler_context; 381 struct acpi_ipmi_device *ipmi_device = handler_context;
379 int err, rem_time; 382 int err, rem_time;
380 acpi_status status; 383 acpi_status status;
384 unsigned long flags;
381 /* 385 /*
382 * IPMI opregion message. 386 * IPMI opregion message.
383 * IPMI message is firstly written to the BMC and system software 387 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
395 return AE_NO_MEMORY; 399 return AE_NO_MEMORY;
396 400
397 acpi_format_ipmi_msg(tx_msg, address, value); 401 acpi_format_ipmi_msg(tx_msg, address, value);
398 mutex_lock(&ipmi_device->tx_msg_lock); 402 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
399 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); 403 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
400 mutex_unlock(&ipmi_device->tx_msg_lock); 404 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
401 err = ipmi_request_settime(ipmi_device->user_interface, 405 err = ipmi_request_settime(ipmi_device->user_interface,
402 &tx_msg->addr, 406 &tx_msg->addr,
403 tx_msg->tx_msgid, 407 tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
413 status = AE_OK; 417 status = AE_OK;
414 418
415end_label: 419end_label:
416 mutex_lock(&ipmi_device->tx_msg_lock); 420 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
417 list_del(&tx_msg->head); 421 list_del(&tx_msg->head);
418 mutex_unlock(&ipmi_device->tx_msg_lock); 422 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
419 kfree(tx_msg); 423 kfree(tx_msg);
420 return status; 424 return status;
421} 425}
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
457 461
458 INIT_LIST_HEAD(&ipmi_device->head); 462 INIT_LIST_HEAD(&ipmi_device->head);
459 463
460 mutex_init(&ipmi_device->tx_msg_lock); 464 spin_lock_init(&ipmi_device->tx_msg_lock);
461 INIT_LIST_HEAD(&ipmi_device->tx_msg_list); 465 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
462 ipmi_install_space_handler(ipmi_device); 466 ipmi_install_space_handler(ipmi_device);
463 467
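The acpi_ipmi change converts tx_msg_lock from a mutex to a spinlock with the irqsave variants, since the IPMI response handler that walks tx_msg_list may be called in atomic context where a mutex could sleep. The critical sections are all short (bump a counter, add or remove a list entry), which is what makes a spinlock appropriate; a rough userspace analogue of the msgid section using a pthread spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tx_msg_lock;
static unsigned long curr_msgid;

/* Short, non-sleeping critical section, like the kernel's
 * spin_lock_irqsave(&device->tx_msg_lock, flags) around curr_msgid++. */
static unsigned long next_msgid(void)
{
    unsigned long id;

    pthread_spin_lock(&tx_msg_lock);
    id = ++curr_msgid;
    pthread_spin_unlock(&tx_msg_lock);
    return id;
}

int main(void)
{
    pthread_spin_init(&tx_msg_lock, PTHREAD_PROCESS_PRIVATE);

    unsigned long a = next_msgid();
    unsigned long b = next_msgid();
    printf("msgid %lu then %lu\n", a, b);

    pthread_spin_destroy(&tx_msg_lock);
    return 0;
}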
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..407ad13cac2f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
968 } 968 }
969 return 0; 969 return 0;
970} 970}
971EXPORT_SYMBOL_GPL(acpi_bus_get_device); 971EXPORT_SYMBOL(acpi_bus_get_device);
972 972
973int acpi_device_add(struct acpi_device *device, 973int acpi_device_add(struct acpi_device *device,
974 void (*release)(struct device *)) 974 void (*release)(struct device *))
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
1121EXPORT_SYMBOL(acpi_bus_register_driver); 1121EXPORT_SYMBOL(acpi_bus_register_driver);
1122 1122
1123/** 1123/**
1124 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus 1124 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
1125 * @driver: driver to unregister 1125 * @driver: driver to unregister
1126 * 1126 *
1127 * Unregisters a driver with the ACPI bus. Searches the namespace for all 1127 * Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
2 * sata_promise.c - Promise SATA 2 * sata_promise.c - Promise SATA
3 * 3 *
4 * Maintained by: Tejun Heo <tj@kernel.org> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Mikael Pettersson <mikpe@it.uu.se> 5 * Mikael Pettersson
6 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails. 7 * on emails.
8 * 8 *
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 449f6298dc89..8557adcd34ee 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2865,15 +2865,4 @@ static struct pci_driver he_driver = {
2865 .id_table = he_pci_tbl, 2865 .id_table = he_pci_tbl,
2866}; 2866};
2867 2867
2868static int __init he_init(void) 2868module_pci_driver(he_driver);
2869{
2870 return pci_register_driver(&he_driver);
2871}
2872
2873static void __exit he_cleanup(void)
2874{
2875 pci_unregister_driver(&he_driver);
2876}
2877
2878module_init(he_init);
2879module_exit(he_cleanup);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 409502a78e7e..5aca5f4c5458 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -778,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
778 return error; 778 return error;
779 } 779 }
780 780
781 if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) { 781 if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
782 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, 782 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
783 card->atmdev->esi, 6); 783 card->atmdev->esi, 6);
784 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 784 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
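The one-character nicstar fix matters because mac_pton() returns true on success: without the '!', a valid mac= module parameter sent the driver down the EPROM fallback, and a failed parse skipped it. A toy version of the corrected parse-or-fallback pattern:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* parse() returns true on success, like mac_pton(). */
static bool parse(const char *s, unsigned char mac[6])
{
    return s && sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                       &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) == 6;
}

int main(void)
{
    const char *param = "00:11:22:33:44:55";   /* user-supplied MAC */
    unsigned char esi[6];

    /* Fall back only when there is no parameter or it fails to parse. */
    if (param == NULL || !parse(param, esi))
        memcpy(esi, "\xde\xad\xbe\xef\x00\x01", 6);   /* "EPROM" fallback */

    printf("%02x:%02x:...\n", esi[0], esi[1]);
    return 0;
}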
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c7cfadcf6752..34abf4d8a45f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
2017 */ 2017 */
2018void device_shutdown(void) 2018void device_shutdown(void)
2019{ 2019{
2020 struct device *dev; 2020 struct device *dev, *parent;
2021 2021
2022 spin_lock(&devices_kset->list_lock); 2022 spin_lock(&devices_kset->list_lock);
2023 /* 2023 /*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
2034 * prevent it from being freed because parent's 2034 * prevent it from being freed because parent's
2035 * lock is to be held 2035 * lock is to be held
2036 */ 2036 */
2037 get_device(dev->parent); 2037 parent = get_device(dev->parent);
2038 get_device(dev); 2038 get_device(dev);
2039 /* 2039 /*
2040 * Make sure the device is off the kset list, in the 2040 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
2044 spin_unlock(&devices_kset->list_lock); 2044 spin_unlock(&devices_kset->list_lock);
2045 2045
2046 /* hold lock to avoid race with probe/release */ 2046 /* hold lock to avoid race with probe/release */
2047 if (dev->parent) 2047 if (parent)
2048 device_lock(dev->parent); 2048 device_lock(parent);
2049 device_lock(dev); 2049 device_lock(dev);
2050 2050
2051 /* Don't allow any more runtime suspends */ 2051 /* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
2063 } 2063 }
2064 2064
2065 device_unlock(dev); 2065 device_unlock(dev);
2066 if (dev->parent) 2066 if (parent)
2067 device_unlock(dev->parent); 2067 device_unlock(parent);
2068 2068
2069 put_device(dev); 2069 put_device(dev);
2070 put_device(dev->parent); 2070 put_device(parent);
2071 2071
2072 spin_lock(&devices_kset->list_lock); 2072 spin_lock(&devices_kset->list_lock);
2073 } 2073 }
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
210 } 210 }
211} 211}
212 212
213static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
214{
215 u16 data;
216
217 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
218 data = up ? 0x74 : 0x7C;
219 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
220 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
221 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
222 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
223 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
224 data = up ? 0x75 : 0x7D;
225 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
226 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
227 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
228 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
229 }
230}
231
232/************************************************** 213/**************************************************
233 * Init. 214 * Init.
234 **************************************************/ 215 **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
255 bcma_core_pci_clientmode_init(pc); 236 bcma_core_pci_clientmode_init(pc);
256} 237}
257 238
239void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
240{
241 struct bcma_drv_pci *pc;
242 u16 data;
243
244 if (bus->hosttype != BCMA_HOSTTYPE_PCI)
245 return;
246
247 pc = &bus->drv_pci[0];
248
249 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
250 data = up ? 0x74 : 0x7C;
251 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
252 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
253 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
254 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
255 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
256 data = up ? 0x75 : 0x7D;
257 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
258 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
259 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
260 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
261 }
262}
263EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
264
258int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 265int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
259 bool enable) 266 bool enable)
260{ 267{
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
310 317
311 pc = &bus->drv_pci[0]; 318 pc = &bus->drv_pci[0];
312 319
313 bcma_core_pci_power_save(pc, true);
314
315 bcma_core_pci_extend_L1timer(pc, true); 320 bcma_core_pci_extend_L1timer(pc, true);
316} 321}
317EXPORT_SYMBOL_GPL(bcma_core_pci_up); 322EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
326 pc = &bus->drv_pci[0]; 331 pc = &bus->drv_pci[0];
327 332
328 bcma_core_pci_extend_L1timer(pc, false); 333 bcma_core_pci_extend_L1timer(pc, false);
329
330 bcma_core_pci_power_save(pc, false);
331} 334}
332EXPORT_SYMBOL_GPL(bcma_core_pci_down); 335EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cd6b20fce680..37768401d113 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -269,6 +269,8 @@ static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 core
269 return NULL; 269 return NULL;
270} 270}
271 271
272#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
273
272static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, 274static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
273 struct bcma_device_id *match, int core_num, 275 struct bcma_device_id *match, int core_num,
274 struct bcma_device *core) 276 struct bcma_device *core)
@@ -351,11 +353,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
351 * the main register space for the core 353 * the main register space for the core
352 */ 354 */
353 tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0); 355 tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
354 if (tmp == 0 || IS_ERR_VALUE(tmp)) { 356 if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
355 /* Try again to see if it is a bridge */ 357 /* Try again to see if it is a bridge */
356 tmp = bcma_erom_get_addr_desc(bus, eromptr, 358 tmp = bcma_erom_get_addr_desc(bus, eromptr,
357 SCAN_ADDR_TYPE_BRIDGE, 0); 359 SCAN_ADDR_TYPE_BRIDGE, 0);
358 if (tmp == 0 || IS_ERR_VALUE(tmp)) { 360 if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
359 return -EILSEQ; 361 return -EILSEQ;
360 } else { 362 } else {
361 bcma_info(bus, "Bridge found\n"); 363 bcma_info(bus, "Bridge found\n");
@@ -369,7 +371,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
369 for (j = 0; ; j++) { 371 for (j = 0; ; j++) {
370 tmp = bcma_erom_get_addr_desc(bus, eromptr, 372 tmp = bcma_erom_get_addr_desc(bus, eromptr,
371 SCAN_ADDR_TYPE_SLAVE, i); 373 SCAN_ADDR_TYPE_SLAVE, i);
372 if (IS_ERR_VALUE(tmp)) { 374 if (IS_ERR_VALUE_U32(tmp)) {
373 /* no more entries for port _i_ */ 375 /* no more entries for port _i_ */
374 /* pr_debug("erom: slave port %d " 376 /* pr_debug("erom: slave port %d "
375 * "has %d descriptors\n", i, j); */ 377 * "has %d descriptors\n", i, j); */
@@ -386,7 +388,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
386 for (j = 0; ; j++) { 388 for (j = 0; ; j++) {
387 tmp = bcma_erom_get_addr_desc(bus, eromptr, 389 tmp = bcma_erom_get_addr_desc(bus, eromptr,
388 SCAN_ADDR_TYPE_MWRAP, i); 390 SCAN_ADDR_TYPE_MWRAP, i);
389 if (IS_ERR_VALUE(tmp)) { 391 if (IS_ERR_VALUE_U32(tmp)) {
390 /* no more entries for port _i_ */ 392 /* no more entries for port _i_ */
391 /* pr_debug("erom: master wrapper %d " 393 /* pr_debug("erom: master wrapper %d "
392 * "has %d descriptors\n", i, j); */ 394 * "has %d descriptors\n", i, j); */
@@ -404,7 +406,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
404 for (j = 0; ; j++) { 406 for (j = 0; ; j++) {
405 tmp = bcma_erom_get_addr_desc(bus, eromptr, 407 tmp = bcma_erom_get_addr_desc(bus, eromptr,
406 SCAN_ADDR_TYPE_SWRAP, i + hack); 408 SCAN_ADDR_TYPE_SWRAP, i + hack);
407 if (IS_ERR_VALUE(tmp)) { 409 if (IS_ERR_VALUE_U32(tmp)) {
408 /* no more entries for port _i_ */ 410 /* no more entries for port _i_ */
409 /* pr_debug("erom: master wrapper %d " 411 /* pr_debug("erom: master wrapper %d "
410 * has %d descriptors\n", i, j); */ 412 * has %d descriptors\n", i, j); */
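The scan.c change above works around IS_ERR_VALUE() being an unsigned long comparison: when the address-or-errno value lives in a u32, widening it on a 64-bit kernel pushes encoded errors out of the range the generic macro checks. Below is a minimal userspace sketch of that mismatch, assuming the kernel's MAX_ERRNO of 4095; IS_ERR_VALUE_UL is an illustrative stand-in for the generic macro, while IS_ERR_VALUE_U32 matches the definition added in the hunk.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
/* Shape of the generic kernel macro (operates in unsigned long). */
#define IS_ERR_VALUE_UL(x)  ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
/* The u32 variant introduced by the hunk above. */
#define IS_ERR_VALUE_U32(x) ((x) >= (uint32_t)-MAX_ERRNO)

int main(void)
{
	/* A 32-bit EROM descriptor carrying an encoded error. */
	uint32_t tmp = (uint32_t)-EILSEQ;

	/* On an LP64 host the widened value (~4 * 10^9) sits far below
	 * ULONG_MAX - 4095, so the unsigned long check misses the error. */
	printf("unsigned long check: %d\n", IS_ERR_VALUE_UL(tmp));

	/* Comparing in 32-bit space catches it. */
	printf("u32 check:           %d\n", IS_ERR_VALUE_U32(tmp));
	return 0;
}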
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1189 int err; 1189 int err;
1190 u32 cp; 1190 u32 cp;
1191 1191
1192 memset(&arg64, 0, sizeof(arg64));
1192 err = 0; 1193 err = 0;
1193 err |= 1194 err |=
1194 copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 1195 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
1193 ida_pci_info_struct pciinfo; 1193 ida_pci_info_struct pciinfo;
1194 1194
1195 if (!arg) return -EINVAL; 1195 if (!arg) return -EINVAL;
1196 memset(&pciinfo, 0, sizeof(pciinfo));
1196 pciinfo.bus = host->pci_dev->bus->number; 1197 pciinfo.bus = host->pci_dev->bus->number;
1197 pciinfo.dev_fn = host->pci_dev->devfn; 1198 pciinfo.dev_fn = host->pci_dev->devfn;
1198 pciinfo.board_id = host->board_id; 1199 pciinfo.board_id = host->board_id;
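Both memset() additions above close the same class of bug: an on-stack structure is only partially populated before it is copied out or passed further down the ioctl path, so compiler-inserted padding and unwritten members would otherwise carry stale kernel stack bytes. A small userspace sketch of the pattern follows; the struct is loosely modelled on the pciinfo reply and its fields are illustrative, not the driver's exact layout.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct pci_info_reply {
	unsigned char bus;      /* typically followed by 3 padding bytes */
	unsigned int  dev_fn;
	unsigned int  board_id;
};

static void fill_reply(struct pci_info_reply *out)
{
	/* Without this memset the padding bytes keep whatever happened to
	 * be on the stack, and copying the whole struct out discloses it. */
	memset(out, 0, sizeof(*out));
	out->bus = 1;
	out->dev_fn = 0x20;
	out->board_id = 0x3225;
}

int main(void)
{
	struct pci_info_reply info;

	fill_reply(&info);
	/* In the drivers the equivalent of this dump is copy_to_user(). */
	for (size_t i = 0; i < sizeof(info); i++)
		printf("%02x ", ((unsigned char *)&info)[i]);
	printf("\n");
	return 0;
}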
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b22a7d0fe5b7..cb1db2979d3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -931,12 +931,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
931 u64 snap_id) 931 u64 snap_id)
932{ 932{
933 u32 which; 933 u32 which;
934 const char *snap_name;
934 935
935 which = rbd_dev_snap_index(rbd_dev, snap_id); 936 which = rbd_dev_snap_index(rbd_dev, snap_id);
936 if (which == BAD_SNAP_INDEX) 937 if (which == BAD_SNAP_INDEX)
937 return NULL; 938 return ERR_PTR(-ENOENT);
938 939
939 return _rbd_dev_v1_snap_name(rbd_dev, which); 940 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
941 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
940} 942}
941 943
942static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) 944static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
@@ -2812,7 +2814,7 @@ out_err:
2812 obj_request_done_set(obj_request); 2814 obj_request_done_set(obj_request);
2813} 2815}
2814 2816
2815static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) 2817static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2816{ 2818{
2817 struct rbd_obj_request *obj_request; 2819 struct rbd_obj_request *obj_request;
2818 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2820 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
@@ -2827,16 +2829,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2827 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); 2829 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2828 if (!obj_request->osd_req) 2830 if (!obj_request->osd_req)
2829 goto out; 2831 goto out;
2830 obj_request->callback = rbd_obj_request_put;
2831 2832
2832 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, 2833 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2833 notify_id, 0, 0); 2834 notify_id, 0, 0);
2834 rbd_osd_req_format_read(obj_request); 2835 rbd_osd_req_format_read(obj_request);
2835 2836
2836 ret = rbd_obj_request_submit(osdc, obj_request); 2837 ret = rbd_obj_request_submit(osdc, obj_request);
2837out:
2838 if (ret) 2838 if (ret)
2839 rbd_obj_request_put(obj_request); 2839 goto out;
2840 ret = rbd_obj_request_wait(obj_request);
2841out:
2842 rbd_obj_request_put(obj_request);
2840 2843
2841 return ret; 2844 return ret;
2842} 2845}
@@ -2856,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2856 if (ret) 2859 if (ret)
2857 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret); 2860 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2858 2861
2859 rbd_obj_notify_ack(rbd_dev, notify_id); 2862 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2860} 2863}
2861 2864
2862/* 2865/*
@@ -3328,6 +3331,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3328 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 3331 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3329} 3332}
3330 3333
3334static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3335{
3336 sector_t size;
3337 bool removing;
3338
3339 /*
3340 * Don't hold the lock while doing disk operations,
3341 * or lock ordering will conflict with the bdev mutex via:
3342 * rbd_add() -> blkdev_get() -> rbd_open()
3343 */
3344 spin_lock_irq(&rbd_dev->lock);
3345 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3346 spin_unlock_irq(&rbd_dev->lock);
3347 /*
3348 * If the device is being removed, rbd_dev->disk has
3349 * been destroyed, so don't try to update its size
3350 */
3351 if (!removing) {
3352 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3353 dout("setting size to %llu sectors", (unsigned long long)size);
3354 set_capacity(rbd_dev->disk, size);
3355 revalidate_disk(rbd_dev->disk);
3356 }
3357}
3358
3331static int rbd_dev_refresh(struct rbd_device *rbd_dev) 3359static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3332{ 3360{
3333 u64 mapping_size; 3361 u64 mapping_size;
@@ -3347,12 +3375,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3347 up_write(&rbd_dev->header_rwsem); 3375 up_write(&rbd_dev->header_rwsem);
3348 3376
3349 if (mapping_size != rbd_dev->mapping.size) { 3377 if (mapping_size != rbd_dev->mapping.size) {
3350 sector_t size; 3378 rbd_dev_update_size(rbd_dev);
3351
3352 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3353 dout("setting size to %llu sectors", (unsigned long long)size);
3354 set_capacity(rbd_dev->disk, size);
3355 revalidate_disk(rbd_dev->disk);
3356 } 3379 }
3357 3380
3358 return ret; 3381 return ret;
@@ -4061,8 +4084,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4061 4084
4062 snap_id = snapc->snaps[which]; 4085 snap_id = snapc->snaps[which];
4063 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 4086 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4064 if (IS_ERR(snap_name)) 4087 if (IS_ERR(snap_name)) {
4065 break; 4088 /* ignore no-longer existing snapshots */
4089 if (PTR_ERR(snap_name) == -ENOENT)
4090 continue;
4091 else
4092 break;
4093 }
4066 found = !strcmp(name, snap_name); 4094 found = !strcmp(name, snap_name);
4067 kfree(snap_name); 4095 kfree(snap_name);
4068 } 4096 }
@@ -4141,8 +4169,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4141 /* Look up the snapshot name, and make a copy */ 4169 /* Look up the snapshot name, and make a copy */
4142 4170
4143 snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 4171 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4144 if (!snap_name) { 4172 if (IS_ERR(snap_name)) {
4145 ret = -ENOMEM; 4173 ret = PTR_ERR(snap_name);
4146 goto out_err; 4174 goto out_err;
4147 } 4175 }
4148 4176
@@ -5163,10 +5191,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
5163 if (ret < 0 || already) 5191 if (ret < 0 || already)
5164 return ret; 5192 return ret;
5165 5193
5166 rbd_bus_del_dev(rbd_dev);
5167 ret = rbd_dev_header_watch_sync(rbd_dev, false); 5194 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5168 if (ret) 5195 if (ret)
5169 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); 5196 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
5197
5198 /*
5199 * flush remaining watch callbacks - these must be complete
5200 * before the osd_client is shutdown
5201 */
5202 dout("%s: flushing notifies", __func__);
5203 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5204 /*
5205 * Don't free anything from rbd_dev->disk until after all
5206 * notifies are completely processed. Otherwise
5207 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
5208 * in a potential use after free of rbd_dev->disk or rbd_dev.
5209 */
5210 rbd_bus_del_dev(rbd_dev);
5170 rbd_dev_image_release(rbd_dev); 5211 rbd_dev_image_release(rbd_dev);
5171 module_put(THIS_MODULE); 5212 module_put(THIS_MODULE);
5172 5213
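Several of the rbd hunks above move snapshot-name lookups from a NULL-or-pointer convention to ERR_PTR() returns, so callers can distinguish -ENOENT (snapshot gone) from -ENOMEM and forward the real errno, as rbd_dev_spec_update() now does. A minimal userspace sketch of that convention, with stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers and an illustrative lookup_snap_name() in place of rbd_snap_name().

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for the reworked lookup: never NULL, always a name or an
 * encoded errno, so the caller can forward the real cause. */
static const char *lookup_snap_name(int exists, int have_memory)
{
	if (!exists)
		return ERR_PTR(-ENOENT);
	if (!have_memory)
		return ERR_PTR(-ENOMEM);
	return "snap1";
}

int main(void)
{
	const char *name = lookup_snap_name(0, 1);

	if (IS_ERR(name))
		printf("lookup failed: %ld\n", PTR_ERR(name));  /* -ENOENT */
	else
		printf("found %s\n", name);
	return 0;
}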
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
85 { USB_DEVICE(0x04CA, 0x3008) }, 85 { USB_DEVICE(0x04CA, 0x3008) },
86 { USB_DEVICE(0x13d3, 0x3362) }, 86 { USB_DEVICE(0x13d3, 0x3362) },
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) },
88 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
89 { USB_DEVICE(0x0489, 0xe057) }, 90 { USB_DEVICE(0x0489, 0xe057) },
90 { USB_DEVICE(0x13d3, 0x3393) }, 91 { USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
126 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 127 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
127 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 128 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
128 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0b05, 0x17b5) }, 104 { USB_DEVICE(0x0b05, 0x17b5) },
105 { USB_DEVICE(0x0b05, 0x17cb) },
105 { USB_DEVICE(0x04ca, 0x2003) }, 106 { USB_DEVICE(0x04ca, 0x2003) },
106 { USB_DEVICE(0x0489, 0xe042) }, 107 { USB_DEVICE(0x0489, 0xe042) },
107 { USB_DEVICE(0x413c, 0x8197) }, 108 { USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
112 /*Broadcom devices with vendor specific id */ 113 /*Broadcom devices with vendor specific id */
113 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 114 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
114 115
116 /* Belkin F8065bf - Broadcom based */
117 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
118
115 { } /* Terminating entry */ 119 { } /* Terminating entry */
116}; 120};
117 121
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
148 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
149 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 153 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
150 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
151 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
152 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 19ab6ff53d59..2394e9753ef5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
700 phys_addr_t sdramwins_phys_base, 700 phys_addr_t sdramwins_phys_base,
701 size_t sdramwins_size) 701 size_t sdramwins_size)
702{ 702{
703 struct device_node *np;
703 int win; 704 int win;
704 705
705 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); 706 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
712 return -ENOMEM; 713 return -ENOMEM;
713 } 714 }
714 715
715 if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric")) 716 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
717 if (np) {
716 mbus->hw_io_coherency = 1; 718 mbus->hw_io_coherency = 1;
719 of_node_put(np);
720 }
717 721
718 for (win = 0; win < mbus->soc->num_wins; win++) 722 for (win = 0; win < mbus->soc->num_wins; win++)
719 mvebu_mbus_disable_window(mbus, win); 723 mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
861 int ret; 865 int ret;
862 866
863 /* 867 /*
864 * These are optional, so we clear them and they'll 868 * These are optional, so we make sure that resource_size(x) will
865 * be zero if they are missing from the DT. 869 * return 0.
866 */ 870 */
867 memset(mem, 0, sizeof(struct resource)); 871 memset(mem, 0, sizeof(struct resource));
872 mem->end = -1;
868 memset(io, 0, sizeof(struct resource)); 873 memset(io, 0, sizeof(struct resource));
874 io->end = -1;
869 875
870 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); 876 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
871 if (!ret) { 877 if (!ret) {
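The reworded comment in the mvebu-mbus hunk leans on how the kernel computes resource sizes: resource_size() is end - start + 1, so a merely zeroed struct resource reports a size of 1, not 0. Setting end = -1 makes the absent aperture genuinely size 0. A short sketch of that arithmetic, with a local re-definition of the helper for illustration only.

#include <stdio.h>
#include <string.h>

typedef unsigned long long resource_size_t;

struct resource {
	resource_size_t start;
	resource_size_t end;
};

/* Same formula the kernel helper uses. */
static resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource mem;

	memset(&mem, 0, sizeof(mem));
	printf("zeroed only:   %llu\n", resource_size(&mem));  /* 1 */

	mem.end = (resource_size_t)-1;
	printf("with end = -1: %llu\n", resource_size(&mem));  /* 0 */
	return 0;
}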
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..06189e55b4e5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
142 return length; 142 return length;
143} 143}
144 144
145ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
146 char *buf)
147{
148 struct tpm_chip *chip = dev_get_drvdata(dev);
149 struct tpm_private *priv = TPM_VPRIV(chip);
150 u8 locality = priv->shr->locality;
151
152 return sprintf(buf, "%d\n", locality);
153}
154
155ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
156 const char *buf, size_t len)
157{
158 struct tpm_chip *chip = dev_get_drvdata(dev);
159 struct tpm_private *priv = TPM_VPRIV(chip);
160 u8 val;
161
162 int rv = kstrtou8(buf, 0, &val);
163 if (rv)
164 return rv;
165
166 priv->shr->locality = val;
167
168 return len;
169}
170
171static const struct file_operations vtpm_ops = { 145static const struct file_operations vtpm_ops = {
172 .owner = THIS_MODULE, 146 .owner = THIS_MODULE,
173 .llseek = no_llseek, 147 .llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
188static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 162static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
189static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 163static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
190static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 164static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
191static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
192 tpm_store_locality);
193 165
194static struct attribute *vtpm_attrs[] = { 166static struct attribute *vtpm_attrs[] = {
195 &dev_attr_pubek.attr, 167 &dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
202 &dev_attr_cancel.attr, 174 &dev_attr_cancel.attr,
203 &dev_attr_durations.attr, 175 &dev_attr_durations.attr,
204 &dev_attr_timeouts.attr, 176 &dev_attr_timeouts.attr,
205 &dev_attr_locality.attr,
206 NULL, 177 NULL,
207}; 178};
208 179
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
210 .attrs = vtpm_attrs, 181 .attrs = vtpm_attrs,
211}; 182};
212 183
213#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
214
215static const struct tpm_vendor_specific tpm_vtpm = { 184static const struct tpm_vendor_specific tpm_vtpm = {
216 .status = vtpm_status, 185 .status = vtpm_status,
217 .recv = vtpm_recv, 186 .recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
224 .miscdev = { 193 .miscdev = {
225 .fops = &vtpm_ops, 194 .fops = &vtpm_ops,
226 }, 195 },
227 .duration = {
228 TPM_LONG_TIMEOUT,
229 TPM_LONG_TIMEOUT,
230 TPM_LONG_TIMEOUT,
231 },
232}; 196};
233 197
234static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) 198static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
26 26
27config ARMADA_370_XP_TIMER 27config ARMADA_370_XP_TIMER
28 bool 28 bool
29 select CLKSRC_OF
29 30
30config ORION_TIMER 31config ORION_TIMER
31 select CLKSRC_OF 32 select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
30 clocksource_of_init_fn init_func; 30 clocksource_of_init_fn init_func;
31 31
32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
33 if (!of_device_is_available(np))
34 continue;
35
33 init_func = match->data; 36 init_func = match->data;
34 init_func(np); 37 init_func(np);
35 } 38 }
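The clksrc-of change skips device-tree nodes whose status property marks them unavailable, so a matching but disabled timer is never initialised. A rough userspace sketch of that filter, standing in for for_each_matching_node_and_match() and of_device_is_available(); by DT convention a missing status, "okay" or "ok" means available.

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	const char *status;   /* NULL stands for "no status property" */
};

static int node_is_available(const struct node *np)
{
	if (!np->status)
		return 1;
	return !strcmp(np->status, "okay") || !strcmp(np->status, "ok");
}

int main(void)
{
	struct node nodes[] = {
		{ "timer@a", NULL },
		{ "timer@b", "disabled" },
	};

	for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		if (!node_is_available(&nodes[i]))
			continue;   /* mirrors the new early continue */
		printf("init %s\n", nodes[i].name);
	}
	return 0;
}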
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
301 ced->name = dev_name(&p->pdev->dev); 301 ced->name = dev_name(&p->pdev->dev);
302 ced->features = CLOCK_EVT_FEAT_ONESHOT; 302 ced->features = CLOCK_EVT_FEAT_ONESHOT;
303 ced->rating = 200; 303 ced->rating = 200;
304 ced->cpumask = cpumask_of(0); 304 ced->cpumask = cpu_possible_mask;
305 ced->set_next_event = em_sti_clock_event_next; 305 ced->set_next_event = em_sti_clock_event_next;
306 ced->set_mode = em_sti_clock_event_mode; 306 ced->set_mode = em_sti_clock_event_mode;
307 307
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
428 evt->irq); 428 evt->irq);
429 return -EIO; 429 return -EIO;
430 } 430 }
431 irq_set_affinity(evt->irq, cpumask_of(cpu));
432 } else { 431 } else {
433 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); 432 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
434 } 433 }
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
449 unsigned long action, void *hcpu) 448 unsigned long action, void *hcpu)
450{ 449{
451 struct mct_clock_event_device *mevt; 450 struct mct_clock_event_device *mevt;
451 unsigned int cpu;
452 452
453 /* 453 /*
454 * Grab cpu pointer in each case to avoid spurious 454 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
459 mevt = this_cpu_ptr(&percpu_mct_tick); 459 mevt = this_cpu_ptr(&percpu_mct_tick);
460 exynos4_local_timer_setup(&mevt->evt); 460 exynos4_local_timer_setup(&mevt->evt);
461 break; 461 break;
462 case CPU_ONLINE:
463 cpu = (unsigned long)hcpu;
464 if (mct_int_type == MCT_INT_SPI)
465 irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
466 cpumask_of(cpu));
467 break;
462 case CPU_DYING: 468 case CPU_DYING:
463 mevt = this_cpu_ptr(&percpu_mct_tick); 469 mevt = this_cpu_ptr(&percpu_mct_tick);
464 exynos4_local_timer_stop(&mevt->evt); 470 exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
500 &percpu_mct_tick); 506 &percpu_mct_tick);
501 WARN(err, "MCT: can't request IRQ %d (%d)\n", 507 WARN(err, "MCT: can't request IRQ %d (%d)\n",
502 mct_irqs[MCT_L0_IRQ], err); 508 mct_irqs[MCT_L0_IRQ], err);
509 } else {
510 irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
503 } 511 }
504 512
505 err = register_cpu_notifier(&exynos4_mct_cpu_nb); 513 err = register_cpu_notifier(&exynos4_mct_cpu_nb);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..d2c3253e015e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver())
991 return 0;
992
989 if (acpi_disabled) 993 if (acpi_disabled)
990 return 0; 994 return 0;
991 995
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index cbfffa91ebdd..c522a95c0e16 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,6 +12,7 @@
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/cpu.h>
15#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
16#include <linux/err.h> 17#include <linux/err.h>
17#include <linux/module.h> 18#include <linux/module.h>
@@ -177,7 +178,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
177 struct device_node *np; 178 struct device_node *np;
178 int ret; 179 int ret;
179 180
180 cpu_dev = &pdev->dev; 181 cpu_dev = get_cpu_device(0);
182 if (!cpu_dev) {
183 pr_err("failed to get cpu0 device\n");
184 return -ENODEV;
185 }
181 186
182 np = of_node_get(cpu_dev->of_node); 187 np = of_node_get(cpu_dev->of_node);
183 if (!np) { 188 if (!np) {
@@ -224,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
224 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 229 if (of_property_read_u32(np, "clock-latency", &transition_latency))
225 transition_latency = CPUFREQ_ETERNAL; 230 transition_latency = CPUFREQ_ETERNAL;
226 231
227 if (cpu_reg) { 232 if (!IS_ERR(cpu_reg)) {
228 struct opp *opp; 233 struct opp *opp;
229 unsigned long min_uV, max_uV; 234 unsigned long min_uV, max_uV;
230 int i; 235 int i;
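The `if (cpu_reg)` fix above matters because regulator lookups of this style return an ERR_PTR-encoded errno rather than NULL on failure, so a plain truthiness test treats the failure as a usable regulator. A tiny sketch of the difference, again with userspace stand-ins for the kernel pointer-error helpers; the -ENODEV value is only an example.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *cpu_reg = ERR_PTR(-ENODEV);   /* failed regulator lookup */

	if (cpu_reg)
		printf("truthiness check: looks valid (wrong)\n");

	if (IS_ERR(cpu_reg))
		printf("IS_ERR check: error %ld\n", PTR_ERR(cpu_reg));
	return 0;
}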
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 43c24aa756f6..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -952,9 +952,20 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
952 if (cpu == policy->cpu) 952 if (cpu == policy->cpu)
953 return; 953 return;
954 954
955 /*
956 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
957 * Also lock for last cpu is enough here as contention will happen only
958 * after policy->cpu is changed and after it is changed, other threads
959 * will try to acquire lock for new cpu. And policy is already updated
960 * by then.
961 */
962 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
963
955 policy->last_cpu = policy->cpu; 964 policy->last_cpu = policy->cpu;
956 policy->cpu = cpu; 965 policy->cpu = cpu;
957 966
967 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
968
958#ifdef CONFIG_CPU_FREQ_TABLE 969#ifdef CONFIG_CPU_FREQ_TABLE
959 cpufreq_frequency_table_update_policy_cpu(policy); 970 cpufreq_frequency_table_update_policy_cpu(policy);
960#endif 971#endif
@@ -1125,7 +1136,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1125 int ret; 1136 int ret;
1126 1137
1127 /* first sibling now owns the new sysfs dir */ 1138 /* first sibling now owns the new sysfs dir */
1128 cpu_dev = get_cpu_device(cpumask_first(policy->cpus)); 1139 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1129 1140
1130 /* Don't touch sysfs files during light-weight tear-down */ 1141 /* Don't touch sysfs files during light-weight tear-down */
1131 if (frozen) 1142 if (frozen)
@@ -1189,12 +1200,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1189 policy->governor->name, CPUFREQ_NAME_LEN); 1200 policy->governor->name, CPUFREQ_NAME_LEN);
1190#endif 1201#endif
1191 1202
1192 WARN_ON(lock_policy_rwsem_write(cpu)); 1203 lock_policy_rwsem_read(cpu);
1193 cpus = cpumask_weight(policy->cpus); 1204 cpus = cpumask_weight(policy->cpus);
1194 1205 unlock_policy_rwsem_read(cpu);
1195 if (cpus > 1)
1196 cpumask_clear_cpu(cpu, policy->cpus);
1197 unlock_policy_rwsem_write(cpu);
1198 1206
1199 if (cpu != policy->cpu) { 1207 if (cpu != policy->cpu) {
1200 if (!frozen) 1208 if (!frozen)
@@ -1203,9 +1211,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1203 1211
1204 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1212 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1205 if (new_cpu >= 0) { 1213 if (new_cpu >= 0) {
1206 WARN_ON(lock_policy_rwsem_write(cpu));
1207 update_policy_cpu(policy, new_cpu); 1214 update_policy_cpu(policy, new_cpu);
1208 unlock_policy_rwsem_write(cpu);
1209 1215
1210 if (!frozen) { 1216 if (!frozen) {
1211 pr_debug("%s: policy Kobject moved to cpu: %d " 1217 pr_debug("%s: policy Kobject moved to cpu: %d "
@@ -1237,9 +1243,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1237 return -EINVAL; 1243 return -EINVAL;
1238 } 1244 }
1239 1245
1240 lock_policy_rwsem_read(cpu); 1246 WARN_ON(lock_policy_rwsem_write(cpu));
1241 cpus = cpumask_weight(policy->cpus); 1247 cpus = cpumask_weight(policy->cpus);
1242 unlock_policy_rwsem_read(cpu); 1248
1249 if (cpus > 1)
1250 cpumask_clear_cpu(cpu, policy->cpus);
1251 unlock_policy_rwsem_write(cpu);
1243 1252
1244 /* If cpu is last user of policy, free policy */ 1253 /* If cpu is last user of policy, free policy */
1245 if (cpus == 1) { 1254 if (cpus == 1) {
@@ -1451,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
1451{ 1460{
1452 unsigned int ret_freq = 0; 1461 unsigned int ret_freq = 0;
1453 1462
1463 if (cpufreq_disabled() || !cpufreq_driver)
1464 return -ENOENT;
1465
1454 if (!down_read_trylock(&cpufreq_rwsem)) 1466 if (!down_read_trylock(&cpufreq_rwsem))
1455 return 0; 1467 return 0;
1456 1468
@@ -2095,7 +2107,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2095 write_lock_irqsave(&cpufreq_driver_lock, flags); 2107 write_lock_irqsave(&cpufreq_driver_lock, flags);
2096 if (cpufreq_driver) { 2108 if (cpufreq_driver) {
2097 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2109 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2098 return -EBUSY; 2110 return -EEXIST;
2099 } 2111 }
2100 cpufreq_driver = driver_data; 2112 cpufreq_driver = driver_data;
2101 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2113 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
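The comment added in update_policy_cpu() describes a lock hand-off: the write lock of the current owner CPU is held while policy->cpu changes, and once it has changed, later contenders queue on the new owner's lock and observe the updated policy. A compressed pthread sketch of that idea, purely illustrative and ignoring the rest of the cpufreq locking.

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_rwlock_t cpu_policy_rwsem[NR_CPUS];

struct policy {
	int cpu;        /* readers take cpu_policy_rwsem[policy->cpu] */
	int last_cpu;
};

/* Take the current owner's write lock, swap the owner, release the old
 * owner's lock; new readers then serialise on the new owner's lock. */
static void update_policy_cpu(struct policy *policy, int cpu)
{
	if (cpu == policy->cpu)
		return;

	pthread_rwlock_wrlock(&cpu_policy_rwsem[policy->cpu]);
	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;
	pthread_rwlock_unlock(&cpu_policy_rwsem[policy->last_cpu]);
}

int main(void)
{
	struct policy p = { .cpu = 0, .last_cpu = 0 };

	for (int i = 0; i < NR_CPUS; i++)
		pthread_rwlock_init(&cpu_policy_rwsem[i], NULL);

	update_policy_cpu(&p, 2);
	printf("policy now owned by cpu %d (was cpu %d)\n", p.cpu, p.last_cpu);
	return 0;
}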
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
458err_put_node: 458err_put_node:
459 of_node_put(np); 459 of_node_put(np);
460 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); 460 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
461 return ret; 461 return ret;
462} 462}
463 463
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 3e396543aea4..c3fd2a101ca0 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/cpu.h>
10#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
11#include <linux/delay.h> 12#include <linux/delay.h>
12#include <linux/err.h> 13#include <linux/err.h>
@@ -202,7 +203,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
202 unsigned long min_volt, max_volt; 203 unsigned long min_volt, max_volt;
203 int num, ret; 204 int num, ret;
204 205
205 cpu_dev = &pdev->dev; 206 cpu_dev = get_cpu_device(0);
207 if (!cpu_dev) {
208 pr_err("failed to get cpu0 device\n");
209 return -ENODEV;
210 }
206 211
207 np = of_node_get(cpu_dev->of_node); 212 np = of_node_get(cpu_dev->of_node);
208 if (!np) { 213 if (!np) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9733f29ed148..32b3479a2405 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 394 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 395
396 cpu->pstate.current_pstate = pstate; 396 cpu->pstate.current_pstate = pstate;
397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 397 if (limits.no_turbo)
398 wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
399 else
400 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
398 401
399} 402}
400 403
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 19e364fa5955..3f418166ce02 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
113 unsigned int target_freq, unsigned int relation) 113 unsigned int target_freq, unsigned int relation)
114{ 114{
115 struct cpufreq_freqs freqs; 115 struct cpufreq_freqs freqs;
116 unsigned long newfreq; 116 long newfreq;
117 struct clk *srcclk; 117 struct clk *srcclk;
118 int index, ret, mult = 1; 118 int index, ret, mult = 1;
119 119
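The spear-cpufreq type change is about signedness: the rounded frequency ultimately comes from a clk_round_rate()-style helper that reports failures as negative values, and storing the result in an unsigned long makes the later error check unreachable. A minimal sketch of the bug under that assumption; round_rate() here is an illustrative stand-in.

#include <errno.h>
#include <stdio.h>

/* Stand-in for clk_round_rate(): a rate on success, -errno on failure. */
static long round_rate(long requested)
{
	if (requested <= 0)
		return -EINVAL;
	return requested / 1000 * 1000;
}

int main(void)
{
	unsigned long bad  = round_rate(-1);  /* wraps to a huge value */
	long          good = round_rate(-1);

	printf("unsigned local: %lu (negative result invisible)\n", bad);
	if (good < 0)
		printf("signed local:   %ld (error caught)\n", good);
	return 0;
}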
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
201 select TI_PRIV_EDMA
201 default n 202 default n
202 help 203 help
203 Enable support for the TI EDMA controller. This DMA 204 Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..098a8da450f0 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -749,6 +749,6 @@ static void __exit edma_exit(void)
749} 749}
750module_exit(edma_exit); 750module_exit(edma_exit);
751 751
752MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 752MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
753MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 753MODULE_DESCRIPTION("TI EDMA DMA engine driver");
754MODULE_LICENSE("GPL v2"); 754MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags;
440 441
441 spin_lock(&imxdma->lock); 442 spin_lock_irqsave(&imxdma->lock, flags);
442 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
443 spin_unlock(&imxdma->lock); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
444 goto out; 445 goto out;
445 } 446 }
446 447
447 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
448 struct imxdma_desc, 449 struct imxdma_desc,
449 node); 450 node);
450 spin_unlock(&imxdma->lock); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
451 452
452 if (desc->sg) { 453 if (desc->sg) {
453 u32 tmp; 454 u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
519{ 520{
520 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
521 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 unsigned long flags;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 spin_lock_irqsave(&imxdma->lock, flags);
531 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
532 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
533 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
537 slot = i; 536 slot = i;
538 break; 537 break;
539 } 538 }
540 if (slot < 0) { 539 if (slot < 0)
541 spin_unlock_irqrestore(&imxdma->lock, flags);
542 return -EBUSY; 540 return -EBUSY;
543 }
544 541
545 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
546 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
549 546
550 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
551 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
552 spin_unlock_irqrestore(&imxdma->lock, flags);
553 549
554 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
555 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
625 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
626 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
627 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags;
628 625
629 spin_lock(&imxdma->lock); 626 spin_lock_irqsave(&imxdma->lock, flags);
630 627
631 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
632 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
633 goto out; 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return;
634 } 632 }
635 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
636 634
637 if (desc->desc.callback)
638 desc->desc.callback(desc->desc.callback_param);
639
640 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
641 * and dont mark the descriptor as complete. 636 * and dont mark the descriptor as complete.
642 * Only in non-cyclic cases it would be marked as complete 637 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
663 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
664 } 659 }
665out: 660out:
666 spin_unlock(&imxdma->lock); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662
663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param);
665
667} 666}
668 667
669static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
883 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
884 883
885 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
886 sizeof(struct scatterlist), GFP_KERNEL); 885 sizeof(struct scatterlist), GFP_ATOMIC);
887 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
888 return NULL; 887 return NULL;
889 888
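Besides switching to the irqsave lock variants, the imx-dma tasklet now drops the engine lock before invoking the descriptor's completion callback; a callback is free to call back into the driver (for example to queue the next transfer), which would deadlock on a lock the tasklet still held. A pthread sketch of that hazard and the fix, with illustrative names.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t engine_lock = PTHREAD_MUTEX_INITIALIZER;

/* A completion callback that re-enters the "driver". */
static void submit_next(void)
{
	pthread_mutex_lock(&engine_lock);   /* deadlocks if the caller is
					     * still holding engine_lock */
	puts("next descriptor queued");
	pthread_mutex_unlock(&engine_lock);
}

static void completion_tasklet(void (*callback)(void))
{
	pthread_mutex_lock(&engine_lock);
	puts("descriptor completed");       /* list manipulation goes here */
	pthread_mutex_unlock(&engine_lock);

	callback();                         /* safe: lock already dropped */
}

int main(void)
{
	completion_tasklet(submit_next);
	return 0;
}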
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ff43552d472..89675f862308 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@ struct gpio_bank {
63 struct gpio_chip chip; 63 struct gpio_chip chip;
64 struct clk *dbck; 64 struct clk *dbck;
65 u32 mod_usage; 65 u32 mod_usage;
66 u32 irq_usage;
66 u32 dbck_enable_mask; 67 u32 dbck_enable_mask;
67 bool dbck_enabled; 68 bool dbck_enabled;
68 struct device *dev; 69 struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
86#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
87#define GPIO_MOD_CTRL_BIT BIT(0) 88#define GPIO_MOD_CTRL_BIT BIT(0)
88 89
90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
91#define LINE_USED(line, offset) (line & (1 << offset))
92
89static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
90{ 94{
91 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
420 return 0; 424 return 0;
421} 425}
422 426
427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
428{
429 if (bank->regs->pinctrl) {
430 void __iomem *reg = bank->base + bank->regs->pinctrl;
431
432 /* Claim the pin for MPU */
433 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
434 }
435
436 if (bank->regs->ctrl && !BANK_USED(bank)) {
437 void __iomem *reg = bank->base + bank->regs->ctrl;
438 u32 ctrl;
439
440 ctrl = __raw_readl(reg);
441 /* Module is enabled, clocks are not gated */
442 ctrl &= ~GPIO_MOD_CTRL_BIT;
443 __raw_writel(ctrl, reg);
444 bank->context.ctrl = ctrl;
445 }
446}
447
448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
449{
450 void __iomem *base = bank->base;
451
452 if (bank->regs->wkup_en &&
453 !LINE_USED(bank->mod_usage, offset) &&
454 !LINE_USED(bank->irq_usage, offset)) {
455 /* Disable wake-up during idle for dynamic tick */
456 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
457 bank->context.wake_en =
458 __raw_readl(bank->base + bank->regs->wkup_en);
459 }
460
461 if (bank->regs->ctrl && !BANK_USED(bank)) {
462 void __iomem *reg = bank->base + bank->regs->ctrl;
463 u32 ctrl;
464
465 ctrl = __raw_readl(reg);
466 /* Module is disabled, clocks are gated */
467 ctrl |= GPIO_MOD_CTRL_BIT;
468 __raw_writel(ctrl, reg);
469 bank->context.ctrl = ctrl;
470 }
471}
472
473static int gpio_is_input(struct gpio_bank *bank, int mask)
474{
475 void __iomem *reg = bank->base + bank->regs->direction;
476
477 return __raw_readl(reg) & mask;
478}
479
423static int gpio_irq_type(struct irq_data *d, unsigned type) 480static int gpio_irq_type(struct irq_data *d, unsigned type)
424{ 481{
425 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 482 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
426 unsigned gpio = 0; 483 unsigned gpio = 0;
427 int retval; 484 int retval;
428 unsigned long flags; 485 unsigned long flags;
486 unsigned offset;
429 487
430 if (WARN_ON(!bank->mod_usage)) 488 if (!BANK_USED(bank))
431 return -EINVAL; 489 pm_runtime_get_sync(bank->dev);
432 490
433#ifdef CONFIG_ARCH_OMAP1 491#ifdef CONFIG_ARCH_OMAP1
434 if (d->irq > IH_MPUIO_BASE) 492 if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
446 return -EINVAL; 504 return -EINVAL;
447 505
448 spin_lock_irqsave(&bank->lock, flags); 506 spin_lock_irqsave(&bank->lock, flags);
449 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 507 offset = GPIO_INDEX(bank, gpio);
508 retval = _set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) {
510 _enable_gpio_module(bank, offset);
511 _set_gpio_direction(bank, offset, 1);
512 } else if (!gpio_is_input(bank, 1 << offset)) {
513 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL;
515 }
516
517 bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
450 spin_unlock_irqrestore(&bank->lock, flags); 518 spin_unlock_irqrestore(&bank->lock, flags);
451 519
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
603 * If this is the first gpio_request for the bank, 671 * If this is the first gpio_request for the bank,
604 * enable the bank module. 672 * enable the bank module.
605 */ 673 */
606 if (!bank->mod_usage) 674 if (!BANK_USED(bank))
607 pm_runtime_get_sync(bank->dev); 675 pm_runtime_get_sync(bank->dev);
608 676
609 spin_lock_irqsave(&bank->lock, flags); 677 spin_lock_irqsave(&bank->lock, flags);
610 /* Set trigger to none. You need to enable the desired trigger with 678 /* Set trigger to none. You need to enable the desired trigger with
611 * request_irq() or set_irq_type(). 679 * request_irq() or set_irq_type(). Only do this if the IRQ line has
680 * not already been requested.
612 */ 681 */
613 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 682 if (!LINE_USED(bank->irq_usage, offset)) {
614 683 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
615 if (bank->regs->pinctrl) { 684 _enable_gpio_module(bank, offset);
616 void __iomem *reg = bank->base + bank->regs->pinctrl;
617
618 /* Claim the pin for MPU */
619 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
620 }
621
622 if (bank->regs->ctrl && !bank->mod_usage) {
623 void __iomem *reg = bank->base + bank->regs->ctrl;
624 u32 ctrl;
625
626 ctrl = __raw_readl(reg);
627 /* Module is enabled, clocks are not gated */
628 ctrl &= ~GPIO_MOD_CTRL_BIT;
629 __raw_writel(ctrl, reg);
630 bank->context.ctrl = ctrl;
631 } 685 }
632
633 bank->mod_usage |= 1 << offset; 686 bank->mod_usage |= 1 << offset;
634
635 spin_unlock_irqrestore(&bank->lock, flags); 687 spin_unlock_irqrestore(&bank->lock, flags);
636 688
637 return 0; 689 return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
640static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
641{ 693{
642 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 694 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
643 void __iomem *base = bank->base;
644 unsigned long flags; 695 unsigned long flags;
645 696
646 spin_lock_irqsave(&bank->lock, flags); 697 spin_lock_irqsave(&bank->lock, flags);
647
648 if (bank->regs->wkup_en) {
649 /* Disable wake-up during idle for dynamic tick */
650 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
651 bank->context.wake_en =
652 __raw_readl(bank->base + bank->regs->wkup_en);
653 }
654
655 bank->mod_usage &= ~(1 << offset); 698 bank->mod_usage &= ~(1 << offset);
656 699 _disable_gpio_module(bank, offset);
657 if (bank->regs->ctrl && !bank->mod_usage) {
658 void __iomem *reg = bank->base + bank->regs->ctrl;
659 u32 ctrl;
660
661 ctrl = __raw_readl(reg);
662 /* Module is disabled, clocks are gated */
663 ctrl |= GPIO_MOD_CTRL_BIT;
664 __raw_writel(ctrl, reg);
665 bank->context.ctrl = ctrl;
666 }
667
668 _reset_gpio(bank, bank->chip.base + offset); 700 _reset_gpio(bank, bank->chip.base + offset);
669 spin_unlock_irqrestore(&bank->lock, flags); 701 spin_unlock_irqrestore(&bank->lock, flags);
670 702
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
672 * If this is the last gpio to be freed in the bank, 704 * If this is the last gpio to be freed in the bank,
673 * disable the bank module. 705 * disable the bank module.
674 */ 706 */
675 if (!bank->mod_usage) 707 if (!BANK_USED(bank))
676 pm_runtime_put(bank->dev); 708 pm_runtime_put(bank->dev);
677} 709}
678 710
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
762 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 794 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
763 unsigned int gpio = irq_to_gpio(bank, d->hwirq); 795 unsigned int gpio = irq_to_gpio(bank, d->hwirq);
764 unsigned long flags; 796 unsigned long flags;
797 unsigned offset = GPIO_INDEX(bank, gpio);
765 798
766 spin_lock_irqsave(&bank->lock, flags); 799 spin_lock_irqsave(&bank->lock, flags);
800 bank->irq_usage &= ~(1 << offset);
801 _disable_gpio_module(bank, offset);
767 _reset_gpio(bank, gpio); 802 _reset_gpio(bank, gpio);
768 spin_unlock_irqrestore(&bank->lock, flags); 803 spin_unlock_irqrestore(&bank->lock, flags);
804
805 /*
806 * If this is the last IRQ to be freed in the bank,
807 * disable the bank module.
808 */
809 if (!BANK_USED(bank))
810 pm_runtime_put(bank->dev);
769} 811}
770 812
771static void gpio_ack_irq(struct irq_data *d) 813static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
897 return 0; 939 return 0;
898} 940}
899 941
900static int gpio_is_input(struct gpio_bank *bank, int mask)
901{
902 void __iomem *reg = bank->base + bank->regs->direction;
903
904 return __raw_readl(reg) & mask;
905}
906
907static int gpio_get(struct gpio_chip *chip, unsigned offset) 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
908{ 943{
909 struct gpio_bank *bank; 944 struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
922{ 957{
923 struct gpio_bank *bank; 958 struct gpio_bank *bank;
924 unsigned long flags; 959 unsigned long flags;
960 int retval = 0;
925 961
926 bank = container_of(chip, struct gpio_bank, chip); 962 bank = container_of(chip, struct gpio_bank, chip);
927 spin_lock_irqsave(&bank->lock, flags); 963 spin_lock_irqsave(&bank->lock, flags);
964
965 if (LINE_USED(bank->irq_usage, offset)) {
966 retval = -EINVAL;
967 goto exit;
968 }
969
928 bank->set_dataout(bank, offset, value); 970 bank->set_dataout(bank, offset, value);
929 _set_gpio_direction(bank, offset, 0); 971 _set_gpio_direction(bank, offset, 0);
972
973exit:
930 spin_unlock_irqrestore(&bank->lock, flags); 974 spin_unlock_irqrestore(&bank->lock, flags);
931 return 0; 975 return retval;
932} 976}
933 977
934static int gpio_debounce(struct gpio_chip *chip, unsigned offset, 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
1400 struct gpio_bank *bank; 1444 struct gpio_bank *bank;
1401 1445
1402 list_for_each_entry(bank, &omap_gpio_list, node) { 1446 list_for_each_entry(bank, &omap_gpio_list, node) {
1403 if (!bank->mod_usage || !bank->loses_context) 1447 if (!BANK_USED(bank) || !bank->loses_context)
1404 continue; 1448 continue;
1405 1449
1406 bank->power_mode = pwr_mode; 1450 bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
1414 struct gpio_bank *bank; 1458 struct gpio_bank *bank;
1415 1459
1416 list_for_each_entry(bank, &omap_gpio_list, node) { 1460 list_for_each_entry(bank, &omap_gpio_list, node) {
1417 if (!bank->mod_usage || !bank->loses_context) 1461 if (!BANK_USED(bank) || !bank->loses_context)
1418 continue; 1462 continue;
1419 1463
1420 pm_runtime_get_sync(bank->dev); 1464 pm_runtime_get_sync(bank->dev);
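The gpio-omap rework keeps two usage bitmaps per bank so that a line requested only as an interrupt still powers the module up, and a line already in use as an IRQ is refused as an output. The BANK_USED/LINE_USED macros below mirror the ones added in the hunk; the surrounding harness is an illustrative userspace sketch.

#include <stdint.h>
#include <stdio.h>

struct gpio_bank {
	uint32_t mod_usage;   /* lines claimed through gpio_request() */
	uint32_t irq_usage;   /* lines claimed as interrupt sources */
};

#define BANK_USED(bank)         ((bank)->mod_usage || (bank)->irq_usage)
#define LINE_USED(line, offset) ((line) & (1u << (offset)))

int main(void)
{
	struct gpio_bank bank = { 0, 0 };

	/* IRQ requested without a prior gpio_request(): the bank still
	 * counts as used, so runtime PM keeps the module enabled. */
	bank.irq_usage |= 1u << 5;
	printf("bank in use: %d\n", BANK_USED(&bank) ? 1 : 0);

	/* The new gpio_output() check: an IRQ line must stay an input. */
	if (LINE_USED(bank.irq_usage, 5))
		printf("line 5 is an IRQ line, refusing output\n");
	return 0;
}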
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e3745eb07570..6038966ab045 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
293 if (pdata) { 293 if (pdata) {
294 p->config = *pdata; 294 p->config = *pdata;
295 } else if (IS_ENABLED(CONFIG_OF) && np) { 295 } else if (IS_ENABLED(CONFIG_OF) && np) {
296 ret = of_parse_phandle_with_args(np, "gpio-ranges", 296 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
297 "#gpio-range-cells", 0, &args); 297 &args);
298 p->config.number_of_pins = ret == 0 && args.args_count == 3 298 p->config.number_of_pins = ret == 0 ? args.args[2]
299 ? args.args[2]
300 : RCAR_MAX_GPIO_PER_BANK; 299 : RCAR_MAX_GPIO_PER_BANK;
301 p->config.gpio_base = -1; 300 p->config.gpio_base = -1;
302 } 301 }
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb212a41..8492b68e873c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
177 177
178static inline void ast_open_key(struct ast_private *ast) 178static inline void ast_open_key(struct ast_private *ast)
179{ 179{
180 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04); 180 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
181} 181}
182 182
183#define AST_VIDMEM_SIZE_8M 0x00800000 183#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index b4fb86d89850..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,6 +42,10 @@
42 42
43#include <drm/drmP.h> 43#include <drm/drmP.h>
44 44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
45/** 49/**
46 * Free a handle from the context bitmap. 50 * Free a handle from the context bitmap.
47 * 51 *
@@ -52,48 +56,13 @@
52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
53 * lock. 57 * lock.
54 */ 58 */
55static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
56{ 60{
57 if (drm_core_check_feature(dev, DRIVER_MODESET))
58 return;
59
60 mutex_lock(&dev->struct_mutex); 61 mutex_lock(&dev->struct_mutex);
61 idr_remove(&dev->ctx_idr, ctx_handle); 62 idr_remove(&dev->ctx_idr, ctx_handle);
62 mutex_unlock(&dev->struct_mutex); 63 mutex_unlock(&dev->struct_mutex);
63} 64}
64 65
65/******************************************************************/
66/** \name Context bitmap support */
67/*@{*/
68
69void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70 struct drm_file *file_priv)
71{
72 if (drm_core_check_feature(dev, DRIVER_MODESET))
73 return;
74
75 mutex_lock(&dev->ctxlist_mutex);
76 if (!list_empty(&dev->ctxlist)) {
77 struct drm_ctx_list *pos, *n;
78
79 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80 if (pos->tag == file_priv &&
81 pos->handle != DRM_KERNEL_CONTEXT) {
82 if (dev->driver->context_dtor)
83 dev->driver->context_dtor(dev,
84 pos->handle);
85
86 drm_ctxbitmap_free(dev, pos->handle);
87
88 list_del(&pos->head);
89 kfree(pos);
90 --dev->ctx_count;
91 }
92 }
93 }
94 mutex_unlock(&dev->ctxlist_mutex);
95}
96
97/** 66/**
98 * Context bitmap allocation. 67 * Context bitmap allocation.
99 * 68 *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
121 * 90 *
122 * Initialise the drm_device::ctx_idr 91 * Initialise the drm_device::ctx_idr
123 */ 92 */
124void drm_legacy_ctxbitmap_init(struct drm_device * dev) 93int drm_ctxbitmap_init(struct drm_device * dev)
125{ 94{
126 if (drm_core_check_feature(dev, DRIVER_MODESET))
127 return;
128
129 idr_init(&dev->ctx_idr); 95 idr_init(&dev->ctx_idr);
96 return 0;
130} 97}
131 98
132/** 99/**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
137 * Free all idr members using drm_ctx_sarea_free helper function 104 * Free all idr members using drm_ctx_sarea_free helper function
138 * while holding the drm_device::struct_mutex lock. 105 * while holding the drm_device::struct_mutex lock.
139 */ 106 */
140void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) 107void drm_ctxbitmap_cleanup(struct drm_device * dev)
141{ 108{
142 mutex_lock(&dev->struct_mutex); 109 mutex_lock(&dev->struct_mutex);
143 idr_destroy(&dev->ctx_idr); 110 idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
169 struct drm_local_map *map; 136 struct drm_local_map *map;
170 struct drm_map_list *_entry; 137 struct drm_map_list *_entry;
171 138
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
173 return -EINVAL;
174
175 mutex_lock(&dev->struct_mutex); 139 mutex_lock(&dev->struct_mutex);
176 140
177 map = idr_find(&dev->ctx_idr, request->ctx_id); 141 map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
216 struct drm_local_map *map = NULL; 180 struct drm_local_map *map = NULL;
217 struct drm_map_list *r_list = NULL; 181 struct drm_map_list *r_list = NULL;
218 182
219 if (drm_core_check_feature(dev, DRIVER_MODESET))
220 return -EINVAL;
221
222 mutex_lock(&dev->struct_mutex); 183 mutex_lock(&dev->struct_mutex);
223 list_for_each_entry(r_list, &dev->maplist, head) { 184 list_for_each_entry(r_list, &dev->maplist, head) {
224 if (r_list->map 185 if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
319 struct drm_ctx ctx; 280 struct drm_ctx ctx;
320 int i; 281 int i;
321 282
322 if (drm_core_check_feature(dev, DRIVER_MODESET))
323 return -EINVAL;
324
325 if (res->count >= DRM_RESERVED_CONTEXTS) { 283 if (res->count >= DRM_RESERVED_CONTEXTS) {
326 memset(&ctx, 0, sizeof(ctx)); 284 memset(&ctx, 0, sizeof(ctx));
327 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 285 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
352 struct drm_ctx_list *ctx_entry; 310 struct drm_ctx_list *ctx_entry;
353 struct drm_ctx *ctx = data; 311 struct drm_ctx *ctx = data;
354 312
355 if (drm_core_check_feature(dev, DRIVER_MODESET))
356 return -EINVAL;
357
358 ctx->handle = drm_ctxbitmap_next(dev); 313 ctx->handle = drm_ctxbitmap_next(dev);
359 if (ctx->handle == DRM_KERNEL_CONTEXT) { 314 if (ctx->handle == DRM_KERNEL_CONTEXT) {
360 /* Skip kernel's context and get a new one. */ 315 /* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
398{ 353{
399 struct drm_ctx *ctx = data; 354 struct drm_ctx *ctx = data;
400 355
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
402 return -EINVAL;
403
404 /* This is 0, because we don't handle any context flags */ 356 /* This is 0, because we don't handle any context flags */
405 ctx->flags = 0; 357 ctx->flags = 0;
406 358
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
423{ 375{
424 struct drm_ctx *ctx = data; 376 struct drm_ctx *ctx = data;
425 377
426 if (drm_core_check_feature(dev, DRIVER_MODESET))
427 return -EINVAL;
428
429 DRM_DEBUG("%d\n", ctx->handle); 378 DRM_DEBUG("%d\n", ctx->handle);
430 return drm_context_switch(dev, dev->last_context, ctx->handle); 379 return drm_context_switch(dev, dev->last_context, ctx->handle);
431} 380}
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
446{ 395{
447 struct drm_ctx *ctx = data; 396 struct drm_ctx *ctx = data;
448 397
449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return -EINVAL;
451
452 DRM_DEBUG("%d\n", ctx->handle); 398 DRM_DEBUG("%d\n", ctx->handle);
453 drm_context_switch_complete(dev, file_priv, ctx->handle); 399 drm_context_switch_complete(dev, file_priv, ctx->handle);
454 400
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
471{ 417{
472 struct drm_ctx *ctx = data; 418 struct drm_ctx *ctx = data;
473 419
474 if (drm_core_check_feature(dev, DRIVER_MODESET))
475 return -EINVAL;
476
477 DRM_DEBUG("%d\n", ctx->handle); 420 DRM_DEBUG("%d\n", ctx->handle);
478 if (ctx->handle != DRM_KERNEL_CONTEXT) { 421 if (ctx->handle != DRM_KERNEL_CONTEXT) {
479 if (dev->driver->context_dtor) 422 if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..f6f6cc7fc133 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,6 +416,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
416 return; 416 return;
417 417
418 /* 418 /*
419 * fbdev->blank can be called from irq context in case of a panic.
420 * Since we already have our own special panic handler which will
421 * restore the fbdev console mode completely, just bail out early.
422 */
423 if (oops_in_progress)
424 return;
425
426 /*
419 * For each CRTC in this fb, turn the connectors on/off. 427 * For each CRTC in this fb, turn the connectors on/off.
420 */ 428 */
421 drm_modeset_lock_all(dev); 429 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4be8e09a32ef..3f84277d7036 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
439 if (dev->driver->driver_features & DRIVER_GEM) 439 if (dev->driver->driver_features & DRIVER_GEM)
440 drm_gem_release(dev, file_priv); 440 drm_gem_release(dev, file_priv);
441 441
442 drm_legacy_ctxbitmap_release(dev, file_priv); 442 mutex_lock(&dev->ctxlist_mutex);
443 if (!list_empty(&dev->ctxlist)) {
444 struct drm_ctx_list *pos, *n;
445
446 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
447 if (pos->tag == file_priv &&
448 pos->handle != DRM_KERNEL_CONTEXT) {
449 if (dev->driver->context_dtor)
450 dev->driver->context_dtor(dev,
451 pos->handle);
452
453 drm_ctxbitmap_free(dev, pos->handle);
454
455 list_del(&pos->head);
456 kfree(pos);
457 --dev->ctx_count;
458 }
459 }
460 }
461 mutex_unlock(&dev->ctxlist_mutex);
443 462
444 mutex_lock(&dev->struct_mutex); 463 mutex_lock(&dev->struct_mutex);
445 464
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index e7eb0276f7f1..39d864576be4 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
292 goto error_out_unreg; 292 goto error_out_unreg;
293 } 293 }
294 294
295 drm_legacy_ctxbitmap_init(dev); 295
296
297 retcode = drm_ctxbitmap_init(dev);
298 if (retcode) {
299 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300 goto error_out_unreg;
301 }
296 302
297 if (driver->driver_features & DRIVER_GEM) { 303 if (driver->driver_features & DRIVER_GEM) {
298 retcode = drm_gem_init(dev); 304 retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
446 drm_rmmap(dev, r_list->map); 452 drm_rmmap(dev, r_list->map);
447 drm_ht_remove(&dev->map_hash); 453 drm_ht_remove(&dev->map_hash);
448 454
449 drm_legacy_ctxbitmap_cleanup(dev); 455 drm_ctxbitmap_cleanup(dev);
450 456
451 if (drm_core_check_feature(dev, DRIVER_MODESET)) 457 if (drm_core_check_feature(dev, DRIVER_MODESET))
452 drm_put_minor(&dev->control); 458 drm_put_minor(&dev->control);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4752f223e5b2..45b6ef595965 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
56 56
57config DRM_EXYNOS_FIMC 57config DRM_EXYNOS_FIMC
58 bool "Exynos DRM FIMC" 58 bool "Exynos DRM FIMC"
59 depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF 59 depends on DRM_EXYNOS_IPP && MFD_SYSCON
60 help 60 help
61 Choose this option if you want to use Exynos FIMC for DRM. 61 Choose this option if you want to use Exynos FIMC for DRM.
62 62
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3445a0f3a6b2..9c8088462c26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
63 return -ENOMEM; 63 return -ENOMEM;
64 } 64 }
65 65
66 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, 66 buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
67 buf->size,
67 &buf->dma_addr, GFP_KERNEL, 68 &buf->dma_addr, GFP_KERNEL,
68 &buf->dma_attrs); 69 &buf->dma_attrs);
69 if (!buf->kvaddr) { 70 if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
90 } 91 }
91 92
92 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); 93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
93 if (!buf->sgt) { 94 if (IS_ERR(buf->sgt)) {
94 DRM_ERROR("failed to get sg table.\n"); 95 DRM_ERROR("failed to get sg table.\n");
95 ret = -ENOMEM; 96 ret = PTR_ERR(buf->sgt);
96 goto err_free_attrs; 97 goto err_free_attrs;
97 } 98 }
98 99
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 78e868bcf1ec..e7c2f2d07f19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
99 if (is_drm_iommu_supported(dev)) { 99 if (is_drm_iommu_supported(dev)) {
100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT; 100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
101 101
102 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP, 102 buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
103 nr_pages, VM_MAP,
103 pgprot_writecombine(PAGE_KERNEL)); 104 pgprot_writecombine(PAGE_KERNEL));
104 } else { 105 } else {
105 phys_addr_t dma_addr = buffer->dma_addr; 106 phys_addr_t dma_addr = buffer->dma_addr;
106 if (dma_addr) 107 if (dma_addr)
107 buffer->kvaddr = phys_to_virt(dma_addr); 108 buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
108 else 109 else
109 buffer->kvaddr = (void __iomem *)NULL; 110 buffer->kvaddr = (void __iomem *)NULL;
110 } 111 }
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); 707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
708 break; 708 break;
709 case DRM_MODE_DPMS_OFF: 709 case DRM_MODE_DPMS_OFF:
710 /* disable audio and video ports */ 710 /* disable video ports */
711 reg_write(encoder, REG_ENA_AP, 0x00);
712 reg_write(encoder, REG_ENA_VP_0, 0x00); 711 reg_write(encoder, REG_ENA_VP_0, 0x00);
713 reg_write(encoder, REG_ENA_VP_1, 0x00); 712 reg_write(encoder, REG_ENA_VP_1, 0x00);
714 reg_write(encoder, REG_ENA_VP_2, 0x00); 713 reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8507c6d1e642..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1392,14 +1392,11 @@ out:
1392 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1392 if (i915_terminally_wedged(&dev_priv->gpu_error))
1393 return VM_FAULT_SIGBUS; 1393 return VM_FAULT_SIGBUS;
1394 case -EAGAIN: 1394 case -EAGAIN:
1395 /* Give the error handler a chance to run and move the 1395 /*
1396 * objects off the GPU active list. Next time we service the 1396 * EAGAIN means the gpu is hung and we'll wait for the error
1397 * fault, we should be able to transition the page into the 1397 * handler to reset everything when re-faulting in
1398 * GTT without touching the GPU (and so avoid further 1398 * i915_mutex_lock_interruptible.
1399 * EIO/EGAIN). If the GPU is wedged, then there is no issue
1400 * with coherency, just lost writes.
1401 */ 1399 */
1402 set_need_resched();
1403 case 0: 1400 case 0:
1404 case -ERESTARTSYS: 1401 case -ERESTARTSYS:
1405 case -EINTR: 1402 case -EINTR:
@@ -4803,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4803 4800
4804 if (!mutex_trylock(&dev->struct_mutex)) { 4801 if (!mutex_trylock(&dev->struct_mutex)) {
4805 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4802 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4806 return SHRINK_STOP; 4803 return 0;
4807 4804
4808 if (dev_priv->mm.shrinker_no_lock_stealing) 4805 if (dev_priv->mm.shrinker_no_lock_stealing)
4809 return SHRINK_STOP; 4806 return 0;
4810 4807
4811 unlock = false; 4808 unlock = false;
4812 } 4809 }
@@ -4904,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4904 4901
4905 if (!mutex_trylock(&dev->struct_mutex)) { 4902 if (!mutex_trylock(&dev->struct_mutex)) {
4906 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4903 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4907 return 0; 4904 return SHRINK_STOP;
4908 4905
4909 if (dev_priv->mm.shrinker_no_lock_stealing) 4906 if (dev_priv->mm.shrinker_no_lock_stealing)
4910 return 0; 4907 return SHRINK_STOP;
4911 4908
4912 unlock = false; 4909 unlock = false;
4913 } 4910 }
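
These two i915 hunks swap the return values to match the split shrinker API: the count callback reports an object count (0 when it cannot say anything useful), and only the scan callback may return SHRINK_STOP. A hedged sketch of that convention around a hypothetical lock-protected cache (the example_* names are assumptions, not i915 code):

    #include <linux/kernel.h>
    #include <linux/shrinker.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static unsigned long example_nr_objects;        /* hypothetical freeable-object count */

    static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long nr;

            if (!spin_trylock(&example_lock))
                    return 0;                       /* "nothing countable right now" */

            nr = example_nr_objects;
            spin_unlock(&example_lock);
            return nr;
    }

    static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long freed;

            if (!spin_trylock(&example_lock))
                    return SHRINK_STOP;             /* tell the shrinker core to back off */

            freed = min(example_nr_objects, sc->nr_to_scan);
            example_nr_objects -= freed;            /* pretend we freed them */
            spin_unlock(&example_lock);
            return freed;
    }

    static struct shrinker example_shrinker = {
            .count_objects  = example_count,
            .scan_objects   = example_scan,
            .seeks          = DEFAULT_SEEKS,
    };
    /* registered once with register_shrinker(&example_shrinker); */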
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
143 143
144 /* Seek the first printf which is hits start position */ 144 /* Seek the first printf which is hits start position */
145 if (e->pos < e->start) { 145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args); 146 va_list tmp;
147 if (!__i915_error_seek(e, len)) 147
148 va_copy(tmp, args);
149 if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
148 return; 150 return;
149 } 151 }
150 152
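
The va_copy() added above matters because vsnprintf(NULL, 0, ...) consumes the va_list while measuring the would-be output length. A standalone userspace illustration of the same pattern (not the i915 code):

    #include <stdarg.h>
    #include <stdio.h>

    /* Measure the formatted length on a copy, then format for real with the
     * untouched original list. */
    static int measured_vprintf(const char *fmt, va_list args)
    {
            va_list tmp;
            int len;

            va_copy(tmp, args);
            len = vsnprintf(NULL, 0, fmt, tmp);     /* length only; consumes tmp */
            va_end(tmp);

            if (len < 0)
                    return len;
            return vprintf(fmt, args);              /* args is still intact */
    }

    static int measured_printf(const char *fmt, ...)
    {
            va_list ap;
            int ret;

            va_start(ap, fmt);
            ret = measured_vprintf(fmt, ap);
            va_end(ap);
            return ret;
    }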
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 83cce0cdb769..4b91228fd9bd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1469 return ret; 1469 return ret;
1470} 1470}
1471 1471
1472static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1473 bool reset_completed)
1474{
1475 struct intel_ring_buffer *ring;
1476 int i;
1477
1478 /*
1479 * Notify all waiters for GPU completion events that reset state has
1480 * been changed, and that they need to restart their wait after
1481 * checking for potential errors (and bail out to drop locks if there is
1482 * a gpu reset pending so that i915_error_work_func can acquire them).
1483 */
1484
1485 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1486 for_each_ring(ring, dev_priv, i)
1487 wake_up_all(&ring->irq_queue);
1488
1489 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1490 wake_up_all(&dev_priv->pending_flip_queue);
1491
1492 /*
1493 * Signal tasks blocked in i915_gem_wait_for_error that the pending
1494 * reset state is cleared.
1495 */
1496 if (reset_completed)
1497 wake_up_all(&dev_priv->gpu_error.reset_queue);
1498}
1499
1472/** 1500/**
1473 * i915_error_work_func - do process context error handling work 1501 * i915_error_work_func - do process context error handling work
1474 * @work: work struct 1502 * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
1483 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1511 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1484 gpu_error); 1512 gpu_error);
1485 struct drm_device *dev = dev_priv->dev; 1513 struct drm_device *dev = dev_priv->dev;
1486 struct intel_ring_buffer *ring;
1487 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1514 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1488 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1515 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1489 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1516 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1490 int i, ret; 1517 int ret;
1491 1518
1492 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1519 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1493 1520
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
1506 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1533 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1507 reset_event); 1534 reset_event);
1508 1535
1536 /*
1537 * All state reset _must_ be completed before we update the
1538 * reset counter, for otherwise waiters might miss the reset
1539 * pending state and not properly drop locks, resulting in
1540 * deadlocks with the reset work.
1541 */
1509 ret = i915_reset(dev); 1542 ret = i915_reset(dev);
1510 1543
1544 intel_display_handle_reset(dev);
1545
1511 if (ret == 0) { 1546 if (ret == 0) {
1512 /* 1547 /*
1513 * After all the gem state is reset, increment the reset 1548 * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
1528 atomic_set(&error->reset_counter, I915_WEDGED); 1563 atomic_set(&error->reset_counter, I915_WEDGED);
1529 } 1564 }
1530 1565
1531 for_each_ring(ring, dev_priv, i) 1566 /*
1532 wake_up_all(&ring->irq_queue); 1567 * Note: The wake_up also serves as a memory barrier so that
1533 1568 * waiters see the update value of the reset counter atomic_t.
1534 intel_display_handle_reset(dev); 1569 */
1535 1570 i915_error_wake_up(dev_priv, true);
1536 wake_up_all(&dev_priv->gpu_error.reset_queue);
1537 } 1571 }
1538} 1572}
1539 1573
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1642void i915_handle_error(struct drm_device *dev, bool wedged) 1676void i915_handle_error(struct drm_device *dev, bool wedged)
1643{ 1677{
1644 struct drm_i915_private *dev_priv = dev->dev_private; 1678 struct drm_i915_private *dev_priv = dev->dev_private;
1645 struct intel_ring_buffer *ring;
1646 int i;
1647 1679
1648 i915_capture_error_state(dev); 1680 i915_capture_error_state(dev);
1649 i915_report_and_clear_eir(dev); 1681 i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1653 &dev_priv->gpu_error.reset_counter); 1685 &dev_priv->gpu_error.reset_counter);
1654 1686
1655 /* 1687 /*
1656 * Wakeup waiting processes so that the reset work item 1688 * Wakeup waiting processes so that the reset work function
1657 * doesn't deadlock trying to grab various locks. 1689 * i915_error_work_func doesn't deadlock trying to grab various
1690 * locks. By bumping the reset counter first, the woken
1691 * processes will see a reset in progress and back off,
1692 * releasing their locks and then wait for the reset completion.
1693 * We must do this for _all_ gpu waiters that might hold locks
1694 * that the reset work needs to acquire.
1695 *
1696 * Note: The wake_up serves as the required memory barrier to
1697 * ensure that the waiters see the updated value of the reset
1698 * counter atomic_t.
1658 */ 1699 */
1659 for_each_ring(ring, dev_priv, i) 1700 i915_error_wake_up(dev_priv, false);
1660 wake_up_all(&ring->irq_queue);
1661 } 1701 }
1662 1702
1663 /* 1703 /*
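
The comments added in this file stress one ordering rule: publish the reset state (the atomic counter) before waking any waiter, so every woken waiter observes the pending reset, backs off, and releases the locks the reset worker needs. The following is a userspace analogue built on pthreads and C11 atomics, offered as an illustration under that assumption rather than the i915 implementation:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static atomic_uint     reset_counter;
    static bool            work_done;

    /* "Reset" side: publish the new counter value first, then wake everyone. */
    static void signal_reset(void)
    {
            atomic_fetch_add(&reset_counter, 1);
            pthread_mutex_lock(&lock);
            pthread_cond_broadcast(&wake);
            pthread_mutex_unlock(&lock);
    }

    /* Normal completion side: mark the work finished and wake waiters. */
    static void complete_work(void)
    {
            pthread_mutex_lock(&lock);
            work_done = true;
            pthread_cond_broadcast(&wake);
            pthread_mutex_unlock(&lock);
    }

    /* Waiter side: every wakeup re-checks the counter; if it moved, drop the
     * lock and back off so the "reset worker" can make progress. */
    static int wait_for_work(unsigned int seen_reset)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            while (!work_done) {
                    if (atomic_load(&reset_counter) != seen_reset) {
                            ret = -1;       /* reset pending: bail out, lock released below */
                            break;
                    }
                    pthread_cond_wait(&wake, &lock);
            }
            pthread_mutex_unlock(&lock);
            return ret;
    }

In this analogue the mutex handed back by pthread_cond_wait() plays the role the kernel comment assigns to wake_up() as a memory barrier: a woken waiter is guaranteed to see the counter value published before the broadcast.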
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63aca49d11a8..63de2701b974 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
778 /* Can only use the always-on power well for eDP when 778 /* Can only use the always-on power well for eDP when
779 * not using the panel fitter, and when not using motion 779 * not using the panel fitter, and when not using motion
780 * blur mitigation (which we don't support). */ 780 * blur mitigation (which we don't support). */
781 if (intel_crtc->config.pch_pfit.size) 781 if (intel_crtc->config.pch_pfit.enabled)
782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
783 else 783 else
784 temp |= TRANS_DDI_EDP_INPUT_A_ON; 784 temp |= TRANS_DDI_EDP_INPUT_A_ON;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2489d0b4c7d2..e5822e79f912 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2249 I915_WRITE(PIPESRC(intel_crtc->pipe),
2250 ((crtc->mode.hdisplay - 1) << 16) | 2250 ((crtc->mode.hdisplay - 1) << 16) |
2251 (crtc->mode.vdisplay - 1)); 2251 (crtc->mode.vdisplay - 1));
2252 if (!intel_crtc->config.pch_pfit.size && 2252 if (!intel_crtc->config.pch_pfit.enabled &&
2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2255 I915_WRITE(PF_CTL(intel_crtc->pipe), 0); 2255 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
3203 struct drm_i915_private *dev_priv = dev->dev_private; 3203 struct drm_i915_private *dev_priv = dev->dev_private;
3204 int pipe = crtc->pipe; 3204 int pipe = crtc->pipe;
3205 3205
3206 if (crtc->config.pch_pfit.size) { 3206 if (crtc->config.pch_pfit.enabled) {
3207 /* Force use of hard-coded filter coefficients 3207 /* Force use of hard-coded filter coefficients
3208 * as some pre-programmed values are broken, 3208 * as some pre-programmed values are broken,
3209 * e.g. x201. 3209 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
3428 3428
3429 /* To avoid upsetting the power well on haswell only disable the pfit if 3429 /* To avoid upsetting the power well on haswell only disable the pfit if
3430 * it's in use. The hw state code will make sure we get this right. */ 3430 * it's in use. The hw state code will make sure we get this right. */
3431 if (crtc->config.pch_pfit.size) { 3431 if (crtc->config.pch_pfit.enabled) {
3432 I915_WRITE(PF_CTL(pipe), 0); 3432 I915_WRITE(PF_CTL(pipe), 0);
3433 I915_WRITE(PF_WIN_POS(pipe), 0); 3433 I915_WRITE(PF_WIN_POS(pipe), 0);
3434 I915_WRITE(PF_WIN_SZ(pipe), 0); 3434 I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4775 4775
4776 pipeconf = 0; 4776 pipeconf = 0;
4777 4777
4778 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4779 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4780 pipeconf |= PIPECONF_ENABLE;
4781
4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4782 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4779 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4783 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4780 * core speed. 4784 * core speed.
@@ -4877,9 +4881,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4877 return -EINVAL; 4881 return -EINVAL;
4878 } 4882 }
4879 4883
4880 /* Ensure that the cursor is valid for the new mode before changing... */
4881 intel_crtc_update_cursor(crtc, true);
4882
4883 if (is_lvds && dev_priv->lvds_downclock_avail) { 4884 if (is_lvds && dev_priv->lvds_downclock_avail) {
4884 /* 4885 /*
4885 * Ensure we match the reduced clock's P to the target clock. 4886 * Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5769,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5768 intel_crtc->config.dpll.p2 = clock.p2; 5769 intel_crtc->config.dpll.p2 = clock.p2;
5769 } 5770 }
5770 5771
5771 /* Ensure that the cursor is valid for the new mode before changing... */
5772 intel_crtc_update_cursor(crtc, true);
5773
5774 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 5772 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5775 if (intel_crtc->config.has_pch_encoder) { 5773 if (intel_crtc->config.has_pch_encoder) {
5776 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 5774 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5857,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5859 tmp = I915_READ(PF_CTL(crtc->pipe)); 5857 tmp = I915_READ(PF_CTL(crtc->pipe));
5860 5858
5861 if (tmp & PF_ENABLE) { 5859 if (tmp & PF_ENABLE) {
5860 pipe_config->pch_pfit.enabled = true;
5862 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 5861 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
5863 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 5862 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
5864 5863
@@ -6236,7 +6235,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
6236 if (!crtc->base.enabled) 6235 if (!crtc->base.enabled)
6237 continue; 6236 continue;
6238 6237
6239 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size || 6238 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
6240 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6239 crtc->config.cpu_transcoder != TRANSCODER_EDP)
6241 enable = true; 6240 enable = true;
6242 } 6241 }
@@ -6259,9 +6258,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6259 if (!intel_ddi_pll_mode_set(crtc)) 6258 if (!intel_ddi_pll_mode_set(crtc))
6260 return -EINVAL; 6259 return -EINVAL;
6261 6260
6262 /* Ensure that the cursor is valid for the new mode before changing... */
6263 intel_crtc_update_cursor(crtc, true);
6264
6265 if (intel_crtc->config.has_dp_encoder) 6261 if (intel_crtc->config.has_dp_encoder)
6266 intel_dp_set_m_n(intel_crtc); 6262 intel_dp_set_m_n(intel_crtc);
6267 6263
@@ -6494,15 +6490,15 @@ static void haswell_write_eld(struct drm_connector *connector,
6494 6490
6495 /* Set ELD valid state */ 6491 /* Set ELD valid state */
6496 tmp = I915_READ(aud_cntrl_st2); 6492 tmp = I915_READ(aud_cntrl_st2);
6497 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); 6493 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
6498 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); 6494 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6499 I915_WRITE(aud_cntrl_st2, tmp); 6495 I915_WRITE(aud_cntrl_st2, tmp);
6500 tmp = I915_READ(aud_cntrl_st2); 6496 tmp = I915_READ(aud_cntrl_st2);
6501 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); 6497 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
6502 6498
6503 /* Enable HDMI mode */ 6499 /* Enable HDMI mode */
6504 tmp = I915_READ(aud_config); 6500 tmp = I915_READ(aud_config);
6505 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); 6501 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
6506 /* clear N_programing_enable and N_value_index */ 6502 /* clear N_programing_enable and N_value_index */
6507 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); 6503 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6508 I915_WRITE(aud_config, tmp); 6504 I915_WRITE(aud_config, tmp);
@@ -6937,7 +6933,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6937 intel_crtc->cursor_width = width; 6933 intel_crtc->cursor_width = width;
6938 intel_crtc->cursor_height = height; 6934 intel_crtc->cursor_height = height;
6939 6935
6940 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6936 if (intel_crtc->active)
6937 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6941 6938
6942 return 0; 6939 return 0;
6943fail_unpin: 6940fail_unpin:
@@ -6956,7 +6953,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6956 intel_crtc->cursor_x = x; 6953 intel_crtc->cursor_x = x;
6957 intel_crtc->cursor_y = y; 6954 intel_crtc->cursor_y = y;
6958 6955
6959 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6956 if (intel_crtc->active)
6957 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6960 6958
6961 return 0; 6959 return 0;
6962} 6960}
@@ -8205,9 +8203,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8205 pipe_config->gmch_pfit.control, 8203 pipe_config->gmch_pfit.control,
8206 pipe_config->gmch_pfit.pgm_ratios, 8204 pipe_config->gmch_pfit.pgm_ratios,
8207 pipe_config->gmch_pfit.lvds_border_bits); 8205 pipe_config->gmch_pfit.lvds_border_bits);
8208 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n", 8206 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
8209 pipe_config->pch_pfit.pos, 8207 pipe_config->pch_pfit.pos,
8210 pipe_config->pch_pfit.size); 8208 pipe_config->pch_pfit.size,
8209 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8211 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8210 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8212} 8211}
8213 8212
@@ -8603,8 +8602,11 @@ intel_pipe_config_compare(struct drm_device *dev,
8603 if (INTEL_INFO(dev)->gen < 4) 8602 if (INTEL_INFO(dev)->gen < 4)
8604 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 8603 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
8605 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 8604 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
8606 PIPE_CONF_CHECK_I(pch_pfit.pos); 8605 PIPE_CONF_CHECK_I(pch_pfit.enabled);
8607 PIPE_CONF_CHECK_I(pch_pfit.size); 8606 if (current_config->pch_pfit.enabled) {
8607 PIPE_CONF_CHECK_I(pch_pfit.pos);
8608 PIPE_CONF_CHECK_I(pch_pfit.size);
8609 }
8608 8610
8609 PIPE_CONF_CHECK_I(ips_enabled); 8611 PIPE_CONF_CHECK_I(ips_enabled);
8610 8612
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..79c14e298ba6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 588 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 589 return -EREMOTEIO;
590 case AUX_NATIVE_REPLY_DEFER: 590 case AUX_NATIVE_REPLY_DEFER:
591 udelay(100); 591 /*
592 * For now, just give more slack to branch devices. We
593 * could check the DPCD for I2C bit rate capabilities,
594 * and if available, adjust the interval. We could also
595 * be more careful with DP-to-Legacy adapters where a
596 * long legacy cable may force very low I2C bit rates.
597 */
598 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
599 DP_DWN_STRM_PORT_PRESENT)
600 usleep_range(500, 600);
601 else
602 usleep_range(300, 400);
592 continue; 603 continue;
593 default: 604 default:
594 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
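
The intel_dp.c hunk above turns a fixed udelay(100) into a reply-dependent sleep. A sketch of that retry policy with placeholder example_* names (the transfer callback and retry count are assumptions; only the DPCD defines and usleep_range() are real kernel interfaces):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>
    #include <drm/drm_dp_helper.h>

    struct example_aux {
            u8 dpcd[DP_RECEIVER_CAP_SIZE];
    };

    static int example_aux_retry(struct example_aux *aux,
                                 int (*xfer)(struct example_aux *aux))
    {
            int i;

            for (i = 0; i < 7; i++) {                       /* bounded retries */
                    int ret = xfer(aux);

                    if (ret != -EAGAIN)                     /* -EAGAIN stands in for "deferred" */
                            return ret;

                    if (aux->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
                        DP_DWN_STRM_PORT_PRESENT)
                            usleep_range(500, 600);         /* branch device: more slack */
                    else
                            usleep_range(300, 400);
            }
            return -ETIMEDOUT;
    }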
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a47799e832c6..28cae80495e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,6 +280,7 @@ struct intel_crtc_config {
280 struct { 280 struct {
281 u32 pos; 281 u32 pos;
282 u32 size; 282 u32 size;
283 bool enabled;
283 } pch_pfit; 284 } pch_pfit;
284 285
285 /* FDI configuration, only valid if has_pch_encoder is set. */ 286 /* FDI configuration, only valid if has_pch_encoder is set. */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 406303b509c1..7fa7df546c1e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
263 C(vtotal); 263 C(vtotal);
264 C(clock); 264 C(clock);
265#undef C 265#undef C
266
267 drm_mode_set_crtcinfo(adjusted_mode, 0);
266 } 268 }
267 269
268 if (intel_dvo->dev.dev_ops->mode_fixup) 270 if (intel_dvo->dev.dev_ops->mode_fixup)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 42114ecbae0e..293564a2896a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
112done: 112done:
113 pipe_config->pch_pfit.pos = (x << 16) | y; 113 pipe_config->pch_pfit.pos = (x << 16) | y;
114 pipe_config->pch_pfit.size = (width << 16) | height; 114 pipe_config->pch_pfit.size = (width << 16) | height;
115 pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
115} 116}
116 117
117static void 118static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0c115cc4899f..dd176b7296c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2096 struct drm_crtc *crtc) 2096 struct drm_crtc *crtc)
2097{ 2097{
2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2099 uint32_t pixel_rate, pfit_size; 2099 uint32_t pixel_rate;
2100 2100
2101 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2101 pixel_rate = intel_crtc->config.adjusted_mode.clock;
2102 2102
2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2104 * adjust the pixel_rate here. */ 2104 * adjust the pixel_rate here. */
2105 2105
2106 pfit_size = intel_crtc->config.pch_pfit.size; 2106 if (intel_crtc->config.pch_pfit.enabled) {
2107 if (pfit_size) {
2108 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 2107 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2108 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
2109 2109
2110 pipe_w = intel_crtc->config.requested_mode.hdisplay; 2110 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2111 pipe_h = intel_crtc->config.requested_mode.vdisplay; 2111 pipe_h = intel_crtc->config.requested_mode.vdisplay;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 85037b9d4934..49482fd5b76c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
788 uint16_t h_sync_offset, v_sync_offset; 788 uint16_t h_sync_offset, v_sync_offset;
789 int mode_clock; 789 int mode_clock;
790 790
791 memset(dtd, 0, sizeof(*dtd));
792
791 width = mode->hdisplay; 793 width = mode->hdisplay;
792 height = mode->vdisplay; 794 height = mode->vdisplay;
793 795
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
830 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 832 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
831 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; 833 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
832 834
833 dtd->part2.sdvo_flags = 0;
834 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; 835 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
835 dtd->part2.reserved = 0;
836} 836}
837 837
838static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, 838static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
839 const struct intel_sdvo_dtd *dtd) 839 const struct intel_sdvo_dtd *dtd)
840{ 840{
841 mode->hdisplay = dtd->part1.h_active; 841 struct drm_display_mode mode = {};
842 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; 842
843 mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; 843 mode.hdisplay = dtd->part1.h_active;
844 mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; 844 mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
845 mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; 845 mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
846 mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; 846 mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
847 mode->htotal = mode->hdisplay + dtd->part1.h_blank; 847 mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
848 mode->htotal += (dtd->part1.h_high & 0xf) << 8; 848 mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
849 849 mode.htotal = mode.hdisplay + dtd->part1.h_blank;
850 mode->vdisplay = dtd->part1.v_active; 850 mode.htotal += (dtd->part1.h_high & 0xf) << 8;
851 mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; 851
852 mode->vsync_start = mode->vdisplay; 852 mode.vdisplay = dtd->part1.v_active;
853 mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; 853 mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
854 mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; 854 mode.vsync_start = mode.vdisplay;
855 mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; 855 mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
856 mode->vsync_end = mode->vsync_start + 856 mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
857 mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
858 mode.vsync_end = mode.vsync_start +
857 (dtd->part2.v_sync_off_width & 0xf); 859 (dtd->part2.v_sync_off_width & 0xf);
858 mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; 860 mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
859 mode->vtotal = mode->vdisplay + dtd->part1.v_blank; 861 mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
860 mode->vtotal += (dtd->part1.v_high & 0xf) << 8; 862 mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
861 863
862 mode->clock = dtd->part1.clock * 10; 864 mode.clock = dtd->part1.clock * 10;
863 865
864 mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
865 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) 866 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
866 mode->flags |= DRM_MODE_FLAG_INTERLACE; 867 mode.flags |= DRM_MODE_FLAG_INTERLACE;
867 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) 868 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
868 mode->flags |= DRM_MODE_FLAG_PHSYNC; 869 mode.flags |= DRM_MODE_FLAG_PHSYNC;
870 else
871 mode.flags |= DRM_MODE_FLAG_NHSYNC;
869 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) 872 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
870 mode->flags |= DRM_MODE_FLAG_PVSYNC; 873 mode.flags |= DRM_MODE_FLAG_PVSYNC;
874 else
875 mode.flags |= DRM_MODE_FLAG_NVSYNC;
876
877 drm_mode_set_crtcinfo(&mode, 0);
878
879 drm_mode_copy(pmode, &mode);
871} 880}
872 881
873static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) 882static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
 919        /* TV has its own notion of sync and other mode flags, so clear them. */
920 pipe_config->adjusted_mode.flags = 0;
921
922 /*
923 * FIXME: We don't check whether the input mode is actually what we want
924 * or whether userspace is doing something stupid.
925 */
926
919 return true; 927 return true;
920} 928}
921 929
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
124 124
125 /* reset completed fence seqno, just discard anything pending: */ 125 /* reset completed fence seqno, just discard anything pending: */
126 adreno_gpu->memptrs->fence = gpu->submitted_fence; 126 adreno_gpu->memptrs->fence = gpu->submitted_fence;
127 adreno_gpu->memptrs->rptr = 0;
128 adreno_gpu->memptrs->wptr = 0;
127 129
128 gpu->funcs->pm_resume(gpu); 130 gpu->funcs->pm_resume(gpu);
129 ret = gpu->funcs->hw_init(gpu); 131 ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
229 return; 231 return;
230 } while(time_before(jiffies, t)); 232 } while(time_before(jiffies, t));
231 233
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name); 234 DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
233 235
234 /* TODO maybe we need to reset GPU here to recover from hang? */ 236 /* TODO maybe we need to reset GPU here to recover from hang? */
235} 237}
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{ 258{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 259 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords; 260 uint32_t freedwords;
261 unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
259 do { 262 do {
260 uint32_t size = gpu->rb->size / 4; 263 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb); 264 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr; 265 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size; 266 freedwords = (rptr + (size - 1) - wptr) % size;
267
268 if (time_after(jiffies, t)) {
269 DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
270 break;
271 }
264 } while(freedwords < ndwords); 272 } while(freedwords < ndwords);
265} 273}
266 274
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
19#include "msm_drv.h" 19#include "msm_drv.h"
20#include "mdp4_kms.h" 20#include "mdp4_kms.h"
21 21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); 22static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25 23
26static int mdp4_hw_init(struct msm_kms *kms) 24static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
18#include "msm_drv.h" 18#include "msm_drv.h"
19#include "msm_gpu.h" 19#include "msm_gpu.h"
20 20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev) 21static void msm_fb_output_poll_changed(struct drm_device *dev)
24{ 22{
25 struct msm_drm_private *priv = dev->dev_private; 23 struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
62 int i, ret; 60 int i, ret;
63 61
64 for (i = 0; i < cnt; i++) { 62 for (i = 0; i < cnt; i++) {
63 /* TODO maybe some day msm iommu won't require this hack: */
64 struct device *msm_iommu_get_ctx(const char *ctx_name);
65 struct device *ctx = msm_iommu_get_ctx(names[i]); 65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx) 66 if (!ctx)
67 continue; 67 continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
199 * imx drm driver on iMX5 199 * imx drm driver on iMX5
200 */ 200 */
201 dev_err(dev->dev, "failed to load kms\n"); 201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(priv->kms); 202 ret = PTR_ERR(kms);
203 goto fail; 203 goto fail;
204 } 204 }
205 205
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
499 struct timespec *timeout) 499 struct timespec *timeout)
500{ 500{
501 struct msm_drm_private *priv = dev->dev_private; 501 struct msm_drm_private *priv = dev->dev_private;
502 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
503 unsigned long start_jiffies = jiffies;
504 unsigned long remaining_jiffies;
505 int ret; 502 int ret;
506 503
507 if (time_after(start_jiffies, timeout_jiffies)) 504 if (!priv->gpu)
508 remaining_jiffies = 0; 505 return 0;
509 else 506
510 remaining_jiffies = timeout_jiffies - start_jiffies; 507 if (fence > priv->gpu->submitted_fence) {
511 508 DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
512 ret = wait_event_interruptible_timeout(priv->fence_event, 509 fence, priv->gpu->submitted_fence);
513 priv->completed_fence >= fence, 510 return -EINVAL;
514 remaining_jiffies); 511 }
515 if (ret == 0) { 512
516 DBG("timeout waiting for fence: %u (completed: %u)", 513 if (!timeout) {
517 fence, priv->completed_fence); 514 /* no-wait: */
518 ret = -ETIMEDOUT; 515 ret = fence_completed(dev, fence) ? 0 : -EBUSY;
519 } else if (ret != -ERESTARTSYS) { 516 } else {
520 ret = 0; 517 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
518 unsigned long start_jiffies = jiffies;
519 unsigned long remaining_jiffies;
520
521 if (time_after(start_jiffies, timeout_jiffies))
522 remaining_jiffies = 0;
523 else
524 remaining_jiffies = timeout_jiffies - start_jiffies;
525
526 ret = wait_event_interruptible_timeout(priv->fence_event,
527 fence_completed(dev, fence),
528 remaining_jiffies);
529
530 if (ret == 0) {
531 DBG("timeout waiting for fence: %u (completed: %u)",
532 fence, priv->completed_fence);
533 ret = -ETIMEDOUT;
534 } else if (ret != -ERESTARTSYS) {
535 ret = 0;
536 }
521 } 537 }
522 538
523 return ret; 539 return ret;
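
A hypothetical caller of the reworked helper, to show the new convention: a NULL timeout now means "do not sleep", so polling a fence reduces to checking for a zero return (-EBUSY means still pending, -EINVAL means the fence was never submitted). This snippet assumes msm_drv.h is in scope and is not part of the patch:

    #include "msm_drv.h"    /* assumed include, for msm_wait_fence_interruptable() */

    static bool example_fence_done(struct drm_device *dev, uint32_t fence)
    {
            return msm_wait_fence_interruptable(dev, fence, NULL) == 0;
    }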
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
681 .gem_vm_ops = &vm_ops, 697 .gem_vm_ops = &vm_ops,
682 .dumb_create = msm_gem_dumb_create, 698 .dumb_create = msm_gem_dumb_create,
683 .dumb_map_offset = msm_gem_dumb_map_offset, 699 .dumb_map_offset = msm_gem_dumb_map_offset,
684 .dumb_destroy = msm_gem_dumb_destroy, 700 .dumb_destroy = drm_gem_dumb_destroy,
685#ifdef CONFIG_DEBUG_FS 701#ifdef CONFIG_DEBUG_FS
686 .debugfs_init = msm_debugfs_init, 702 .debugfs_init = msm_debugfs_init,
687 .debugfs_cleanup = msm_debugfs_cleanup, 703 .debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 153int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
154 struct work_struct *work); 154 struct work_struct *work);
155void msm_gem_move_to_active(struct drm_gem_object *obj, 155void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, uint32_t fence); 156 struct msm_gpu *gpu, bool write, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj); 157void msm_gem_move_to_inactive(struct drm_gem_object *obj);
158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
159 struct timespec *timeout); 159 struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
193 193
194static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
195{
196 struct msm_drm_private *priv = dev->dev_private;
197 return priv->completed_fence >= fence;
198}
199
194static inline int align_pitch(int width, int bpp) 200static inline int align_pitch(int width, int bpp)
195{ 201{
196 int bytespp = (bpp + 7) / 8; 202 int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
40 } 40 }
41 41
42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); 42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
43 if (!msm_obj->sgt) { 43 if (IS_ERR(msm_obj->sgt)) {
44 dev_err(dev->dev, "failed to allocate sgt\n"); 44 dev_err(dev->dev, "failed to allocate sgt\n");
45 return ERR_PTR(-ENOMEM); 45 return ERR_CAST(msm_obj->sgt);
46 } 46 }
47 47
48 msm_obj->pages = p; 48 msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
159out: 159out:
160 switch (ret) { 160 switch (ret) {
161 case -EAGAIN: 161 case -EAGAIN:
162 set_need_resched();
163 case 0: 162 case 0:
164 case -ERESTARTSYS: 163 case -ERESTARTSYS:
165 case -EINTR: 164 case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); 319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321} 320}
322 321
323int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
324 uint32_t handle)
325{
326 /* No special work needed, drop the reference and see what falls out */
327 return drm_gem_handle_delete(file, handle);
328}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 322int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset) 323 uint32_t handle, uint64_t *offset)
332{ 324{
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
393} 385}
394 386
395void msm_gem_move_to_active(struct drm_gem_object *obj, 387void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence) 388 struct msm_gpu *gpu, bool write, uint32_t fence)
397{ 389{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj); 390 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu; 391 msm_obj->gpu = gpu;
400 msm_obj->fence = fence; 392 if (write)
393 msm_obj->write_fence = fence;
394 else
395 msm_obj->read_fence = fence;
401 list_del_init(&msm_obj->mm_list); 396 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 397 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403} 398}
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 406 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412 407
413 msm_obj->gpu = NULL; 408 msm_obj->gpu = NULL;
414 msm_obj->fence = 0; 409 msm_obj->read_fence = 0;
410 msm_obj->write_fence = 0;
415 list_del_init(&msm_obj->mm_list); 411 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 412 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417 413
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
433 struct msm_gem_object *msm_obj = to_msm_bo(obj); 429 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0; 430 int ret = 0;
435 431
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) 432 if (is_active(msm_obj)) {
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout); 433 uint32_t fence = 0;
434
435 if (op & MSM_PREP_READ)
436 fence = msm_obj->write_fence;
437 if (op & MSM_PREP_WRITE)
438 fence = max(fence, msm_obj->read_fence);
439 if (op & MSM_PREP_NOSYNC)
440 timeout = NULL;
441
442 ret = msm_wait_fence_interruptable(dev, fence, timeout);
443 }
438 444
439 /* TODO cache maintenance */ 445 /* TODO cache maintenance */
440 446
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
455 uint64_t off = drm_vma_node_start(&obj->vma_node); 461 uint64_t off = drm_vma_node_start(&obj->vma_node);
456 462
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 463 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n", 464 seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 465 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter, 466 msm_obj->read_fence, msm_obj->write_fence,
467 obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size); 468 off, msm_obj->vaddr, obj->size);
462} 469}
463 470
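
The read/write split above follows the usual buffer-synchronisation rule, restated here detached from the object for clarity (a sketch mirroring msm_gem_cpu_prep(), not additional driver code):

    #include <linux/kernel.h>
    #include <drm/msm_drm.h>        /* MSM_PREP_READ / MSM_PREP_WRITE */

    static uint32_t example_prep_fence(uint32_t op, uint32_t read_fence,
                                       uint32_t write_fence)
    {
            uint32_t fence = 0;

            if (op & MSM_PREP_READ)
                    fence = write_fence;            /* a read waits for the last write */
            if (op & MSM_PREP_WRITE)
                    fence = max(fence, read_fence); /* a write also waits for readers */

            return fence;
    }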
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
36 */ 36 */
37 struct list_head mm_list; 37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */ 38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence; 39 uint32_t read_fence, write_fence;
40 40
41 /* Transiently in the process of submit ioctl, objects associated 41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for 42 * with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e1ef3a00f60..5281d4bc37f7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
78 } 78 }
79 79
80 if (submit_bo.flags & BO_INVALID_FLAGS) { 80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags); 81 DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
82 ret = -EINVAL; 82 ret = -EINVAL;
83 goto out_unlock; 83 goto out_unlock;
84 } 84 }
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
92 */ 92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle); 93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) { 94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i); 95 DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
96 ret = -EINVAL; 96 ret = -EINVAL;
97 goto out_unlock; 97 goto out_unlock;
98 } 98 }
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
100 msm_obj = to_msm_bo(obj); 100 msm_obj = to_msm_bo(obj);
101 101
102 if (!list_empty(&msm_obj->submit_entry)) { 102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list", 103 DRM_ERROR("handle %u at index %u already on submit list\n",
104 submit_bo.handle, i); 104 submit_bo.handle, i);
105 ret = -EINVAL; 105 ret = -EINVAL;
106 goto out_unlock; 106 goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid) 216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{ 217{
218 if (idx >= submit->nr_bos) { 218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos); 219 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
220 return EINVAL; 220 idx, submit->nr_bos);
221 return -EINVAL;
221 } 222 }
222 223
223 if (obj) 224 if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
239 int ret; 240 int ret;
240 241
241 if (offset % 4) { 242 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset); 243 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
243 return -EINVAL; 244 return -EINVAL;
244 } 245 }
245 246
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
266 return -EFAULT; 267 return -EFAULT;
267 268
268 if (submit_reloc.submit_offset % 4) { 269 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u", 270 DRM_ERROR("non-aligned reloc offset: %u\n",
270 submit_reloc.submit_offset); 271 submit_reloc.submit_offset);
271 return -EINVAL; 272 return -EINVAL;
272 } 273 }
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
276 277
277 if ((off >= (obj->base.size / 4)) || 278 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) { 279 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i); 280 DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
280 return -EINVAL; 281 return -EINVAL;
281 } 282 }
282 283
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
374 goto out; 375 goto out;
375 376
376 if (submit_cmd.size % 4) { 377 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u", 378 DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
378 submit_cmd.size); 379 submit_cmd.size);
379 ret = -EINVAL; 380 ret = -EINVAL;
380 goto out; 381 goto out;
381 } 382 }
382 383
383 if (submit_cmd.size >= msm_obj->base.size) { 384 if ((submit_cmd.size + submit_cmd.submit_offset) >=
384 DBG("invalid cmdstream size: %u", submit_cmd.size); 385 msm_obj->base.size) {
386 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
385 ret = -EINVAL; 387 ret = -EINVAL;
386 goto out; 388 goto out;
387 } 389 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..3bab937965d1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -29,13 +29,14 @@
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) 29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{ 30{
31 struct drm_device *dev = gpu->dev; 31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; 32 struct kgsl_device_platform_data *pdata;
33 33
34 if (!pdev) { 34 if (!pdev) {
35 dev_err(dev->dev, "could not find dtv pdata\n"); 35 dev_err(dev->dev, "could not find dtv pdata\n");
36 return; 36 return;
37 } 37 }
38 38
39 pdata = pdev->dev.platform_data;
39 if (pdata->bus_scale_table) { 40 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); 41 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc); 42 DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
230static void hangcheck_handler(unsigned long data) 231static void hangcheck_handler(unsigned long data)
231{ 232{
232 struct msm_gpu *gpu = (struct msm_gpu *)data; 233 struct msm_gpu *gpu = (struct msm_gpu *)data;
234 struct drm_device *dev = gpu->dev;
235 struct msm_drm_private *priv = dev->dev_private;
233 uint32_t fence = gpu->funcs->last_fence(gpu); 236 uint32_t fence = gpu->funcs->last_fence(gpu);
234 237
235 if (fence != gpu->hangcheck_fence) { 238 if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
237 gpu->hangcheck_fence = fence; 240 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) { 241 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */ 242 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence; 243 gpu->hangcheck_fence = fence;
244 dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
245 gpu->name);
246 dev_err(dev->dev, "%s: completed fence: %u\n",
247 gpu->name, fence);
248 dev_err(dev->dev, "%s: submitted fence: %u\n",
249 gpu->name, gpu->submitted_fence);
242 queue_work(priv->wq, &gpu->recover_work); 250 queue_work(priv->wq, &gpu->recover_work);
243 } 251 }
244 252
245 /* if still more pending work, reset the hangcheck timer: */ 253 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence) 254 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu); 255 hangcheck_timer_reset(gpu);
256
257 /* workaround for missing irq: */
258 queue_work(priv->wq, &gpu->retire_work);
248} 259}
249 260
250/* 261/*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
265 obj = list_first_entry(&gpu->active_list, 276 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list); 277 struct msm_gem_object, mm_list);
267 278
268 if (obj->fence <= fence) { 279 if ((obj->read_fence <= fence) &&
280 (obj->write_fence <= fence)) {
269 /* move to inactive: */ 281 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base); 282 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id); 283 msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
321 submit->gpu->id, &iova); 333 submit->gpu->id, &iova);
322 } 334 }
323 335
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); 336 if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
337 msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
338
339 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
340 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
325 } 341 }
326 hangcheck_timer_reset(gpu); 342 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex); 343 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2e11ea02cf87..57cda2a1437b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -579,8 +579,22 @@ static void
579init_reserved(struct nvbios_init *init) 579init_reserved(struct nvbios_init *init)
580{ 580{
581 u8 opcode = nv_ro08(init->bios, init->offset); 581 u8 opcode = nv_ro08(init->bios, init->offset);
582 trace("RESERVED\t0x%02x\n", opcode); 582 u8 length, i;
583 init->offset += 1; 583
584 switch (opcode) {
585 case 0xaa:
586 length = 4;
587 break;
588 default:
589 length = 1;
590 break;
591 }
592
593 trace("RESERVED 0x%02x\t", opcode);
594 for (i = 1; i < length; i++)
595 cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
596 cont("\n");
597 init->offset += length;
584} 598}
585 599
586/** 600/**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
1437 data = init_rdvgai(init, 0x03c4, 0x01); 1451 data = init_rdvgai(init, 0x03c4, 0x01);
1438 init_wrvgai(init, 0x03c4, 0x01, data | 0x20); 1452 init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
1439 1453
1440 while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) { 1454 for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
1441 switch (addr) { 1455 switch (addr) {
1442 case 0x10021c: /* CKE_NORMAL */ 1456 case 0x10021c: /* CKE_NORMAL */
1443 case 0x1002d0: /* CMD_REFRESH */ 1457 case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
2135 [0x99] = { init_zm_auxch }, 2149 [0x99] = { init_zm_auxch },
2136 [0x9a] = { init_i2c_long_if }, 2150 [0x9a] = { init_i2c_long_if },
2137 [0xa9] = { init_gpio_ne }, 2151 [0xa9] = { init_gpio_ne },
2152 [0xaa] = { init_reserved },
2138}; 2153};
2139 2154
2140#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) 2155#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d2712e6e5d31..7848590f5568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
278{ 278{
279 struct nouveau_drm *drm = nouveau_drm(dev); 279 struct nouveau_drm *drm = nouveau_drm(dev);
280 struct nouveau_display *disp; 280 struct nouveau_display *disp;
281 u32 pclass = dev->pdev->class >> 8;
282 int ret, gen; 281 int ret, gen;
283 282
284 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); 283 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
 	drm_kms_helper_poll_init(dev);
 	drm_kms_helper_poll_disable(dev);
 
-	if (nouveau_modeset == 1 ||
-	    (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
-		if (drm->vbios.dcb.entries) {
-			if (nv_device(drm->device)->card_type < NV_50)
-				ret = nv04_display_create(dev);
-			else
-				ret = nv50_display_create(dev);
-		} else {
-			ret = 0;
-		}
-
-		if (ret)
-			goto disp_create_err;
+	if (drm->vbios.dcb.entries) {
+		if (nv_device(drm->device)->card_type < NV_50)
+			ret = nv04_display_create(dev);
+		else
+			ret = nv50_display_create(dev);
+	} else {
+		ret = 0;
+	}
 
-		if (dev->mode_config.num_crtc) {
-			ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
-			if (ret)
-				goto vblank_err;
-		}
+	if (ret)
+		goto disp_create_err;
 
-		nouveau_backlight_init(dev);
+	if (dev->mode_config.num_crtc) {
+		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+		if (ret)
+			goto vblank_err;
 	}
 
+	nouveau_backlight_init(dev);
 	return 0;
 
 vblank_err:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8f6d63d7edd3..a86ecf65c164 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
454 int preferred_bpp; 454 int preferred_bpp;
455 int ret; 455 int ret;
456 456
457 if (!dev->mode_config.num_crtc) 457 if (!dev->mode_config.num_crtc ||
458 (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
458 return 0; 459 return 0;
459 460
460 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 461 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ca5492ac2da5..0843ebc910d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
104 else 104 else
105 nvbe->ttm.ttm.func = &nv50_sgdma_backend; 105 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
106 106
107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
108 kfree(nvbe);
109 return NULL; 108 return NULL;
110 }
111 return &nvbe->ttm.ttm; 109 return &nvbe->ttm.ttm;
112} 110}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
711 radeon_audio) 711 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
712 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
713 else if (radeon_connector->use_digital) 714 else if (radeon_connector->use_digital)
714 return ATOM_ENCODER_MODE_DVI; 715 return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
718 case DRM_MODE_CONNECTOR_DVID: 719 case DRM_MODE_CONNECTOR_DVID:
719 case DRM_MODE_CONNECTOR_HDMIA: 720 case DRM_MODE_CONNECTOR_HDMIA:
720 default: 721 default:
721 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
722 radeon_audio) 723 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
724 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
723 return ATOM_ENCODER_MODE_HDMI; 725 return ATOM_ENCODER_MODE_HDMI;
724 else 726 else
725 return ATOM_ENCODER_MODE_DVI; 727 return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
734 return ATOM_ENCODER_MODE_DP; 736 return ATOM_ENCODER_MODE_DP;
735 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
736 radeon_audio) 738 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
739 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
737 return ATOM_ENCODER_MODE_HDMI; 740 return ATOM_ENCODER_MODE_HDMI;
738 else 741 else
739 return ATOM_ENCODER_MODE_DVI; 742 return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1647 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1650 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1648 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1651 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1649 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1650 /* some early dce3.2 boards have a bug in their transmitter control table */ 1653 /* some dce3.x boards have a bug in their transmitter control table.
1651 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) 1654 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
1655 * does the same thing and more.
1656 */
1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1658 (rdev->family != CHIP_RS880))
1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1653 } 1660 }
1654 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e69414fd1..b162e98a2953 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1168 { 25000, 30000, RADEON_SCLK_UP } 1168 { 25000, 30000, RADEON_SCLK_UP }
1169}; 1169};
1170 1170
1171void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1172 u32 *max_clock)
1173{
1174 u32 i, clock = 0;
1175
1176 if ((table == NULL) || (table->count == 0)) {
1177 *max_clock = clock;
1178 return;
1179 }
1180
1181 for (i = 0; i < table->count; i++) {
1182 if (clock < table->entries[i].clk)
1183 clock = table->entries[i].clk;
1184 }
1185 *max_clock = clock;
1186}
1187
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1188void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage) 1189 u32 clock, u16 max_voltage, u16 *voltage)
1173{ 1190{
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2080 bool disable_mclk_switching; 2097 bool disable_mclk_switching;
2081 u32 mclk, sclk; 2098 u32 mclk, sclk;
2082 u16 vddc, vddci; 2099 u16 vddc, vddci;
2100 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2083 2101
2084 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2102 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2085 btc_dpm_vblank_too_short(rdev)) 2103 btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2121 ps->low.vddci = max_limits->vddci; 2139 ps->low.vddci = max_limits->vddci;
2122 } 2140 }
2123 2141
2142 /* limit clocks to max supported clocks based on voltage dependency tables */
2143 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2144 &max_sclk_vddc);
2145 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2146 &max_mclk_vddci);
2147 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2148 &max_mclk_vddc);
2149
2150 if (max_sclk_vddc) {
2151 if (ps->low.sclk > max_sclk_vddc)
2152 ps->low.sclk = max_sclk_vddc;
2153 if (ps->medium.sclk > max_sclk_vddc)
2154 ps->medium.sclk = max_sclk_vddc;
2155 if (ps->high.sclk > max_sclk_vddc)
2156 ps->high.sclk = max_sclk_vddc;
2157 }
2158 if (max_mclk_vddci) {
2159 if (ps->low.mclk > max_mclk_vddci)
2160 ps->low.mclk = max_mclk_vddci;
2161 if (ps->medium.mclk > max_mclk_vddci)
2162 ps->medium.mclk = max_mclk_vddci;
2163 if (ps->high.mclk > max_mclk_vddci)
2164 ps->high.mclk = max_mclk_vddci;
2165 }
2166 if (max_mclk_vddc) {
2167 if (ps->low.mclk > max_mclk_vddc)
2168 ps->low.mclk = max_mclk_vddc;
2169 if (ps->medium.mclk > max_mclk_vddc)
2170 ps->medium.mclk = max_mclk_vddc;
2171 if (ps->high.mclk > max_mclk_vddc)
2172 ps->high.mclk = max_mclk_vddc;
2173 }
2174
2124 /* XXX validate the min clocks required for display */ 2175 /* XXX validate the min clocks required for display */
2125 2176
2126 if (disable_mclk_switching) { 2177 if (disable_mclk_switching) {
@@ -2340,12 +2391,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
2340 return ret; 2391 return ret;
2341 } 2392 }
2342 2393
2343 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2344 if (ret) {
2345 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2346 return ret;
2347 }
2348
2349 return 0; 2394 return 0;
2350} 2395}
2351 2396
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3cce533397c6..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
146}; 146};
147 147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
150 u32 *max_clock);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 151extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest); 152 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 153extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
712 struct radeon_clock_and_voltage_limits *max_limits; 714 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching; 715 bool disable_mclk_switching;
714 u32 sclk, mclk; 716 u32 sclk, mclk;
717 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
715 int i; 718 int i;
716 719
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 720 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
739 } 742 }
740 } 743 }
741 744
745 /* limit clocks to max supported clocks based on voltage dependency tables */
746 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
747 &max_sclk_vddc);
748 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
749 &max_mclk_vddci);
750 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
751 &max_mclk_vddc);
752
753 for (i = 0; i < ps->performance_level_count; i++) {
754 if (max_sclk_vddc) {
755 if (ps->performance_levels[i].sclk > max_sclk_vddc)
756 ps->performance_levels[i].sclk = max_sclk_vddc;
757 }
758 if (max_mclk_vddci) {
759 if (ps->performance_levels[i].mclk > max_mclk_vddci)
760 ps->performance_levels[i].mclk = max_mclk_vddci;
761 }
762 if (max_mclk_vddc) {
763 if (ps->performance_levels[i].mclk > max_mclk_vddc)
764 ps->performance_levels[i].mclk = max_mclk_vddc;
765 }
766 }
767
742 /* XXX validate the min clocks required for display */ 768 /* XXX validate the min clocks required for display */
743 769
744 if (disable_mclk_switching) { 770 if (disable_mclk_switching) {
@@ -4748,12 +4774,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
4748 if (pi->pcie_performance_request) 4774 if (pi->pcie_performance_request)
4749 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 4775 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4750 4776
4751 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4752 if (ret) {
4753 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4754 return ret;
4755 }
4756
4757 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 4777 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4758 RADEON_CG_BLOCK_MC | 4778 RADEON_CG_BLOCK_MC |
4759 RADEON_CG_BLOCK_SDMA | 4779 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address, 47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit) 48 const u8 *src, u32 byte_count, u32 limit)
49{ 49{
50 unsigned long flags;
50 u32 data, original_data; 51 u32 data, original_data;
51 u32 addr; 52 u32 addr;
52 u32 extra_shift; 53 u32 extra_shift;
53 int ret; 54 int ret = 0;
54 55
55 if (smc_start_address & 3) 56 if (smc_start_address & 3)
56 return -EINVAL; 57 return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
59 60
60 addr = smc_start_address; 61 addr = smc_start_address;
61 62
63 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
62 while (byte_count >= 4) { 64 while (byte_count >= 4) {
63 /* SMC address space is BE */ 65 /* SMC address space is BE */
64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 66 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
65 67
66 ret = ci_set_smc_sram_address(rdev, addr, limit); 68 ret = ci_set_smc_sram_address(rdev, addr, limit);
67 if (ret) 69 if (ret)
68 return ret; 70 goto done;
69 71
70 WREG32(SMC_IND_DATA_0, data); 72 WREG32(SMC_IND_DATA_0, data);
71 73
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
80 82
81 ret = ci_set_smc_sram_address(rdev, addr, limit); 83 ret = ci_set_smc_sram_address(rdev, addr, limit);
82 if (ret) 84 if (ret)
83 return ret; 85 goto done;
84 86
85 original_data = RREG32(SMC_IND_DATA_0); 87 original_data = RREG32(SMC_IND_DATA_0);
86 88
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
97 99
98 ret = ci_set_smc_sram_address(rdev, addr, limit); 100 ret = ci_set_smc_sram_address(rdev, addr, limit);
99 if (ret) 101 if (ret)
100 return ret; 102 goto done;
101 103
102 WREG32(SMC_IND_DATA_0, data); 104 WREG32(SMC_IND_DATA_0, data);
103 } 105 }
104 return 0; 106
107done:
108 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
109
110 return ret;
105} 111}
106 112
107void ci_start_smc(struct radeon_device *rdev) 113void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
197 203
198int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) 204int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
199{ 205{
206 unsigned long flags;
200 u32 ucode_start_address; 207 u32 ucode_start_address;
201 u32 ucode_size; 208 u32 ucode_size;
202 const u8 *src; 209 const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
219 return -EINVAL; 226 return -EINVAL;
220 227
221 src = (const u8 *)rdev->smc_fw->data; 228 src = (const u8 *)rdev->smc_fw->data;
229 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
222 WREG32(SMC_IND_INDEX_0, ucode_start_address); 230 WREG32(SMC_IND_INDEX_0, ucode_start_address);
223 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 231 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
224 while (ucode_size >= 4) { 232 while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
231 ucode_size -= 4; 239 ucode_size -= 4;
232 } 240 }
233 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 241 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
242 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
234 243
235 return 0; 244 return 0;
236} 245}
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
 			   u32 smc_address, u32 *value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		*value = RREG32(SMC_IND_DATA_0);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	*value = RREG32(SMC_IND_DATA_0);
-	return 0;
+	return ret;
 }
 
 int ci_write_smc_sram_dword(struct radeon_device *rdev,
 			    u32 smc_address, u32 value, u32 limit)
 {
+	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-	if (ret)
-		return ret;
+	if (ret == 0)
+		WREG32(SMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-	WREG32(SMC_IND_DATA_0, value);
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..d02fd1c045d5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev); 78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev); 79static void cik_init_cg(struct radeon_device *rdev);
80static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
81 bool enable);
80 82
81/* get temperature in millidegrees */ 83/* get temperature in millidegrees */
82int ci_get_temp(struct radeon_device *rdev) 84int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
120 */ 122 */
121u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) 123u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
122{ 124{
125 unsigned long flags;
123 u32 r; 126 u32 r;
124 127
128 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
125 WREG32(PCIE_INDEX, reg); 129 WREG32(PCIE_INDEX, reg);
126 (void)RREG32(PCIE_INDEX); 130 (void)RREG32(PCIE_INDEX);
127 r = RREG32(PCIE_DATA); 131 r = RREG32(PCIE_DATA);
132 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
128 return r; 133 return r;
129} 134}
130 135
131void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 136void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
132{ 137{
138 unsigned long flags;
139
140 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
133 WREG32(PCIE_INDEX, reg); 141 WREG32(PCIE_INDEX, reg);
134 (void)RREG32(PCIE_INDEX); 142 (void)RREG32(PCIE_INDEX);
135 WREG32(PCIE_DATA, v); 143 WREG32(PCIE_DATA, v);
136 (void)RREG32(PCIE_DATA); 144 (void)RREG32(PCIE_DATA);
145 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
137} 146}
138 147
139static const u32 spectre_rlc_save_restore_register_list[] = 148static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2722 } else if ((rdev->pdev->device == 0x1309) || 2731 } else if ((rdev->pdev->device == 0x1309) ||
2723 (rdev->pdev->device == 0x130A) || 2732 (rdev->pdev->device == 0x130A) ||
2724 (rdev->pdev->device == 0x130D) || 2733 (rdev->pdev->device == 0x130D) ||
2725 (rdev->pdev->device == 0x1313)) { 2734 (rdev->pdev->device == 0x1313) ||
2735 (rdev->pdev->device == 0x131D)) {
2726 rdev->config.cik.max_cu_per_sh = 6; 2736 rdev->config.cik.max_cu_per_sh = 6;
2727 rdev->config.cik.max_backends_per_se = 2; 2737 rdev->config.cik.max_backends_per_se = 2;
2728 } else if ((rdev->pdev->device == 0x1306) || 2738 } else if ((rdev->pdev->device == 0x1306) ||
@@ -2835,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2835 rdev->config.cik.tile_config |= (3 << 0); 2845 rdev->config.cik.tile_config |= (3 << 0);
2836 break; 2846 break;
2837 } 2847 }
2838 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 2848 rdev->config.cik.tile_config |=
2839 rdev->config.cik.tile_config |= 1 << 4; 2849 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
2840 else
2841 rdev->config.cik.tile_config |= 0 << 4;
2842 rdev->config.cik.tile_config |= 2850 rdev->config.cik.tile_config |=
2843 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 2851 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
2844 rdev->config.cik.tile_config |= 2852 rdev->config.cik.tile_config |=
@@ -4013,6 +4021,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
4013{ 4021{
4014 int r; 4022 int r;
4015 4023
4024 cik_enable_gui_idle_interrupt(rdev, false);
4025
4016 r = cik_cp_load_microcode(rdev); 4026 r = cik_cp_load_microcode(rdev);
4017 if (r) 4027 if (r)
4018 return r; 4028 return r;
@@ -4024,6 +4034,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
4024 if (r) 4034 if (r)
4025 return r; 4035 return r;
4026 4036
4037 cik_enable_gui_idle_interrupt(rdev, true);
4038
4027 return 0; 4039 return 0;
4028} 4040}
4029 4041
@@ -4442,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
4442 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 4454 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4443 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 4455 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4444 /* size in MB on si */ 4456 /* size in MB on si */
4445 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4457 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4446 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4458 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4447 rdev->mc.visible_vram_size = rdev->mc.aper_size; 4459 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4448 si_vram_gtt_location(rdev, &rdev->mc); 4460 si_vram_gtt_location(rdev, &rdev->mc);
4449 radeon_update_bandwidth_info(rdev); 4461 radeon_update_bandwidth_info(rdev);
@@ -4721,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-	char *block = (char *)&mc_client;
+	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 	       protections, vmid, addr,
 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-	       block, mc_id);
+	       block, mc_client, mc_id);
 }
 
 /**
@@ -5376,7 +5389,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
5376void cik_update_cg(struct radeon_device *rdev, 5389void cik_update_cg(struct radeon_device *rdev,
5377 u32 block, bool enable) 5390 u32 block, bool enable)
5378{ 5391{
5392
5379 if (block & RADEON_CG_BLOCK_GFX) { 5393 if (block & RADEON_CG_BLOCK_GFX) {
5394 cik_enable_gui_idle_interrupt(rdev, false);
5380 /* order matters! */ 5395 /* order matters! */
5381 if (enable) { 5396 if (enable) {
5382 cik_enable_mgcg(rdev, true); 5397 cik_enable_mgcg(rdev, true);
@@ -5385,6 +5400,7 @@ void cik_update_cg(struct radeon_device *rdev,
5385 cik_enable_cgcg(rdev, false); 5400 cik_enable_cgcg(rdev, false);
5386 cik_enable_mgcg(rdev, false); 5401 cik_enable_mgcg(rdev, false);
5387 } 5402 }
5403 cik_enable_gui_idle_interrupt(rdev, true);
5388 } 5404 }
5389 5405
5390 if (block & RADEON_CG_BLOCK_MC) { 5406 if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5557,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
5541{ 5557{
5542 u32 data, orig; 5558 u32 data, orig;
5543 5559
5544 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 5560 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5545 orig = data = RREG32(RLC_PG_CNTL); 5561 orig = data = RREG32(RLC_PG_CNTL);
5546 data |= GFX_PG_ENABLE; 5562 data |= GFX_PG_ENABLE;
5547 if (orig != data) 5563 if (orig != data)
@@ -5805,7 +5821,7 @@ static void cik_init_pg(struct radeon_device *rdev)
5805 if (rdev->pg_flags) { 5821 if (rdev->pg_flags) {
5806 cik_enable_sck_slowdown_on_pu(rdev, true); 5822 cik_enable_sck_slowdown_on_pu(rdev, true);
5807 cik_enable_sck_slowdown_on_pd(rdev, true); 5823 cik_enable_sck_slowdown_on_pd(rdev, true);
5808 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5824 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5809 cik_init_gfx_cgpg(rdev); 5825 cik_init_gfx_cgpg(rdev);
5810 cik_enable_cp_pg(rdev, true); 5826 cik_enable_cp_pg(rdev, true);
5811 cik_enable_gds_pg(rdev, true); 5827 cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5835,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
5819{ 5835{
5820 if (rdev->pg_flags) { 5836 if (rdev->pg_flags) {
5821 cik_update_gfx_pg(rdev, false); 5837 cik_update_gfx_pg(rdev, false);
5822 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5838 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5823 cik_enable_cp_pg(rdev, false); 5839 cik_enable_cp_pg(rdev, false);
5824 cik_enable_gds_pg(rdev, false); 5840 cik_enable_gds_pg(rdev, false);
5825 } 5841 }
@@ -5895,7 +5911,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
5895 u32 tmp; 5911 u32 tmp;
5896 5912
5897 /* gfx ring */ 5913 /* gfx ring */
5898 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5914 tmp = RREG32(CP_INT_CNTL_RING0) &
5915 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5916 WREG32(CP_INT_CNTL_RING0, tmp);
5899 /* sdma */ 5917 /* sdma */
5900 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5918 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5901 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); 5919 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6054,7 @@ static int cik_irq_init(struct radeon_device *rdev)
6036 */ 6054 */
6037int cik_irq_set(struct radeon_device *rdev) 6055int cik_irq_set(struct radeon_device *rdev)
6038{ 6056{
6039 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | 6057 u32 cp_int_cntl;
6040 PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6041 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; 6058 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
6042 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; 6059 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
6043 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 6060 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6075,10 @@ int cik_irq_set(struct radeon_device *rdev)
6058 return 0; 6075 return 0;
6059 } 6076 }
6060 6077
6078 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6079 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6080 cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6081
6061 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 6082 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
6062 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 6083 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
6063 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 6084 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db08d9b..91bb470de0a3 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
2014 if (eg_pi->pcie_performance_request) 2014 if (eg_pi->pcie_performance_request)
2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2016 2016
2017 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2018 if (ret) {
2019 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2020 return ret;
2021 }
2022
2023 return 0; 2017 return 0;
2024} 2018}
2025 2019
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
28static u32 dce6_endpoint_rreg(struct radeon_device *rdev, 28static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
29 u32 block_offset, u32 reg) 29 u32 block_offset, u32 reg)
30{ 30{
31 unsigned long flags;
31 u32 r; 32 u32 r;
32 33
34 spin_lock_irqsave(&rdev->end_idx_lock, flags);
33 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 35 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
34 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); 36 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
37 spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
38
35 return r; 39 return r;
36} 40}
37 41
38static void dce6_endpoint_wreg(struct radeon_device *rdev, 42static void dce6_endpoint_wreg(struct radeon_device *rdev,
39 u32 block_offset, u32 reg, u32 v) 43 u32 block_offset, u32 reg, u32 v)
40{ 44{
45 unsigned long flags;
46
47 spin_lock_irqsave(&rdev->end_idx_lock, flags);
41 if (ASIC_IS_DCE8(rdev)) 48 if (ASIC_IS_DCE8(rdev))
42 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 49 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
43 else 50 else
44 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, 51 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
45 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); 52 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
46 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); 53 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
54 spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
47} 55}
48 56
49#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) 57#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
86 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
87 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
88 u32 offset = dig->afmt->offset; 96 u32 offset = dig->afmt->offset;
89 u32 id = dig->afmt->pin->id;
90 97
91 if (!dig->afmt->pin) 98 if (!dig->afmt->pin)
92 return; 99 return;
93 100
94 WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); 101 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
102 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
95} 103}
96 104
97void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) 105void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..71399065db04 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
40static void kv_enable_new_levels(struct radeon_device *rdev); 40static void kv_enable_new_levels(struct radeon_device *rdev);
41static void kv_program_nbps_index_settings(struct radeon_device *rdev, 41static void kv_program_nbps_index_settings(struct radeon_device *rdev,
42 struct radeon_ps *new_rps); 42 struct radeon_ps *new_rps);
43static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
43static int kv_set_enabled_levels(struct radeon_device *rdev); 44static int kv_set_enabled_levels(struct radeon_device *rdev);
44static int kv_force_dpm_highest(struct radeon_device *rdev); 45static int kv_force_dpm_highest(struct radeon_device *rdev);
45static int kv_force_dpm_lowest(struct radeon_device *rdev); 46static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
519 520
520static void kv_program_vc(struct radeon_device *rdev) 521static void kv_program_vc(struct radeon_device *rdev)
521{ 522{
522 WREG32_SMC(CG_FTV_0, 0x3FFFC000); 523 WREG32_SMC(CG_FTV_0, 0x3FFFC100);
523} 524}
524 525
525static void kv_clear_vc(struct radeon_device *rdev) 526static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
638 639
639static int kv_unforce_levels(struct radeon_device *rdev) 640static int kv_unforce_levels(struct radeon_device *rdev)
640{ 641{
641 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 642 if (rdev->family == CHIP_KABINI)
643 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
644 else
645 return kv_set_enabled_levels(rdev);
642} 646}
643 647
644static int kv_update_sclk_t(struct radeon_device *rdev) 648static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
667 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 671 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
668 672
669 if (table && table->count) { 673 if (table && table->count) {
670 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 674 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
671 if ((table->entries[i].clk == pi->boot_pl.sclk) || 675 if (table->entries[i].clk == pi->boot_pl.sclk)
672 (i == 0))
673 break; 676 break;
674 } 677 }
675 678
@@ -682,9 +685,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
682 if (table->num_max_dpm_entries == 0) 685 if (table->num_max_dpm_entries == 0)
683 return -EINVAL; 686 return -EINVAL;
684 687
685 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 688 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
686 if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || 689 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
687 (i == 0))
688 break; 690 break;
689 } 691 }
690 692
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1078 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1080 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1079} 1081}
1080 1082
1083static void kv_reset_acp_boot_level(struct radeon_device *rdev)
1084{
1085 struct kv_power_info *pi = kv_get_pi(rdev);
1086
1087 pi->acp_boot_level = 0xff;
1088}
1089
1081static void kv_update_current_ps(struct radeon_device *rdev, 1090static void kv_update_current_ps(struct radeon_device *rdev,
1082 struct radeon_ps *rps) 1091 struct radeon_ps *rps)
1083{ 1092{
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
1100 pi->requested_rps.ps_priv = &pi->requested_ps; 1109 pi->requested_rps.ps_priv = &pi->requested_ps;
1101} 1110}
1102 1111
1112void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1113{
1114 struct kv_power_info *pi = kv_get_pi(rdev);
1115 int ret;
1116
1117 if (pi->bapm_enable) {
1118 ret = kv_smc_bapm_enable(rdev, enable);
1119 if (ret)
1120 DRM_ERROR("kv_smc_bapm_enable failed\n");
1121 }
1122}
1123
1103int kv_dpm_enable(struct radeon_device *rdev) 1124int kv_dpm_enable(struct radeon_device *rdev)
1104{ 1125{
1105 struct kv_power_info *pi = kv_get_pi(rdev); 1126 struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
1192 return ret; 1213 return ret;
1193 } 1214 }
1194 1215
1216 kv_reset_acp_boot_level(rdev);
1217
1195 if (rdev->irq.installed && 1218 if (rdev->irq.installed &&
1196 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1219 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1197 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1220 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
1203 radeon_irq_set(rdev); 1226 radeon_irq_set(rdev);
1204 } 1227 }
1205 1228
1229 ret = kv_smc_bapm_enable(rdev, false);
1230 if (ret) {
1231 DRM_ERROR("kv_smc_bapm_enable failed\n");
1232 return ret;
1233 }
1234
1206 /* powerdown unused blocks for now */ 1235 /* powerdown unused blocks for now */
1207 kv_dpm_powergate_acp(rdev, true); 1236 kv_dpm_powergate_acp(rdev, true);
1208 kv_dpm_powergate_samu(rdev, true); 1237 kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
1226 RADEON_CG_BLOCK_BIF | 1255 RADEON_CG_BLOCK_BIF |
1227 RADEON_CG_BLOCK_HDP), false); 1256 RADEON_CG_BLOCK_HDP), false);
1228 1257
1258 kv_smc_bapm_enable(rdev, false);
1259
1229 /* powerup blocks */ 1260 /* powerup blocks */
1230 kv_dpm_powergate_acp(rdev, false); 1261 kv_dpm_powergate_acp(rdev, false);
1231 kv_dpm_powergate_samu(rdev, false); 1262 kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
1450 return kv_enable_samu_dpm(rdev, !gate); 1481 return kv_enable_samu_dpm(rdev, !gate);
1451} 1482}
1452 1483
1484static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
1485{
1486 u8 i;
1487 struct radeon_clock_voltage_dependency_table *table =
1488 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1489
1490 for (i = 0; i < table->count; i++) {
1491 if (table->entries[i].clk >= 0) /* XXX */
1492 break;
1493 }
1494
1495 if (i >= table->count)
1496 i = table->count - 1;
1497
1498 return i;
1499}
1500
1501static void kv_update_acp_boot_level(struct radeon_device *rdev)
1502{
1503 struct kv_power_info *pi = kv_get_pi(rdev);
1504 u8 acp_boot_level;
1505
1506 if (!pi->caps_stable_p_state) {
1507 acp_boot_level = kv_get_acp_boot_level(rdev);
1508 if (acp_boot_level != pi->acp_boot_level) {
1509 pi->acp_boot_level = acp_boot_level;
1510 kv_send_msg_to_smc_with_parameter(rdev,
1511 PPSMC_MSG_ACPDPM_SetEnabledMask,
1512 (1 << pi->acp_boot_level));
1513 }
1514 }
1515}
1516
1453static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) 1517static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1454{ 1518{
1455 struct kv_power_info *pi = kv_get_pi(rdev); 1519 struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
1461 if (pi->caps_stable_p_state) 1525 if (pi->caps_stable_p_state)
1462 pi->acp_boot_level = table->count - 1; 1526 pi->acp_boot_level = table->count - 1;
1463 else 1527 else
1464 pi->acp_boot_level = 0; 1528 pi->acp_boot_level = kv_get_acp_boot_level(rdev);
1465 1529
1466 ret = kv_copy_bytes_to_smc(rdev, 1530 ret = kv_copy_bytes_to_smc(rdev,
1467 pi->dpm_table_start + 1531 pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
 			}
 		}
 
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-			if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
-			    (i == 0)) {
-				pi->highest_valid = i;
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
 				break;
-			}
 		}
+		pi->highest_valid = i;
 
 		if (pi->lowest_valid > pi->highest_valid) {
 			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
 			}
 		}
 
-		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
 			if (table->entries[i].sclk_frequency <=
-			    new_ps->levels[new_ps->num_levels - 1].sclk ||
-			    i == 0) {
-				pi->highest_valid = i;
+			    new_ps->levels[new_ps->num_levels - 1].sclk)
 				break;
-			}
 		}
+		pi->highest_valid = i;
 
 		if (pi->lowest_valid > pi->highest_valid) {
 			if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1724 RADEON_CG_BLOCK_BIF | 1784 RADEON_CG_BLOCK_BIF |
1725 RADEON_CG_BLOCK_HDP), false); 1785 RADEON_CG_BLOCK_HDP), false);
1726 1786
1787 if (pi->bapm_enable) {
1788 ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1789 if (ret) {
1790 DRM_ERROR("kv_smc_bapm_enable failed\n");
1791 return ret;
1792 }
1793 }
1794
1727 if (rdev->family == CHIP_KABINI) { 1795 if (rdev->family == CHIP_KABINI) {
1728 if (pi->enable_dpm) { 1796 if (pi->enable_dpm) {
1729 kv_set_valid_clock_range(rdev, new_ps); 1797 kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1775 return ret; 1843 return ret;
1776 } 1844 }
1777#endif 1845#endif
1846 kv_update_acp_boot_level(rdev);
1778 kv_update_sclk_t(rdev); 1847 kv_update_sclk_t(rdev);
1779 kv_enable_nb_dpm(rdev); 1848 kv_enable_nb_dpm(rdev);
1780 } 1849 }
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1785 RADEON_CG_BLOCK_BIF | 1854 RADEON_CG_BLOCK_BIF |
1786 RADEON_CG_BLOCK_HDP), true); 1855 RADEON_CG_BLOCK_HDP), true);
1787 1856
1788 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1789 return 0; 1857 return 0;
1790} 1858}
1791 1859
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
 
 void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
-	kv_force_lowest_valid(rdev);
-	kv_init_graphics_levels(rdev);
-	kv_program_bootup_state(rdev);
-	kv_upload_dpm_settings(rdev);
-	kv_force_lowest_valid(rdev);
-	kv_unforce_levels(rdev);
+	struct kv_power_info *pi = kv_get_pi(rdev);
+
+	if (rdev->family == CHIP_KABINI) {
+		kv_force_lowest_valid(rdev);
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_upload_dpm_settings(rdev);
+		kv_force_lowest_valid(rdev);
+		kv_unforce_levels(rdev);
+	} else {
+		kv_init_graphics_levels(rdev);
+		kv_program_bootup_state(rdev);
+		kv_freeze_sclk_dpm(rdev, true);
+		kv_upload_dpm_settings(rdev);
+		kv_freeze_sclk_dpm(rdev, false);
+		kv_set_enabled_level(rdev, pi->graphics_boot_level);
+	}
 }
 
 //XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
 	if (ret)
 		return ret;
 
-	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
 		if (enable_mask & (1 << i))
 			break;
 	}
 
-	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	if (rdev->family == CHIP_KABINI)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
 }
 
 static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
 			break;
 	}
 
-	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	if (rdev->family == CHIP_KABINI)
+		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+	else
+		return kv_set_enabled_level(rdev, i);
 }
 
 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
 	if (!pi->caps_sclk_ds)
 		return 0;
 
-	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
 		temp = sclk / sumo_get_sleep_divider_from_id(i);
-		if ((temp >= min) || (i == 0))
+		if (temp >= min)
 			break;
 	}
 
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
 		ps->dpmx_nb_ps_lo = 0x1;
 		ps->dpmx_nb_ps_hi = 0x0;
 	} else {
-		ps->dpm0_pg_nb_ps_lo = 0x1;
+		ps->dpm0_pg_nb_ps_lo = 0x3;
 		ps->dpm0_pg_nb_ps_hi = 0x0;
-		ps->dpmx_nb_ps_lo = 0x2;
-		ps->dpmx_nb_ps_hi = 0x1;
+		ps->dpmx_nb_ps_lo = 0x3;
+		ps->dpmx_nb_ps_hi = 0x0;
 
-		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+		if (pi->sys_info.nb_dpm_enable) {
 			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
 				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
 				pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
2210 } 2295 }
2211} 2296}
2212 2297
2298static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2299{
2300 u32 new_mask = (1 << level);
2301
2302 return kv_send_msg_to_smc_with_parameter(rdev,
2303 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2304 new_mask);
2305}
2306
2213static int kv_set_enabled_levels(struct radeon_device *rdev) 2307static int kv_set_enabled_levels(struct radeon_device *rdev)
2214{ 2308{
2215 struct kv_power_info *pi = kv_get_pi(rdev); 2309 struct kv_power_info *pi = kv_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 192int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
193 u32 *value, u32 limit); 193 u32 *value, u32 limit);
194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); 194int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
195int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
195int kv_copy_bytes_to_smc(struct radeon_device *rdev, 196int kv_copy_bytes_to_smc(struct radeon_device *rdev,
196 u32 smc_start_address, 197 u32 smc_start_address,
197 const u8 *src, u32 byte_count, u32 limit); 198 const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); 107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
108} 108}
109 109
110int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
111{
112 if (enable)
113 return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
114 else
115 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
116}
117
110int kv_copy_bytes_to_smc(struct radeon_device *rdev, 118int kv_copy_bytes_to_smc(struct radeon_device *rdev,
111 u32 smc_start_address, 119 u32 smc_start_address,
112 const u8 *src, u32 byte_count, u32 limit) 120 const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c9e0e9..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk, sclk;
789 u16 vddc, vddci; 789 u16 vddc, vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
790 int i; 791 int i;
791 792
792 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 793 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
813 } 814 }
814 } 815 }
815 816
817 /* limit clocks to max supported clocks based on voltage dependency tables */
818 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
819 &max_sclk_vddc);
820 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
821 &max_mclk_vddci);
822 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
823 &max_mclk_vddc);
824
825 for (i = 0; i < ps->performance_level_count; i++) {
826 if (max_sclk_vddc) {
827 if (ps->performance_levels[i].sclk > max_sclk_vddc)
828 ps->performance_levels[i].sclk = max_sclk_vddc;
829 }
830 if (max_mclk_vddci) {
831 if (ps->performance_levels[i].mclk > max_mclk_vddci)
832 ps->performance_levels[i].mclk = max_mclk_vddci;
833 }
834 if (max_mclk_vddc) {
835 if (ps->performance_levels[i].mclk > max_mclk_vddc)
836 ps->performance_levels[i].mclk = max_mclk_vddc;
837 }
838 }
839
816 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
817 841
818 if (disable_mclk_switching) { 842 if (disable_mclk_switching) {
@@ -3865,12 +3889,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
3865 return ret; 3889 return ret;
3866 } 3890 }
3867 3891
3868 ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
3869 if (ret) {
3870 DRM_ERROR("ni_dpm_force_performance_level failed\n");
3871 return ret;
3872 }
3873
3874 return 0; 3892 return 0;
3875} 3893}
3876 3894
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) 163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) 164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) 165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
166#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
167#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
166#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) 168#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
167 169
168 170
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
2853 2853
2854uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2854uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2855{ 2855{
2856 unsigned long flags;
2856 uint32_t data; 2857 uint32_t data;
2857 2858
2859 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2858 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2860 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2859 r100_pll_errata_after_index(rdev); 2861 r100_pll_errata_after_index(rdev);
2860 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2862 data = RREG32(RADEON_CLOCK_CNTL_DATA);
2861 r100_pll_errata_after_data(rdev); 2863 r100_pll_errata_after_data(rdev);
2864 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2862 return data; 2865 return data;
2863} 2866}
2864 2867
2865void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2868void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2866{ 2869{
2870 unsigned long flags;
2871
2872 spin_lock_irqsave(&rdev->pll_idx_lock, flags);
2867 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2873 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2868 r100_pll_errata_after_index(rdev); 2874 r100_pll_errata_after_index(rdev);
2869 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2875 WREG32(RADEON_CLOCK_CNTL_DATA, v);
2870 r100_pll_errata_after_data(rdev); 2876 r100_pll_errata_after_data(rdev);
2877 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
2871} 2878}
2872 2879
2873static void r100_set_safe_registers(struct radeon_device *rdev) 2880static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -2926,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2926 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2933 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2927 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2934 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2928 seq_printf(m, "%u dwords in ring\n", count); 2935 seq_printf(m, "%u dwords in ring\n", count);
2929 for (j = 0; j <= count; j++) { 2936 if (ring->ready) {
2930 i = (rdp + j) & ring->ptr_mask; 2937 for (j = 0; j <= count; j++) {
2931 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2938 i = (rdp + j) & ring->ptr_mask;
2939 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2940 }
2932 } 2941 }
2933 return 0; 2942 return 0;
2934} 2943}
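For context: these r100.c changes, like most of this series, guard an index/data register pair with a dedicated spinlock so one thread's index write cannot be interleaved with another thread's data access, and the debugfs dump now only walks the ring contents when the ring is marked ready. A condensed sketch of the locking idiom (illustrative only, not part of this patch; it reuses the driver's WREG32/RREG32 helpers and the pll_idx_lock added elsewhere in the series):

    /* Indexed register read: select the register, then read the data port,
     * all under the lock so concurrent users cannot clobber the index. */
    static u32 indexed_rreg(struct radeon_device *rdev,
                            u32 index_reg, u32 data_reg, u32 reg)
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(&rdev->pll_idx_lock, flags);
            WREG32(index_reg, reg);
            val = RREG32(data_reg);
            spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);

            return val;
    }

The _irqsave variant keeps the index/data pair safe regardless of whether the caller runs with interrupts enabled.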
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
160 160
161u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) 161u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
162{ 162{
163 unsigned long flags;
163 u32 r; 164 u32 r;
164 165
166 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
165 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); 167 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
166 r = RREG32(R_0001FC_MC_IND_DATA); 168 r = RREG32(R_0001FC_MC_IND_DATA);
169 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
167 return r; 170 return r;
168} 171}
169 172
170void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 173void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
171{ 174{
175 unsigned long flags;
176
177 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
172 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | 178 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
173 S_0001F8_MC_IND_WR_EN(1)); 179 S_0001F8_MC_IND_WR_EN(1));
174 WREG32(R_0001FC_MC_IND_DATA, v); 180 WREG32(R_0001FC_MC_IND_DATA, v);
181 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
175} 182}
176 183
177static void r420_debugfs(struct radeon_device *rdev) 184static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
119 return rdev->clock.spll.reference_freq; 119 return rdev->clock.spll.reference_freq;
120} 120}
121 121
122int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
123{
124 return 0;
125}
126
122/* get temperature in millidegrees */ 127/* get temperature in millidegrees */
123int rv6xx_get_temp(struct radeon_device *rdev) 128int rv6xx_get_temp(struct radeon_device *rdev)
124{ 129{
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
1045 1050
1046uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) 1051uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1047{ 1052{
1053 unsigned long flags;
1048 uint32_t r; 1054 uint32_t r;
1049 1055
1056 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1050 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); 1057 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1051 r = RREG32(R_0028FC_MC_DATA); 1058 r = RREG32(R_0028FC_MC_DATA);
1052 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); 1059 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1060 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1053 return r; 1061 return r;
1054} 1062}
1055 1063
1056void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1064void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1057{ 1065{
1066 unsigned long flags;
1067
1068 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1058 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | 1069 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1059 S_0028F8_MC_IND_WR_EN(1)); 1070 S_0028F8_MC_IND_WR_EN(1));
1060 WREG32(R_0028FC_MC_DATA, v); 1071 WREG32(R_0028FC_MC_DATA, v);
1061 WREG32(R_0028F8_MC_INDEX, 0x7F); 1072 WREG32(R_0028F8_MC_INDEX, 0x7F);
1073 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1062} 1074}
1063 1075
1064static void r600_mc_program(struct radeon_device *rdev) 1076static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
2092 */ 2104 */
2093u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2105u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2094{ 2106{
2107 unsigned long flags;
2095 u32 r; 2108 u32 r;
2096 2109
2110 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2097 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2111 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2098 (void)RREG32(PCIE_PORT_INDEX); 2112 (void)RREG32(PCIE_PORT_INDEX);
2099 r = RREG32(PCIE_PORT_DATA); 2113 r = RREG32(PCIE_PORT_DATA);
2114 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2100 return r; 2115 return r;
2101} 2116}
2102 2117
2103void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2118void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2104{ 2119{
2120 unsigned long flags;
2121
2122 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2105 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2123 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2106 (void)RREG32(PCIE_PORT_INDEX); 2124 (void)RREG32(PCIE_PORT_INDEX);
2107 WREG32(PCIE_PORT_DATA, (v)); 2125 WREG32(PCIE_PORT_DATA, (v));
2108 (void)RREG32(PCIE_PORT_DATA); 2126 (void)RREG32(PCIE_PORT_DATA);
2127 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2109} 2128}
2110 2129
2111/* 2130/*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
1087 le16_to_cpu(limits->entries[i].usVoltage); 1087 le16_to_cpu(entry->usVoltage);
1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
1090 } 1090 }
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1219 1219
1220void r600_free_extended_power_table(struct radeon_device *rdev) 1220void r600_free_extended_power_table(struct radeon_device *rdev)
1221{ 1221{
1222 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) 1222 struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
1223 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 1223
1224 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) 1224 kfree(dyn_state->vddc_dependency_on_sclk.entries);
1225 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); 1225 kfree(dyn_state->vddci_dependency_on_mclk.entries);
1226 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) 1226 kfree(dyn_state->vddc_dependency_on_mclk.entries);
1227 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); 1227 kfree(dyn_state->mvdd_dependency_on_mclk.entries);
1228 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) 1228 kfree(dyn_state->cac_leakage_table.entries);
1229 kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); 1229 kfree(dyn_state->phase_shedding_limits_table.entries);
1230 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) 1230 kfree(dyn_state->ppm_table);
1231 kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); 1231 kfree(dyn_state->cac_tdp_table);
1232 if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) 1232 kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
1233 kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); 1233 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
1234 if (rdev->pm.dpm.dyn_state.ppm_table) 1234 kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
1235 kfree(rdev->pm.dpm.dyn_state.ppm_table); 1235 kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
1236 if (rdev->pm.dpm.dyn_state.cac_tdp_table)
1237 kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
1238 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
1239 kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
1240 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
1241 kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
1242 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
1243 kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
1244 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
1245 kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
1246} 1236}
1247 1237
1248enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev, 1238enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
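For context: the r600_free_extended_power_table() rewrite above leans on the fact that kfree(NULL) is a no-op, so every "if (ptr) kfree(ptr)" pair collapses to a plain kfree(), and a local dyn_state alias shortens the long member paths. The same simplification in isolation (illustrative only, not part of this patch; the struct is a hypothetical example type):

    #include <linux/slab.h>

    struct clock_table {                /* hypothetical example type */
            void *entries;
    };

    static void free_clock_table(struct clock_table *t)
    {
            kfree(t->entries);          /* kfree(NULL) does nothing, no check needed */
            t->entries = NULL;          /* defensive: harmless if the table is reused */
    }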
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..b0fa6002af3e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
257 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 257 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
258 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 258 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
259 */ 259 */
260 if (ASIC_IS_DCE3(rdev)) { 260 if (ASIC_IS_DCE32(rdev)) {
261 /* according to the reg specs, this should DCE3.2 only, but in
262 * practice it seems to cover DCE3.0 as well.
263 */
264 if (dig->dig_encoder == 0) { 261 if (dig->dig_encoder == 0) {
265 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; 262 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
266 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); 263 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
276 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 273 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
277 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 274 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
278 } 275 }
276 } else if (ASIC_IS_DCE3(rdev)) {
 277 /* according to the reg specs, this should be DCE3.2 only, but in
278 * practice it seems to cover DCE3.0/3.1 as well.
279 */
280 if (dig->dig_encoder == 0) {
281 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
282 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
283 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
284 } else {
285 WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
286 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
287 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
288 }
279 } else { 289 } else {
280 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ 290 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
281 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | 291 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
282 AUDIO_DTO_MODULE(clock / 10)); 292 AUDIO_DTO_MODULE(clock / 10));
283 } 293 }
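For context: as the comment earlier in this function notes, DCCG_AUDIO_DTOx_PHASE is the numerator and DCCG_AUDIO_DTOx_MODULE the denominator of the audio DTO's rational divider, so the new DCE3.0/3.1 branch writes base_rate*100 and clock*100 to preserve the base_rate/clock ratio and then points DCCG_AUDIO_DTO_SELECT at DTO0 or DTO1 depending on the DIG encoder in use. A tiny sketch of that arithmetic (illustrative only, not part of this patch; the helper name is hypothetical):

    /* Scaling numerator and denominator by the same constant keeps the
     * ratio, which is all the DTO cares about. */
    static void audio_dto_values(u32 base_rate, u32 clock, u32 *phase, u32 *module)
    {
            *phase  = base_rate * 100;
            *module = clock * 100;
    }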
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a849e4..e673fe26ea84 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
1040# define HDMI0_AVI_INFO_CONT (1 << 1) 1040# define HDMI0_AVI_INFO_CONT (1 << 1)
1041# define HDMI0_AUDIO_INFO_SEND (1 << 4) 1041# define HDMI0_AUDIO_INFO_SEND (1 << 4)
1042# define HDMI0_AUDIO_INFO_CONT (1 << 5) 1042# define HDMI0_AUDIO_INFO_CONT (1 << 5)
1043# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 1043# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
1044# define HDMI0_AUDIO_INFO_UPDATE (1 << 7) 1044# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
1045# define HDMI0_MPEG_INFO_SEND (1 << 8) 1045# define HDMI0_MPEG_INFO_SEND (1 << 8)
1046# define HDMI0_MPEG_INFO_CONT (1 << 9) 1046# define HDMI0_MPEG_INFO_CONT (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
181#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) 181#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
182 182
183/* PG flags */ 183/* PG flags */
184#define RADEON_PG_SUPPORT_GFX_CG (1 << 0) 184#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
185#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) 185#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
186#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) 186#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
187#define RADEON_PG_SUPPORT_UVD (1 << 3) 187#define RADEON_PG_SUPPORT_UVD (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); 1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
1779 bool (*vblank_too_short)(struct radeon_device *rdev); 1779 bool (*vblank_too_short)(struct radeon_device *rdev);
1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate); 1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
1781 void (*enable_bapm)(struct radeon_device *rdev, bool enable);
1781 } dpm; 1782 } dpm;
1782 /* pageflipping */ 1783 /* pageflipping */
1783 struct { 1784 struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
2110 resource_size_t rmmio_size; 2111 resource_size_t rmmio_size;
2111 /* protects concurrent MM_INDEX/DATA based register access */ 2112 /* protects concurrent MM_INDEX/DATA based register access */
2112 spinlock_t mmio_idx_lock; 2113 spinlock_t mmio_idx_lock;
2114 /* protects concurrent SMC based register access */
2115 spinlock_t smc_idx_lock;
2116 /* protects concurrent PLL register access */
2117 spinlock_t pll_idx_lock;
2118 /* protects concurrent MC register access */
2119 spinlock_t mc_idx_lock;
2120 /* protects concurrent PCIE register access */
2121 spinlock_t pcie_idx_lock;
2122 /* protects concurrent PCIE_PORT register access */
2123 spinlock_t pciep_idx_lock;
2124 /* protects concurrent PIF register access */
2125 spinlock_t pif_idx_lock;
2126 /* protects concurrent CG register access */
2127 spinlock_t cg_idx_lock;
2128 /* protects concurrent UVD register access */
2129 spinlock_t uvd_idx_lock;
2130 /* protects concurrent RCU register access */
2131 spinlock_t rcu_idx_lock;
2132 /* protects concurrent DIDT register access */
2133 spinlock_t didt_idx_lock;
2134 /* protects concurrent ENDPOINT (audio) register access */
2135 spinlock_t end_idx_lock;
2113 void __iomem *rmmio; 2136 void __iomem *rmmio;
2114 radeon_rreg_t mc_rreg; 2137 radeon_rreg_t mc_rreg;
2115 radeon_wreg_t mc_wreg; 2138 radeon_wreg_t mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
2277 */ 2300 */
2278static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 2301static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
2279{ 2302{
2303 unsigned long flags;
2280 uint32_t r; 2304 uint32_t r;
2281 2305
2306 spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
2282 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2307 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
2283 r = RREG32(RADEON_PCIE_DATA); 2308 r = RREG32(RADEON_PCIE_DATA);
2309 spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
2284 return r; 2310 return r;
2285} 2311}
2286 2312
2287static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2313static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2288{ 2314{
2315 unsigned long flags;
2316
2317 spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
2289 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2318 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
2290 WREG32(RADEON_PCIE_DATA, (v)); 2319 WREG32(RADEON_PCIE_DATA, (v));
2320 spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
2291} 2321}
2292 2322
2293static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) 2323static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
2294{ 2324{
2325 unsigned long flags;
2295 u32 r; 2326 u32 r;
2296 2327
2328 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
2297 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2329 WREG32(TN_SMC_IND_INDEX_0, (reg));
2298 r = RREG32(TN_SMC_IND_DATA_0); 2330 r = RREG32(TN_SMC_IND_DATA_0);
2331 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
2299 return r; 2332 return r;
2300} 2333}
2301 2334
2302static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2335static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2303{ 2336{
2337 unsigned long flags;
2338
2339 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
2304 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2340 WREG32(TN_SMC_IND_INDEX_0, (reg));
2305 WREG32(TN_SMC_IND_DATA_0, (v)); 2341 WREG32(TN_SMC_IND_DATA_0, (v));
2342 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
2306} 2343}
2307 2344
2308static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 2345static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
2309{ 2346{
2347 unsigned long flags;
2310 u32 r; 2348 u32 r;
2311 2349
2350 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
2312 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2351 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2313 r = RREG32(R600_RCU_DATA); 2352 r = RREG32(R600_RCU_DATA);
2353 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
2314 return r; 2354 return r;
2315} 2355}
2316 2356
2317static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2357static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2318{ 2358{
2359 unsigned long flags;
2360
2361 spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
2319 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2362 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
2320 WREG32(R600_RCU_DATA, (v)); 2363 WREG32(R600_RCU_DATA, (v));
2364 spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
2321} 2365}
2322 2366
2323static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) 2367static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
2324{ 2368{
2369 unsigned long flags;
2325 u32 r; 2370 u32 r;
2326 2371
2372 spin_lock_irqsave(&rdev->cg_idx_lock, flags);
2327 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2373 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2328 r = RREG32(EVERGREEN_CG_IND_DATA); 2374 r = RREG32(EVERGREEN_CG_IND_DATA);
2375 spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
2329 return r; 2376 return r;
2330} 2377}
2331 2378
2332static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2379static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2333{ 2380{
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&rdev->cg_idx_lock, flags);
2334 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2384 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
2335 WREG32(EVERGREEN_CG_IND_DATA, (v)); 2385 WREG32(EVERGREEN_CG_IND_DATA, (v));
2386 spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
2336} 2387}
2337 2388
2338static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) 2389static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
2339{ 2390{
2391 unsigned long flags;
2340 u32 r; 2392 u32 r;
2341 2393
2394 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2342 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2395 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2343 r = RREG32(EVERGREEN_PIF_PHY0_DATA); 2396 r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2397 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2344 return r; 2398 return r;
2345} 2399}
2346 2400
2347static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2401static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2348{ 2402{
2403 unsigned long flags;
2404
2405 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2349 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2406 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2350 WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); 2407 WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2408 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2351} 2409}
2352 2410
2353static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) 2411static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
2354{ 2412{
2413 unsigned long flags;
2355 u32 r; 2414 u32 r;
2356 2415
2416 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2357 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2417 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2358 r = RREG32(EVERGREEN_PIF_PHY1_DATA); 2418 r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2419 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2359 return r; 2420 return r;
2360} 2421}
2361 2422
2362static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2423static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2363{ 2424{
2425 unsigned long flags;
2426
2427 spin_lock_irqsave(&rdev->pif_idx_lock, flags);
2364 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2428 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2365 WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); 2429 WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2430 spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
2366} 2431}
2367 2432
2368static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 2433static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
2369{ 2434{
2435 unsigned long flags;
2370 u32 r; 2436 u32 r;
2371 2437
2438 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
2372 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2439 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2373 r = RREG32(R600_UVD_CTX_DATA); 2440 r = RREG32(R600_UVD_CTX_DATA);
2441 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2374 return r; 2442 return r;
2375} 2443}
2376 2444
2377static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2445static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2378{ 2446{
2447 unsigned long flags;
2448
2449 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
2379 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2450 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2380 WREG32(R600_UVD_CTX_DATA, (v)); 2451 WREG32(R600_UVD_CTX_DATA, (v));
2452 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2381} 2453}
2382 2454
2383 2455
2384static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 2456static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
2385{ 2457{
2458 unsigned long flags;
2386 u32 r; 2459 u32 r;
2387 2460
2461 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2388 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2462 WREG32(CIK_DIDT_IND_INDEX, (reg));
2389 r = RREG32(CIK_DIDT_IND_DATA); 2463 r = RREG32(CIK_DIDT_IND_DATA);
2464 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2390 return r; 2465 return r;
2391} 2466}
2392 2467
2393static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2468static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2394{ 2469{
2470 unsigned long flags;
2471
2472 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2395 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2473 WREG32(CIK_DIDT_IND_INDEX, (reg));
2396 WREG32(CIK_DIDT_IND_DATA, (v)); 2474 WREG32(CIK_DIDT_IND_DATA, (v));
2475 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2397} 2476}
2398 2477
2399void r100_pll_errata_after_index(struct radeon_device *rdev); 2478void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2569#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) 2648#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2570#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) 2649#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2571#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) 2650#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2651#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
2572 2652
2573/* Common functions */ 2653/* Common functions */
2574/* AGP */ 2654/* AGP */
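For context: the radeon.h hunks above add the enable_bapm hook the same way the driver exposes every other dpm operation: a per-family function pointer in struct radeon_asic plus a one-line dispatch macro, while the new index spinlocks become extra members of struct radeon_device that get initialised at probe time. A condensed sketch of the hook/macro pairing (illustrative only, not part of this patch; the struct name is a hypothetical stand-in for the anonymous dpm sub-struct):

    /* Optional per-ASIC hook plus its dispatch macro. */
    struct radeon_asic_dpm_ops {                    /* hypothetical name */
            /* ... existing hooks ... */
            void (*enable_bapm)(struct radeon_device *rdev, bool enable);
    };

    #define radeon_dpm_enable_bapm(rdev, e) \
            ((rdev)->asic->dpm.enable_bapm((rdev), (e)))

Callers that treat the hook as optional test the pointer before using the macro, as the radeon_pm.c hunk further down does.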
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b96841..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
1004 .wait_for_vblank = &avivo_wait_for_vblank, 1004 .wait_for_vblank = &avivo_wait_for_vblank,
1005 .set_backlight_level = &atombios_set_backlight_level, 1005 .set_backlight_level = &atombios_set_backlight_level,
1006 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1007 .hdmi_enable = &r600_hdmi_enable,
1008 .hdmi_setmode = &r600_hdmi_setmode,
1007 }, 1009 },
1008 .copy = { 1010 .copy = {
1009 .blit = &r600_copy_cpdma, 1011 .blit = &r600_copy_cpdma,
@@ -1037,6 +1039,7 @@ static struct radeon_asic rv6xx_asic = {
1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1039 .set_pcie_lanes = &r600_set_pcie_lanes,
1038 .set_clock_gating = NULL, 1040 .set_clock_gating = NULL,
1039 .get_temperature = &rv6xx_get_temp, 1041 .get_temperature = &rv6xx_get_temp,
1042 .set_uvd_clocks = &r600_set_uvd_clocks,
1040 }, 1043 },
1041 .dpm = { 1044 .dpm = {
1042 .init = &rv6xx_dpm_init, 1045 .init = &rv6xx_dpm_init,
@@ -1126,6 +1129,7 @@ static struct radeon_asic rs780_asic = {
1126 .set_pcie_lanes = NULL, 1129 .set_pcie_lanes = NULL,
1127 .set_clock_gating = NULL, 1130 .set_clock_gating = NULL,
1128 .get_temperature = &rv6xx_get_temp, 1131 .get_temperature = &rv6xx_get_temp,
1132 .set_uvd_clocks = &r600_set_uvd_clocks,
1129 }, 1133 },
1130 .dpm = { 1134 .dpm = {
1131 .init = &rs780_dpm_init, 1135 .init = &rs780_dpm_init,
@@ -1141,6 +1145,7 @@ static struct radeon_asic rs780_asic = {
1141 .get_mclk = &rs780_dpm_get_mclk, 1145 .get_mclk = &rs780_dpm_get_mclk,
1142 .print_power_state = &rs780_dpm_print_power_state, 1146 .print_power_state = &rs780_dpm_print_power_state,
1143 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1147 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
1148 .force_performance_level = &rs780_dpm_force_performance_level,
1144 }, 1149 },
1145 .pflip = { 1150 .pflip = {
1146 .pre_page_flip = &rs600_pre_page_flip, 1151 .pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1796,7 @@ static struct radeon_asic trinity_asic = {
1791 .print_power_state = &trinity_dpm_print_power_state, 1796 .print_power_state = &trinity_dpm_print_power_state,
1792 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1797 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
1793 .force_performance_level = &trinity_dpm_force_performance_level, 1798 .force_performance_level = &trinity_dpm_force_performance_level,
1799 .enable_bapm = &trinity_dpm_enable_bapm,
1794 }, 1800 },
1795 .pflip = { 1801 .pflip = {
1796 .pre_page_flip = &evergreen_pre_page_flip, 1802 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2172,7 @@ static struct radeon_asic kv_asic = {
2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2172 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
2167 .force_performance_level = &kv_dpm_force_performance_level, 2173 .force_performance_level = &kv_dpm_force_performance_level,
2168 .powergate_uvd = &kv_dpm_powergate_uvd, 2174 .powergate_uvd = &kv_dpm_powergate_uvd,
2175 .enable_bapm = &kv_dpm_enable_bapm,
2169 }, 2176 },
2170 .pflip = { 2177 .pflip = {
2171 .pre_page_flip = &evergreen_pre_page_flip, 2178 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2397,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2390 RADEON_CG_SUPPORT_HDP_LS | 2397 RADEON_CG_SUPPORT_HDP_LS |
2391 RADEON_CG_SUPPORT_HDP_MGCG; 2398 RADEON_CG_SUPPORT_HDP_MGCG;
2392 rdev->pg_flags = 0 | 2399 rdev->pg_flags = 0 |
2393 /*RADEON_PG_SUPPORT_GFX_CG | */ 2400 /*RADEON_PG_SUPPORT_GFX_PG | */
2394 RADEON_PG_SUPPORT_SDMA; 2401 RADEON_PG_SUPPORT_SDMA;
2395 break; 2402 break;
2396 case CHIP_OLAND: 2403 case CHIP_OLAND:
@@ -2479,7 +2486,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2479 RADEON_CG_SUPPORT_HDP_LS | 2486 RADEON_CG_SUPPORT_HDP_LS |
2480 RADEON_CG_SUPPORT_HDP_MGCG; 2487 RADEON_CG_SUPPORT_HDP_MGCG;
2481 rdev->pg_flags = 0; 2488 rdev->pg_flags = 0;
2482 /*RADEON_PG_SUPPORT_GFX_CG | 2489 /*RADEON_PG_SUPPORT_GFX_PG |
2483 RADEON_PG_SUPPORT_GFX_SMG | 2490 RADEON_PG_SUPPORT_GFX_SMG |
2484 RADEON_PG_SUPPORT_GFX_DMG | 2491 RADEON_PG_SUPPORT_GFX_DMG |
2485 RADEON_PG_SUPPORT_UVD | 2492 RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2514,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2507 RADEON_CG_SUPPORT_HDP_LS | 2514 RADEON_CG_SUPPORT_HDP_LS |
2508 RADEON_CG_SUPPORT_HDP_MGCG; 2515 RADEON_CG_SUPPORT_HDP_MGCG;
2509 rdev->pg_flags = 0; 2516 rdev->pg_flags = 0;
2510 /*RADEON_PG_SUPPORT_GFX_CG | 2517 /*RADEON_PG_SUPPORT_GFX_PG |
2511 RADEON_PG_SUPPORT_GFX_SMG | 2518 RADEON_PG_SUPPORT_GFX_SMG |
2512 RADEON_PG_SUPPORT_UVD | 2519 RADEON_PG_SUPPORT_UVD |
2513 RADEON_PG_SUPPORT_VCE | 2520 RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6b884b..70c29d5e080d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
389u32 r600_get_xclk(struct radeon_device *rdev); 389u32 r600_get_xclk(struct radeon_device *rdev);
390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
391int rv6xx_get_temp(struct radeon_device *rdev); 391int rv6xx_get_temp(struct radeon_device *rdev);
392int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
392int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 393int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
393void r600_dpm_post_set_power_state(struct radeon_device *rdev); 394void r600_dpm_post_set_power_state(struct radeon_device *rdev);
394/* r600 dma */ 395/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
428 struct radeon_ps *ps); 429 struct radeon_ps *ps);
429void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 430void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
430 struct seq_file *m); 431 struct seq_file *m);
432int rs780_dpm_force_performance_level(struct radeon_device *rdev,
433 enum radeon_dpm_forced_level level);
431 434
432/* 435/*
433 * rv770,rv730,rv710,rv740 436 * rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
625 struct seq_file *m); 628 struct seq_file *m);
626int trinity_dpm_force_performance_level(struct radeon_device *rdev, 629int trinity_dpm_force_performance_level(struct radeon_device *rdev,
627 enum radeon_dpm_forced_level level); 630 enum radeon_dpm_forced_level level);
631void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
628 632
629/* DCE6 - SI */ 633/* DCE6 - SI */
630void dce6_bandwidth_update(struct radeon_device *rdev); 634void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
781int kv_dpm_force_performance_level(struct radeon_device *rdev, 785int kv_dpm_force_performance_level(struct radeon_device *rdev,
782 enum radeon_dpm_forced_level level); 786 enum radeon_dpm_forced_level level);
783void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 787void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
788void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
784 789
785/* uvd v1.0 */ 790/* uvd v1.0 */
786uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, 791uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); 1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
1368 uint16_t data_offset, size; 1368 uint16_t data_offset, size;
1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
1370 struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
1370 uint8_t frev, crev; 1371 uint8_t frev, crev;
1371 int i, num_indices; 1372 int i, num_indices;
1372 1373
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1378 1379
1379 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1380 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1380 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); 1381 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1381 1382 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1383 ((u8 *)&ss_info->asSS_Info[0]);
1382 for (i = 0; i < num_indices; i++) { 1384 for (i = 0; i < num_indices; i++) {
1383 if (ss_info->asSS_Info[i].ucSS_Id == id) { 1385 if (ss_assign->ucSS_Id == id) {
1384 ss->percentage = 1386 ss->percentage =
1385 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); 1387 le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
1386 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; 1388 ss->type = ss_assign->ucSpreadSpectrumType;
1387 ss->step = ss_info->asSS_Info[i].ucSS_Step; 1389 ss->step = ss_assign->ucSS_Step;
1388 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1390 ss->delay = ss_assign->ucSS_Delay;
1389 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1391 ss->range = ss_assign->ucSS_Range;
1390 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1392 ss->refdiv = ss_assign->ucRecommendedRef_Div;
1391 return true; 1393 return true;
1392 } 1394 }
1395 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1396 ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
1393 } 1397 }
1394 } 1398 }
1395 return false; 1399 return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
1477 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; 1481 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
1478}; 1482};
1479 1483
1484union asic_ss_assignment {
1485 struct _ATOM_ASIC_SS_ASSIGNMENT v1;
1486 struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
1487 struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
1488};
1489
1480bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 1490bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1481 struct radeon_atom_ss *ss, 1491 struct radeon_atom_ss *ss,
1482 int id, u32 clock) 1492 int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1485 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1495 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1486 uint16_t data_offset, size; 1496 uint16_t data_offset, size;
1487 union asic_ss_info *ss_info; 1497 union asic_ss_info *ss_info;
1498 union asic_ss_assignment *ss_assign;
1488 uint8_t frev, crev; 1499 uint8_t frev, crev;
1489 int i, num_indices; 1500 int i, num_indices;
1490 1501
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1509 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1520 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1510 sizeof(ATOM_ASIC_SS_ASSIGNMENT); 1521 sizeof(ATOM_ASIC_SS_ASSIGNMENT);
1511 1522
1523 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
1512 for (i = 0; i < num_indices; i++) { 1524 for (i = 0; i < num_indices; i++) {
1513 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1525 if ((ss_assign->v1.ucClockIndication == id) &&
1514 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { 1526 (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
1515 ss->percentage = 1527 ss->percentage =
1516 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1528 le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
1517 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1529 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
1518 ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); 1530 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
1519 return true; 1531 return true;
1520 } 1532 }
1533 ss_assign = (union asic_ss_assignment *)
1534 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1521 } 1535 }
1522 break; 1536 break;
1523 case 2: 1537 case 2:
1524 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1538 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1525 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1539 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1540 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
1526 for (i = 0; i < num_indices; i++) { 1541 for (i = 0; i < num_indices; i++) {
1527 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1542 if ((ss_assign->v2.ucClockIndication == id) &&
1528 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { 1543 (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
1529 ss->percentage = 1544 ss->percentage =
1530 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1545 le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
1531 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1546 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
1532 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1547 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
1533 if ((crev == 2) && 1548 if ((crev == 2) &&
1534 ((id == ASIC_INTERNAL_ENGINE_SS) || 1549 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))) 1550 (id == ASIC_INTERNAL_MEMORY_SS)))
1536 ss->rate /= 100; 1551 ss->rate /= 100;
1537 return true; 1552 return true;
1538 } 1553 }
1554 ss_assign = (union asic_ss_assignment *)
1555 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
1539 } 1556 }
1540 break; 1557 break;
1541 case 3: 1558 case 3:
1542 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1559 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1543 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1560 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1561 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
1544 for (i = 0; i < num_indices; i++) { 1562 for (i = 0; i < num_indices; i++) {
1545 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1563 if ((ss_assign->v3.ucClockIndication == id) &&
1546 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { 1564 (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
1547 ss->percentage = 1565 ss->percentage =
1548 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1566 le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
1549 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1567 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
1550 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1568 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
1551 if ((id == ASIC_INTERNAL_ENGINE_SS) || 1569 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1552 (id == ASIC_INTERNAL_MEMORY_SS)) 1570 (id == ASIC_INTERNAL_MEMORY_SS))
1553 ss->rate /= 100; 1571 ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1555 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1573 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1556 return true; 1574 return true;
1557 } 1575 }
1576 ss_assign = (union asic_ss_assignment *)
1577 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
1558 } 1578 }
1559 break; 1579 break;
1560 default: 1580 default:
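For context: the radeon_atombios.c changes above stop indexing asSS_Info[i] and asSpreadSpectrum[i] directly; instead they keep a byte cursor and advance it by the size of the specific record revision (V1/V2/V3), so the stride always matches the layout actually being parsed. The general walking pattern (illustrative only, not part of this patch; struct record is a hypothetical fixed header shared by the revisions):

    struct record {                     /* hypothetical: common leading field(s) */
            u8 id;
            /* revision-dependent payload follows */
    };

    /* Walk 'count' records laid out back to back with a given byte stride. */
    static const struct record *find_record(const u8 *first, int count,
                                            size_t stride, u8 wanted_id)
    {
            const u8 *cur = first;
            int i;

            for (i = 0; i < count; i++) {
                    const struct record *r = (const struct record *)cur;

                    if (r->id == wanted_id)
                            return r;
                    cur += stride;      /* stride depends on the table revision */
            }
            return NULL;
    }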
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25ec037..79159b5da05b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
396 } 396 }
397 } 397 }
398 398
399 if (property == rdev->mode_info.audio_property) {
400 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
401 /* need to find digital encoder on connector */
402 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
403 if (!encoder)
404 return 0;
405
406 radeon_encoder = to_radeon_encoder(encoder);
407
408 if (radeon_connector->audio != val) {
409 radeon_connector->audio = val;
410 radeon_property_change_mode(&radeon_encoder->base);
411 }
412 }
413
399 if (property == rdev->mode_info.underscan_property) { 414 if (property == rdev->mode_info.underscan_property) {
400 /* need to find digital encoder on connector */ 415 /* need to find digital encoder on connector */
401 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1420 if (radeon_dp_getdpcd(radeon_connector)) 1435 if (radeon_dp_getdpcd(radeon_connector))
1421 ret = connector_status_connected; 1436 ret = connector_status_connected;
1422 } else { 1437 } else {
1423 /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */ 1438 /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
1424 if (radeon_ddc_probe(radeon_connector, false)) 1439 if (radeon_ddc_probe(radeon_connector, false))
1425 ret = connector_status_connected; 1440 ret = connector_status_connected;
1426 } 1441 }
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1489 .force = radeon_dvi_force, 1504 .force = radeon_dvi_force,
1490}; 1505};
1491 1506
1507static const struct drm_connector_funcs radeon_edp_connector_funcs = {
1508 .dpms = drm_helper_connector_dpms,
1509 .detect = radeon_dp_detect,
1510 .fill_modes = drm_helper_probe_single_connector_modes,
1511 .set_property = radeon_lvds_set_property,
1512 .destroy = radeon_dp_connector_destroy,
1513 .force = radeon_dvi_force,
1514};
1515
1516static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
1517 .dpms = drm_helper_connector_dpms,
1518 .detect = radeon_dp_detect,
1519 .fill_modes = drm_helper_probe_single_connector_modes,
1520 .set_property = radeon_lvds_set_property,
1521 .destroy = radeon_dp_connector_destroy,
1522 .force = radeon_dvi_force,
1523};
1524
1492void 1525void
1493radeon_add_atom_connector(struct drm_device *dev, 1526radeon_add_atom_connector(struct drm_device *dev,
1494 uint32_t connector_id, 1527 uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1580 goto failed; 1613 goto failed;
1581 radeon_dig_connector->igp_lane_info = igp_lane_info; 1614 radeon_dig_connector->igp_lane_info = igp_lane_info;
1582 radeon_connector->con_priv = radeon_dig_connector; 1615 radeon_connector->con_priv = radeon_dig_connector;
1583 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1584 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1585 if (i2c_bus->valid) { 1616 if (i2c_bus->valid) {
1586 /* add DP i2c bus */ 1617 /* add DP i2c bus */
1587 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1618 if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1598 case DRM_MODE_CONNECTOR_VGA: 1629 case DRM_MODE_CONNECTOR_VGA:
1599 case DRM_MODE_CONNECTOR_DVIA: 1630 case DRM_MODE_CONNECTOR_DVIA:
1600 default: 1631 default:
1632 drm_connector_init(dev, &radeon_connector->base,
1633 &radeon_dp_connector_funcs, connector_type);
1634 drm_connector_helper_add(&radeon_connector->base,
1635 &radeon_dp_connector_helper_funcs);
1601 connector->interlace_allowed = true; 1636 connector->interlace_allowed = true;
1602 connector->doublescan_allowed = true; 1637 connector->doublescan_allowed = true;
1603 radeon_connector->dac_load_detect = true; 1638 radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1610 case DRM_MODE_CONNECTOR_HDMIA: 1645 case DRM_MODE_CONNECTOR_HDMIA:
1611 case DRM_MODE_CONNECTOR_HDMIB: 1646 case DRM_MODE_CONNECTOR_HDMIB:
1612 case DRM_MODE_CONNECTOR_DisplayPort: 1647 case DRM_MODE_CONNECTOR_DisplayPort:
1648 drm_connector_init(dev, &radeon_connector->base,
1649 &radeon_dp_connector_funcs, connector_type);
1650 drm_connector_helper_add(&radeon_connector->base,
1651 &radeon_dp_connector_helper_funcs);
1613 drm_object_attach_property(&radeon_connector->base.base, 1652 drm_object_attach_property(&radeon_connector->base.base,
1614 rdev->mode_info.underscan_property, 1653 rdev->mode_info.underscan_property,
1615 UNDERSCAN_OFF); 1654 UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1619 drm_object_attach_property(&radeon_connector->base.base, 1658 drm_object_attach_property(&radeon_connector->base.base,
1620 rdev->mode_info.underscan_vborder_property, 1659 rdev->mode_info.underscan_vborder_property,
1621 0); 1660 0);
1661 drm_object_attach_property(&radeon_connector->base.base,
1662 rdev->mode_info.audio_property,
1663 RADEON_AUDIO_DISABLE);
1622 subpixel_order = SubPixelHorizontalRGB; 1664 subpixel_order = SubPixelHorizontalRGB;
1623 connector->interlace_allowed = true; 1665 connector->interlace_allowed = true;
1624 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1666 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1634 break; 1676 break;
1635 case DRM_MODE_CONNECTOR_LVDS: 1677 case DRM_MODE_CONNECTOR_LVDS:
1636 case DRM_MODE_CONNECTOR_eDP: 1678 case DRM_MODE_CONNECTOR_eDP:
1679 drm_connector_init(dev, &radeon_connector->base,
1680 &radeon_lvds_bridge_connector_funcs, connector_type);
1681 drm_connector_helper_add(&radeon_connector->base,
1682 &radeon_dp_connector_helper_funcs);
1637 drm_object_attach_property(&radeon_connector->base.base, 1683 drm_object_attach_property(&radeon_connector->base.base,
1638 dev->mode_config.scaling_mode_property, 1684 dev->mode_config.scaling_mode_property,
1639 DRM_MODE_SCALE_FULLSCREEN); 1685 DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1708 rdev->mode_info.underscan_vborder_property, 1754 rdev->mode_info.underscan_vborder_property,
1709 0); 1755 0);
1710 } 1756 }
1757 if (ASIC_IS_DCE2(rdev)) {
1758 drm_object_attach_property(&radeon_connector->base.base,
1759 rdev->mode_info.audio_property,
1760 RADEON_AUDIO_DISABLE);
1761 }
1711 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1762 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1712 radeon_connector->dac_load_detect = true; 1763 radeon_connector->dac_load_detect = true;
1713 drm_object_attach_property(&radeon_connector->base.base, 1764 drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1748 rdev->mode_info.underscan_vborder_property, 1799 rdev->mode_info.underscan_vborder_property,
1749 0); 1800 0);
1750 } 1801 }
1802 if (ASIC_IS_DCE2(rdev)) {
1803 drm_object_attach_property(&radeon_connector->base.base,
1804 rdev->mode_info.audio_property,
1805 RADEON_AUDIO_DISABLE);
1806 }
1751 subpixel_order = SubPixelHorizontalRGB; 1807 subpixel_order = SubPixelHorizontalRGB;
1752 connector->interlace_allowed = true; 1808 connector->interlace_allowed = true;
1753 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1809 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1787 rdev->mode_info.underscan_vborder_property, 1843 rdev->mode_info.underscan_vborder_property,
1788 0); 1844 0);
1789 } 1845 }
1846 if (ASIC_IS_DCE2(rdev)) {
1847 drm_object_attach_property(&radeon_connector->base.base,
1848 rdev->mode_info.audio_property,
1849 RADEON_AUDIO_DISABLE);
1850 }
1790 connector->interlace_allowed = true; 1851 connector->interlace_allowed = true;
1791 /* in theory with a DP to VGA converter... */ 1852 /* in theory with a DP to VGA converter... */
1792 connector->doublescan_allowed = false; 1853 connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1797 goto failed; 1858 goto failed;
1798 radeon_dig_connector->igp_lane_info = igp_lane_info; 1859 radeon_dig_connector->igp_lane_info = igp_lane_info;
1799 radeon_connector->con_priv = radeon_dig_connector; 1860 radeon_connector->con_priv = radeon_dig_connector;
1800 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1861 drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
1801 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1862 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1802 if (i2c_bus->valid) { 1863 if (i2c_bus->valid) {
1803 /* add DP i2c bus */ 1864 /* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a56084410372..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_trace.h"
31 32
32static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 33static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
33{ 34{
@@ -80,10 +81,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
80 p->relocs[i].lobj.bo = p->relocs[i].robj; 81 p->relocs[i].lobj.bo = p->relocs[i].robj;
81 p->relocs[i].lobj.written = !!r->write_domain; 82 p->relocs[i].lobj.written = !!r->write_domain;
82 83
83 /* the first reloc of an UVD job is the 84 /* the first reloc of an UVD job is the msg and that must be in
  84 msg and that must be in VRAM */ 85 VRAM, also put everything into VRAM on AGP cards to avoid
85 if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) { 86 image corruptions */
86 /* TODO: is this still needed for NI+ ? */ 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 p->rdev->family < CHIP_PALM &&
89 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
90
87 p->relocs[i].lobj.domain = 91 p->relocs[i].lobj.domain =
88 RADEON_GEM_DOMAIN_VRAM; 92 RADEON_GEM_DOMAIN_VRAM;
89 93
@@ -559,6 +563,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
559 return r; 563 return r;
560 } 564 }
561 565
566 trace_radeon_cs(&parser);
567
562 r = radeon_cs_ib_chunk(rdev, &parser); 568 r = radeon_cs_ib_chunk(rdev, &parser);
563 if (r) { 569 if (r) {
564 goto out; 570 goto out;
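For context: the radeon_cs.c hunk widens the UVD placement rule: on pre-PALM parts every UVD relocation is forced into VRAM when the board is AGP, not just the first (message) buffer, to avoid image corruption; it also adds a trace_radeon_cs() tracepoint before the IB is scheduled. The condition in isolation (illustrative only, not part of this patch; the symbols are the ones used in the hunk, the helper name is hypothetical):

    /* Does this UVD relocation have to live in VRAM? */
    static bool uvd_reloc_needs_vram(struct radeon_cs_parser *p, int i)
    {
            return p->ring == R600_RING_TYPE_UVD_INDEX &&
                   p->rdev->family < CHIP_PALM &&
                   (i == 0 || drm_pci_device_is_agp(p->rdev->ddev));
    }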
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb8792b1e6..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
1249 /* Registers mapping */ 1249 /* Registers mapping */
1250 /* TODO: block userspace mapping of io register */ 1250 /* TODO: block userspace mapping of io register */
1251 spin_lock_init(&rdev->mmio_idx_lock); 1251 spin_lock_init(&rdev->mmio_idx_lock);
1252 spin_lock_init(&rdev->smc_idx_lock);
1253 spin_lock_init(&rdev->pll_idx_lock);
1254 spin_lock_init(&rdev->mc_idx_lock);
1255 spin_lock_init(&rdev->pcie_idx_lock);
1256 spin_lock_init(&rdev->pciep_idx_lock);
1257 spin_lock_init(&rdev->pif_idx_lock);
1258 spin_lock_init(&rdev->cg_idx_lock);
1259 spin_lock_init(&rdev->uvd_idx_lock);
1260 spin_lock_init(&rdev->rcu_idx_lock);
1261 spin_lock_init(&rdev->didt_idx_lock);
1262 spin_lock_init(&rdev->end_idx_lock);
1252 if (rdev->family >= CHIP_BONAIRE) { 1263 if (rdev->family >= CHIP_BONAIRE) {
1253 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1264 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1254 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1265 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@@ -1309,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
1309 return r; 1320 return r;
1310 } 1321 }
1311 if ((radeon_testing & 1)) { 1322 if ((radeon_testing & 1)) {
1312 radeon_test_moves(rdev); 1323 if (rdev->accel_working)
1324 radeon_test_moves(rdev);
1325 else
1326 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1313 } 1327 }
1314 if ((radeon_testing & 2)) { 1328 if ((radeon_testing & 2)) {
1315 radeon_test_syncing(rdev); 1329 if (rdev->accel_working)
1330 radeon_test_syncing(rdev);
1331 else
1332 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1316 } 1333 }
1317 if (radeon_benchmarking) { 1334 if (radeon_benchmarking) {
1318 radeon_benchmark(rdev, radeon_benchmarking); 1335 if (rdev->accel_working)
1336 radeon_benchmark(rdev, radeon_benchmarking);
1337 else
1338 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1319 } 1339 }
1320 return 0; 1340 return 0;
1321} 1341}
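For context: the radeon_device.c hunks initialise the new per-block index spinlocks at probe time and skip the optional move/sync tests and benchmarks when acceleration failed to come up, printing why instead of poking dead rings. The guard, reduced to a hypothetical helper (illustrative only, not part of this patch):

    /* Run an optional self-test only if acceleration is actually working. */
    static void run_if_accel(struct radeon_device *rdev,
                             void (*test)(struct radeon_device *),
                             const char *what)
    {
            if (rdev->accel_working)
                    test(rdev);
            else
                    DRM_INFO("radeon: acceleration disabled, skipping %s\n", what);
    }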
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bddaa94c..0d1aa050d41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
1172 { UNDERSCAN_AUTO, "auto" }, 1172 { UNDERSCAN_AUTO, "auto" },
1173}; 1173};
1174 1174
1175static struct drm_prop_enum_list radeon_audio_enum_list[] =
1176{ { RADEON_AUDIO_DISABLE, "off" },
1177 { RADEON_AUDIO_ENABLE, "on" },
1178 { RADEON_AUDIO_AUTO, "auto" },
1179};
1180
1175static int radeon_modeset_create_props(struct radeon_device *rdev) 1181static int radeon_modeset_create_props(struct radeon_device *rdev)
1176{ 1182{
1177 int sz; 1183 int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
1222 if (!rdev->mode_info.underscan_vborder_property) 1228 if (!rdev->mode_info.underscan_vborder_property)
1223 return -ENOMEM; 1229 return -ENOMEM;
1224 1230
1231 sz = ARRAY_SIZE(radeon_audio_enum_list);
1232 rdev->mode_info.audio_property =
1233 drm_property_create_enum(rdev->ddev, 0,
1234 "audio",
1235 radeon_audio_enum_list, sz);
1236
1225 return 0; 1237 return 0;
1226} 1238}
1227 1239
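For context: the new per-connector "audio" setting follows the standard DRM enum-property recipe: create the property once during modeset init (here with off/on/auto values) and attach it from radeon_add_atom_connector() to each connector that can carry audio. The creation step in isolation (illustrative only, not part of this patch; the wrapper function is hypothetical):

    /* Create the tri-state "audio" enum property once per device. */
    static int create_audio_property(struct radeon_device *rdev)
    {
            static struct drm_prop_enum_list audio_list[] = {
                    { RADEON_AUDIO_DISABLE, "off"  },
                    { RADEON_AUDIO_ENABLE,  "on"   },
                    { RADEON_AUDIO_AUTO,    "auto" },
            };

            rdev->mode_info.audio_property =
                    drm_property_create_enum(rdev->ddev, 0, "audio",
                                             audio_list, ARRAY_SIZE(audio_list));
            if (!rdev->mode_info.audio_property)
                    return -ENOMEM;     /* the hunk above omits this check */
            return 0;
    }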
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f55a96..cdd12dcd988b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
153int radeon_testing = 0; 153int radeon_testing = 0;
154int radeon_connector_table = 0; 154int radeon_connector_table = 0;
155int radeon_tv = 1; 155int radeon_tv = 1;
156int radeon_audio = 0; 156int radeon_audio = 1;
157int radeon_disp_priority = 0; 157int radeon_disp_priority = 0;
158int radeon_hw_i2c = 0; 158int radeon_hw_i2c = 0;
159int radeon_pcie_gen2 = -1; 159int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d68f6b..ef63d3f00b2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
247 struct drm_property *underscan_property; 247 struct drm_property *underscan_property;
248 struct drm_property *underscan_hborder_property; 248 struct drm_property *underscan_hborder_property;
249 struct drm_property *underscan_vborder_property; 249 struct drm_property *underscan_vborder_property;
250 /* audio */
251 struct drm_property *audio_property;
250 /* hardcoded DFP edid from BIOS */ 252 /* hardcoded DFP edid from BIOS */
251 struct edid *bios_hardcoded_edid; 253 struct edid *bios_hardcoded_edid;
252 int bios_hardcoded_edid_size; 254 int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
471 u8 cd_mux_state; 473 u8 cd_mux_state;
472}; 474};
473 475
476enum radeon_connector_audio {
477 RADEON_AUDIO_DISABLE = 0,
478 RADEON_AUDIO_ENABLE = 1,
479 RADEON_AUDIO_AUTO = 2
480};
481
474struct radeon_connector { 482struct radeon_connector {
475 struct drm_connector base; 483 struct drm_connector base;
476 uint32_t connector_id; 484 uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
489 struct radeon_hpd hpd; 497 struct radeon_hpd hpd;
490 struct radeon_router router; 498 struct radeon_router router;
491 struct radeon_i2c_chan *router_bus; 499 struct radeon_i2c_chan *router_bus;
500 enum radeon_connector_audio audio;
492}; 501};
493 502
494struct radeon_framebuffer { 503struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d7555369a3e5..ac07ad1d4f8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
67 67
68void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 68void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69{ 69{
70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71 mutex_lock(&rdev->pm.mutex);
72 if (power_supply_is_system_supplied() > 0)
73 rdev->pm.dpm.ac_power = true;
74 else
75 rdev->pm.dpm.ac_power = false;
76 if (rdev->asic->dpm.enable_bapm)
77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
78 mutex_unlock(&rdev->pm.mutex);
79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
71 if (rdev->pm.profile == PM_PROFILE_AUTO) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) {
72 mutex_lock(&rdev->pm.mutex); 81 mutex_lock(&rdev->pm.mutex);
73 radeon_pm_update_profile(rdev); 82 radeon_pm_update_profile(rdev);
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
333 struct device_attribute *attr, 342 struct device_attribute *attr,
334 char *buf) 343 char *buf)
335{ 344{
336 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 345 struct drm_device *ddev = dev_get_drvdata(dev);
337 struct radeon_device *rdev = ddev->dev_private; 346 struct radeon_device *rdev = ddev->dev_private;
338 int cp = rdev->pm.profile; 347 int cp = rdev->pm.profile;
339 348
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
349 const char *buf, 358 const char *buf,
350 size_t count) 359 size_t count)
351{ 360{
352 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 361 struct drm_device *ddev = dev_get_drvdata(dev);
353 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
354 363
355 mutex_lock(&rdev->pm.mutex); 364 mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
383 struct device_attribute *attr, 392 struct device_attribute *attr,
384 char *buf) 393 char *buf)
385{ 394{
386 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 395 struct drm_device *ddev = dev_get_drvdata(dev);
387 struct radeon_device *rdev = ddev->dev_private; 396 struct radeon_device *rdev = ddev->dev_private;
388 int pm = rdev->pm.pm_method; 397 int pm = rdev->pm.pm_method;
389 398
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
397 const char *buf, 406 const char *buf,
398 size_t count) 407 size_t count)
399{ 408{
400 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 409 struct drm_device *ddev = dev_get_drvdata(dev);
401 struct radeon_device *rdev = ddev->dev_private; 410 struct radeon_device *rdev = ddev->dev_private;
402 411
403 /* we don't support the legacy modes with dpm */ 412 /* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
433 struct device_attribute *attr, 442 struct device_attribute *attr,
434 char *buf) 443 char *buf)
435{ 444{
436 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 445 struct drm_device *ddev = dev_get_drvdata(dev);
437 struct radeon_device *rdev = ddev->dev_private; 446 struct radeon_device *rdev = ddev->dev_private;
438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
439 448
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
447 const char *buf, 456 const char *buf,
448 size_t count) 457 size_t count)
449{ 458{
450 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 459 struct drm_device *ddev = dev_get_drvdata(dev);
451 struct radeon_device *rdev = ddev->dev_private; 460 struct radeon_device *rdev = ddev->dev_private;
452 461
453 mutex_lock(&rdev->pm.mutex); 462 mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
472 struct device_attribute *attr, 481 struct device_attribute *attr,
473 char *buf) 482 char *buf)
474{ 483{
475 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 484 struct drm_device *ddev = dev_get_drvdata(dev);
476 struct radeon_device *rdev = ddev->dev_private; 485 struct radeon_device *rdev = ddev->dev_private;
477 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
478 487
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
486 const char *buf, 495 const char *buf,
487 size_t count) 496 size_t count)
488{ 497{
489 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 498 struct drm_device *ddev = dev_get_drvdata(dev);
490 struct radeon_device *rdev = ddev->dev_private; 499 struct radeon_device *rdev = ddev->dev_private;
491 enum radeon_dpm_forced_level level; 500 enum radeon_dpm_forced_level level;
492 int ret = 0; 501 int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
524 struct device_attribute *attr, 533 struct device_attribute *attr,
525 char *buf) 534 char *buf)
526{ 535{
527 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 536 struct drm_device *ddev = dev_get_drvdata(dev);
528 struct radeon_device *rdev = ddev->dev_private; 537 struct radeon_device *rdev = ddev->dev_private;
529 int temp; 538 int temp;
530 539
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
536 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 545 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
537} 546}
538 547
548static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
549 struct device_attribute *attr,
550 char *buf)
551{
552 struct drm_device *ddev = dev_get_drvdata(dev);
553 struct radeon_device *rdev = ddev->dev_private;
554 int hyst = to_sensor_dev_attr(attr)->index;
555 int temp;
556
557 if (hyst)
558 temp = rdev->pm.dpm.thermal.min_temp;
559 else
560 temp = rdev->pm.dpm.thermal.max_temp;
561
562 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
563}
564
539static ssize_t radeon_hwmon_show_name(struct device *dev, 565static ssize_t radeon_hwmon_show_name(struct device *dev,
540 struct device_attribute *attr, 566 struct device_attribute *attr,
541 char *buf) 567 char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
544} 570}
545 571
546static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 572static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
573static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
574static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
547static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 575static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
548 576
549static struct attribute *hwmon_attributes[] = { 577static struct attribute *hwmon_attributes[] = {
550 &sensor_dev_attr_temp1_input.dev_attr.attr, 578 &sensor_dev_attr_temp1_input.dev_attr.attr,
579 &sensor_dev_attr_temp1_crit.dev_attr.attr,
580 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
551 &sensor_dev_attr_name.dev_attr.attr, 581 &sensor_dev_attr_name.dev_attr.attr,
552 NULL 582 NULL
553}; 583};
554 584
585static umode_t hwmon_attributes_visible(struct kobject *kobj,
586 struct attribute *attr, int index)
587{
588 struct device *dev = container_of(kobj, struct device, kobj);
589 struct drm_device *ddev = dev_get_drvdata(dev);
590 struct radeon_device *rdev = ddev->dev_private;
591
592 /* Skip limit attributes if DPM is not enabled */
593 if (rdev->pm.pm_method != PM_METHOD_DPM &&
594 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
595 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
596 return 0;
597
598 return attr->mode;
599}
600
555static const struct attribute_group hwmon_attrgroup = { 601static const struct attribute_group hwmon_attrgroup = {
556 .attrs = hwmon_attributes, 602 .attrs = hwmon_attributes,
603 .is_visible = hwmon_attributes_visible,
557}; 604};
558 605
559static int radeon_hwmon_init(struct radeon_device *rdev) 606static int radeon_hwmon_init(struct radeon_device *rdev)
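
The hwmon change above relies on the attribute_group .is_visible hook: one static attribute list serves every chip, and the new temp1_crit/temp1_crit_hyst files only appear when DPM backs them. A minimal, self-contained sketch of that mechanism (demo_* names are placeholders, not the driver's):

#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

struct demo_data {
        bool limits_valid;      /* e.g. only true when DPM is enabled */
        int temp;               /* millidegrees C */
        int crit;               /* millidegrees C */
};

static ssize_t demo_show_temp(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct demo_data *data = dev_get_drvdata(dev);

        /* index 0 = current temperature, index 1 = critical limit */
        if (to_sensor_dev_attr(attr)->index)
                return sprintf(buf, "%d\n", data->crit);
        return sprintf(buf, "%d\n", data->temp);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, demo_show_temp, NULL, 1);

static struct attribute *demo_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        NULL
};

static umode_t demo_attr_visible(struct kobject *kobj,
                                 struct attribute *attr, int index)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct demo_data *data = dev_get_drvdata(dev);

        /* hide the limit file when nothing meaningful backs it */
        if (!data->limits_valid &&
            attr == &sensor_dev_attr_temp1_crit.dev_attr.attr)
                return 0;

        return attr->mode;      /* otherwise keep the declared permissions */
}

static const struct attribute_group demo_attrgroup = {
        .attrs = demo_attributes,
        .is_visible = demo_attr_visible,
};
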
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
870 917
871 radeon_dpm_post_set_power_state(rdev); 918 radeon_dpm_post_set_power_state(rdev);
872 919
873 /* force low perf level for thermal */ 920 if (rdev->asic->dpm.force_performance_level) {
874 if (rdev->pm.dpm.thermal_active && 921 if (rdev->pm.dpm.thermal_active)
875 rdev->asic->dpm.force_performance_level) { 922 /* force low perf level for thermal */
876 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 923 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
924 else
925 /* otherwise, enable auto */
926 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
877 } 927 }
878 928
879done: 929done:
@@ -952,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
952{ 1002{
953 /* set up the default clocks if the MC ucode is loaded */ 1003 /* set up the default clocks if the MC ucode is loaded */
954 if ((rdev->family >= CHIP_BARTS) && 1004 if ((rdev->family >= CHIP_BARTS) &&
955 (rdev->family <= CHIP_HAINAN) && 1005 (rdev->family <= CHIP_CAYMAN) &&
956 rdev->mc_fw) { 1006 rdev->mc_fw) {
957 if (rdev->pm.default_vddc) 1007 if (rdev->pm.default_vddc)
958 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -996,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
996 if (ret) { 1046 if (ret) {
997 DRM_ERROR("radeon: dpm resume failed\n"); 1047 DRM_ERROR("radeon: dpm resume failed\n");
998 if ((rdev->family >= CHIP_BARTS) && 1048 if ((rdev->family >= CHIP_BARTS) &&
999 (rdev->family <= CHIP_HAINAN) && 1049 (rdev->family <= CHIP_CAYMAN) &&
1000 rdev->mc_fw) { 1050 rdev->mc_fw) {
1001 if (rdev->pm.default_vddc) 1051 if (rdev->pm.default_vddc)
1002 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1047,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1047 radeon_pm_init_profile(rdev); 1097 radeon_pm_init_profile(rdev);
1048 /* set up the default clocks if the MC ucode is loaded */ 1098 /* set up the default clocks if the MC ucode is loaded */
1049 if ((rdev->family >= CHIP_BARTS) && 1099 if ((rdev->family >= CHIP_BARTS) &&
1050 (rdev->family <= CHIP_HAINAN) && 1100 (rdev->family <= CHIP_CAYMAN) &&
1051 rdev->mc_fw) { 1101 rdev->mc_fw) {
1052 if (rdev->pm.default_vddc) 1102 if (rdev->pm.default_vddc)
1053 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1102{ 1152{
1103 int ret; 1153 int ret;
1104 1154
1105 /* default to performance state */ 1155 /* default to balanced state */
1106 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1156 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1107 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1157 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1158 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1108 rdev->pm.default_sclk = rdev->clock.default_sclk; 1159 rdev->pm.default_sclk = rdev->clock.default_sclk;
1109 rdev->pm.default_mclk = rdev->clock.default_mclk; 1160 rdev->pm.default_mclk = rdev->clock.default_mclk;
1110 rdev->pm.current_sclk = rdev->clock.default_sclk; 1161 rdev->pm.current_sclk = rdev->clock.default_sclk;
@@ -1132,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1132 if (ret) { 1183 if (ret) {
1133 rdev->pm.dpm_enabled = false; 1184 rdev->pm.dpm_enabled = false;
1134 if ((rdev->family >= CHIP_BARTS) && 1185 if ((rdev->family >= CHIP_BARTS) &&
1135 (rdev->family <= CHIP_HAINAN) && 1186 (rdev->family <= CHIP_CAYMAN) &&
1136 rdev->mc_fw) { 1187 rdev->mc_fw) {
1137 if (rdev->pm.default_vddc) 1188 if (rdev->pm.default_vddc)
1138 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
839 * packet that is the root issue 839 * packet that is the root issue
840 */ 840 */
841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
842 for (j = 0; j <= (count + 32); j++) { 842 if (ring->ready) {
843 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); 843 for (j = 0; j <= (count + 32); j++) {
844 i = (i + 1) & ring->ptr_mask; 844 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
845 i = (i + 1) & ring->ptr_mask;
846 }
845 } 847 }
846 return 0; 848 return 0;
847} 849}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd8160a155..f7e367815964 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
28); 28);
29 29
30TRACE_EVENT(radeon_cs,
31 TP_PROTO(struct radeon_cs_parser *p),
32 TP_ARGS(p),
33 TP_STRUCT__entry(
34 __field(u32, ring)
35 __field(u32, dw)
36 __field(u32, fences)
37 ),
38
39 TP_fast_assign(
40 __entry->ring = p->ring;
41 __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
42 __entry->fences = radeon_fence_count_emitted(
43 p->rdev, p->ring);
44 ),
45 TP_printk("ring=%u, dw=%u, fences=%u",
46 __entry->ring, __entry->dw,
47 __entry->fences)
48);
49
30DECLARE_EVENT_CLASS(radeon_fence_request, 50DECLARE_EVENT_CLASS(radeon_fence_request,
31 51
32 TP_PROTO(struct drm_device *dev, u32 seqno), 52 TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
53 TP_ARGS(dev, seqno) 73 TP_ARGS(dev, seqno)
54); 74);
55 75
56DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
57
58 TP_PROTO(struct drm_device *dev, u32 seqno),
59
60 TP_ARGS(dev, seqno)
61);
62
63DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, 76DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
64 77
65 TP_PROTO(struct drm_device *dev, u32 seqno), 78 TP_PROTO(struct drm_device *dev, u32 seqno),
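
The new radeon_cs tracepoint above uses the standard TRACE_EVENT boilerplate: TP_STRUCT__entry declares the per-event record layout, TP_fast_assign fills it when the event fires, and TP_printk formats it at read time. As a rough, generic sketch of what such a trace header looks like end to end (demo names, not radeon's; the .c file that emits the event must define CREATE_TRACE_POINTS before including the header, and TRACE_INCLUDE_PATH "." assumes the Makefile adds -I$(src)):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_DEMO_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _DEMO_TRACE_H_

#include <linux/tracepoint.h>

TRACE_EVENT(demo_submit,
            TP_PROTO(u32 ring, u32 dw),
            TP_ARGS(ring, dw),
            TP_STRUCT__entry(
                             __field(u32, ring)        /* record layout */
                             __field(u32, dw)
                             ),
            TP_fast_assign(
                           __entry->ring = ring;       /* runs when the event fires */
                           __entry->dw = dw;
                           ),
            TP_printk("ring=%u, dw=%u", __entry->ring, __entry->dw)
);

#endif /* _DEMO_TRACE_H_ */

/* This part must be outside the multi-read protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE demo_trace
#include <trace/define_trace.h>

A caller then emits the event with trace_demo_submit(ring, dw);.
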
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..a0f11856ddde 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 /* TODO: is this still necessary on NI+ ? */ 479 if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
480 if ((cmd == 0 || cmd == 0x3) &&
481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 480 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 481 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
483 start, end); 482 start, end);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8ec75a..9566b5940a5a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
274 274
275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
276{ 276{
277 unsigned long flags;
277 uint32_t r; 278 uint32_t r;
278 279
280 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
279 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 281 WREG32(RS480_NB_MC_INDEX, reg & 0xff);
280 r = RREG32(RS480_NB_MC_DATA); 282 r = RREG32(RS480_NB_MC_DATA);
281 WREG32(RS480_NB_MC_INDEX, 0xff); 283 WREG32(RS480_NB_MC_INDEX, 0xff);
284 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
282 return r; 285 return r;
283} 286}
284 287
285void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 288void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
286{ 289{
290 unsigned long flags;
291
292 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
287 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 293 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
288 WREG32(RS480_NB_MC_DATA, (v)); 294 WREG32(RS480_NB_MC_DATA, (v));
289 WREG32(RS480_NB_MC_INDEX, 0xff); 295 WREG32(RS480_NB_MC_INDEX, 0xff);
296 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
290} 297}
291 298
292#if defined(CONFIG_DEBUG_FS) 299#if defined(CONFIG_DEBUG_FS)
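
The rs400 hunk above is one instance of the pattern this series applies to the MC, SMC, PLL and other indirect register blocks: the INDEX/DATA pair is shared state, so selecting the register, touching the data port and restoring the index must happen under one lock, and spin_lock_irqsave keeps the accessors safe if they are reached with interrupts disabled or from interrupt context. A generic, self-contained sketch of that accessor shape (demo_* names are hypothetical, not radeon's; the lock is set up with spin_lock_init() at probe time, just as the radeon_device_init hunk does for each *_idx_lock):

#include <linux/spinlock.h>
#include <linux/io.h>

struct demo_dev {
        void __iomem *mmio;
        spinlock_t idx_lock;    /* protects the INDEX/DATA pair below */
};

#define DEMO_INDEX 0x70
#define DEMO_DATA  0x74

static u32 demo_ind_read(struct demo_dev *d, u32 reg)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&d->idx_lock, flags);
        writel(reg, d->mmio + DEMO_INDEX);      /* select the register */
        val = readl(d->mmio + DEMO_DATA);       /* read its value */
        spin_unlock_irqrestore(&d->idx_lock, flags);

        return val;
}

static void demo_ind_write(struct demo_dev *d, u32 reg, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&d->idx_lock, flags);
        writel(reg, d->mmio + DEMO_INDEX);      /* select the register */
        writel(val, d->mmio + DEMO_DATA);       /* write its value */
        spin_unlock_irqrestore(&d->idx_lock, flags);
}
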
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555d2ca2..6acba8017b9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
847 847
848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 849{
850 unsigned long flags;
851 u32 r;
852
853 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
850 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 854 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
851 S_000070_MC_IND_CITF_ARB0(1)); 855 S_000070_MC_IND_CITF_ARB0(1));
852 return RREG32(R_000074_MC_IND_DATA); 856 r = RREG32(R_000074_MC_IND_DATA);
857 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
858 return r;
853} 859}
854 860
855void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 861void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
856{ 862{
863 unsigned long flags;
864
865 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
857 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 866 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
858 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 867 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
859 WREG32(R_000074_MC_IND_DATA, v); 868 WREG32(R_000074_MC_IND_DATA, v);
869 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
860} 870}
861 871
862static void rs600_debugfs(struct radeon_device *rdev) 872static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb34545d..1447d794c22a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
631 631
632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
633{ 633{
634 unsigned long flags;
634 uint32_t r; 635 uint32_t r;
635 636
637 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
636 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 638 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
637 r = RREG32(R_00007C_MC_DATA); 639 r = RREG32(R_00007C_MC_DATA);
638 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 640 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
641 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
639 return r; 642 return r;
640} 643}
641 644
642void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 645void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
643{ 646{
647 unsigned long flags;
648
649 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
644 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 650 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
645 S_000078_MC_IND_WR_EN(1)); 651 S_000078_MC_IND_WR_EN(1));
646 WREG32(R_00007C_MC_DATA, v); 652 WREG32(R_00007C_MC_DATA, v);
647 WREG32(R_000078_MC_INDEX, 0x7F); 653 WREG32(R_000078_MC_INDEX, 0x7F);
654 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
648} 655}
649 656
650static void rs690_mc_program(struct radeon_device *rdev) 657static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce73bd45..6af8505cf4d2 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
62 radeon_crtc = to_radeon_crtc(crtc); 62 radeon_crtc = to_radeon_crtc(crtc);
63 pi->crtc_id = radeon_crtc->crtc_id; 63 pi->crtc_id = radeon_crtc->crtc_id;
64 if (crtc->mode.htotal && crtc->mode.vtotal) 64 if (crtc->mode.htotal && crtc->mode.vtotal)
65 pi->refresh_rate = 65 pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
66 (crtc->mode.clock * 1000) /
67 (crtc->mode.htotal * crtc->mode.vtotal);
68 break; 66 break;
69 } 67 }
70 } 68 }
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 374 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
377} 375}
378 376
379static void rs780_force_voltage_to_high(struct radeon_device *rdev) 377static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
380{ 378{
381 struct igp_power_info *pi = rs780_get_pi(rdev);
382 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 379 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
383 380
384 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && 381 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
390 udelay(1); 387 udelay(1);
391 388
392 WREG32_P(FVTHROT_PWM_CTRL_REG0, 389 WREG32_P(FVTHROT_PWM_CTRL_REG0,
393 STARTING_PWM_HIGHTIME(pi->max_voltage), 390 STARTING_PWM_HIGHTIME(voltage),
394 ~STARTING_PWM_HIGHTIME_MASK); 391 ~STARTING_PWM_HIGHTIME_MASK);
395 392
396 WREG32_P(FVTHROT_PWM_CTRL_REG0, 393 WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
404 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 401 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
405} 402}
406 403
404static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
405{
406 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
407
408 if (current_state->sclk_low == current_state->sclk_high)
409 return;
410
411 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
412
413 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
414 ~FORCED_FEEDBACK_DIV_MASK);
415 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
416 ~STARTING_FEEDBACK_DIV_MASK);
417 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
418
419 udelay(100);
420
421 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
422}
423
407static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, 424static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
408 struct radeon_ps *new_ps, 425 struct radeon_ps *new_ps,
409 struct radeon_ps *old_ps) 426 struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
432 if (ret) 449 if (ret)
433 return ret; 450 return ret;
434 451
435 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 452 if ((min_dividers.ref_div != max_dividers.ref_div) ||
436 453 (min_dividers.post_div != max_dividers.post_div) ||
437 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), 454 (max_dividers.ref_div != current_max_dividers.ref_div) ||
438 ~FORCED_FEEDBACK_DIV_MASK); 455 (max_dividers.post_div != current_max_dividers.post_div))
439 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), 456 return -EINVAL;
440 ~STARTING_FEEDBACK_DIV_MASK);
441 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
442
443 udelay(100);
444 457
445 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 458 rs780_force_fbdiv(rdev, max_dividers.fb_div);
446 459
447 if (max_dividers.fb_div > min_dividers.fb_div) { 460 if (max_dividers.fb_div > min_dividers.fb_div) {
448 WREG32_P(FVTHROT_FBDIV_REG0, 461 WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
486 (new_state->sclk_low == old_state->sclk_low)) 499 (new_state->sclk_low == old_state->sclk_low))
487 return; 500 return;
488 501
502 if (new_state->sclk_high == new_state->sclk_low)
503 return;
504
489 rs780_clk_scaling_enable(rdev, true); 505 rs780_clk_scaling_enable(rdev, true);
490} 506}
491 507
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
649 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 665 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
650 666
651 if (pi->voltage_control) { 667 if (pi->voltage_control) {
652 rs780_force_voltage_to_high(rdev); 668 rs780_force_voltage(rdev, pi->max_voltage);
653 mdelay(5); 669 mdelay(5);
654 } 670 }
655 671
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
717 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 733 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
718 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 734 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
719 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 735 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
720 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
721 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
722 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
723 } else { 736 } else {
724 rps->vclk = 0; 737 rps->vclk = 0;
725 rps->dclk = 0; 738 rps->dclk = 0;
726 } 739 }
727 740
741 if (r600_is_uvd_state(rps->class, rps->class2)) {
742 if ((rps->vclk == 0) || (rps->dclk == 0)) {
743 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
744 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
745 }
746 }
747
728 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 748 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
729 rdev->pm.dpm.boot_ps = rps; 749 rdev->pm.dpm.boot_ps = rps;
730 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 750 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 1006 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
987 ps->sclk_high, ps->max_voltage); 1007 ps->sclk_high, ps->max_voltage);
988} 1008}
1009
1010int rs780_dpm_force_performance_level(struct radeon_device *rdev,
1011 enum radeon_dpm_forced_level level)
1012{
1013 struct igp_power_info *pi = rs780_get_pi(rdev);
1014 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1015 struct igp_ps *ps = rs780_get_ps(rps);
1016 struct atom_clock_dividers dividers;
1017 int ret;
1018
1019 rs780_clk_scaling_enable(rdev, false);
1020 rs780_voltage_scaling_enable(rdev, false);
1021
1022 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1023 if (pi->voltage_control)
1024 rs780_force_voltage(rdev, pi->max_voltage);
1025
1026 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1027 ps->sclk_high, false, &dividers);
1028 if (ret)
1029 return ret;
1030
1031 rs780_force_fbdiv(rdev, dividers.fb_div);
1032 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1033 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1034 ps->sclk_low, false, &dividers);
1035 if (ret)
1036 return ret;
1037
1038 rs780_force_fbdiv(rdev, dividers.fb_div);
1039
1040 if (pi->voltage_control)
1041 rs780_force_voltage(rdev, pi->min_voltage);
1042 } else {
1043 if (pi->voltage_control)
1044 rs780_force_voltage(rdev, pi->max_voltage);
1045
1046 if (ps->sclk_high != ps->sclk_low) {
1047 WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
1048 rs780_clk_scaling_enable(rdev, true);
1049 }
1050
1051 if (pi->voltage_control) {
1052 rs780_voltage_scaling_enable(rdev, true);
1053 rs780_enable_voltage_scaling(rdev, rps);
1054 }
1055 }
1056
1057 rdev->pm.dpm.forced_level = level;
1058
1059 return 0;
1060}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573ae820..873eb4b193b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
209 209
210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
211{ 211{
212 unsigned long flags;
212 uint32_t r; 213 uint32_t r;
213 214
215 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
214 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 216 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
215 r = RREG32(MC_IND_DATA); 217 r = RREG32(MC_IND_DATA);
216 WREG32(MC_IND_INDEX, 0); 218 WREG32(MC_IND_INDEX, 0);
219 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
220
217 return r; 221 return r;
218} 222}
219 223
220void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 224void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
221{ 225{
226 unsigned long flags;
227
228 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
222 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 229 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
223 WREG32(MC_IND_DATA, (v)); 230 WREG32(MC_IND_DATA, (v));
224 WREG32(MC_IND_INDEX, 0); 231 WREG32(MC_IND_INDEX, 0);
232 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
225} 233}
226 234
227#if defined(CONFIG_DEBUG_FS) 235#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f2016f21e..5811d277a36a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1758 1758
1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1760 1760
1761 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1762
1763 return 0; 1761 return 0;
1764} 1762}
1765 1763
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85dae5aa..913b025ae9b3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); 2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2066 2066
2067 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2068 if (ret) {
2069 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2070 return ret;
2071 }
2072
2073 return 0; 2067 return 0;
2074} 2068}
2075 2069
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2141 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2142 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2143 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2150 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 } else { 2144 } else {
2154 rps->vclk = 0; 2145 rps->vclk = 0;
2155 rps->dclk = 0; 2146 rps->dclk = 0;
2156 } 2147 }
2157 2148
2149 if (r600_is_uvd_state(rps->class, rps->class2)) {
2150 if ((rps->vclk == 0) || (rps->dclk == 0)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 }
2154 }
2155
2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 2156 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
2159 rdev->pm.dpm.boot_ps = rps; 2157 rdev->pm.dpm.boot_ps = rps;
2160 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da570215..b2a224407365 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
274 0x08, 0x72, 0x08, 0x72 274 0x08, 0x72, 0x08, 0x72
275}; 275};
276 276
277int rv770_set_smc_sram_address(struct radeon_device *rdev, 277static int rv770_set_smc_sram_address(struct radeon_device *rdev,
278 u16 smc_address, u16 limit) 278 u16 smc_address, u16 limit)
279{ 279{
280 u32 addr; 280 u32 addr;
281 281
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
296 u16 smc_start_address, const u8 *src, 296 u16 smc_start_address, const u8 *src,
297 u16 byte_count, u16 limit) 297 u16 byte_count, u16 limit)
298{ 298{
299 unsigned long flags;
299 u32 data, original_data, extra_shift; 300 u32 data, original_data, extra_shift;
300 u16 addr; 301 u16 addr;
301 int ret; 302 int ret = 0;
302 303
303 if (smc_start_address & 3) 304 if (smc_start_address & 3)
304 return -EINVAL; 305 return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
307 308
308 addr = smc_start_address; 309 addr = smc_start_address;
309 310
311 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
310 while (byte_count >= 4) { 312 while (byte_count >= 4) {
311 /* SMC address space is BE */ 313 /* SMC address space is BE */
312 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
313 315
314 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 ret = rv770_set_smc_sram_address(rdev, addr, limit);
315 if (ret) 317 if (ret)
316 return ret; 318 goto done;
317 319
318 WREG32(SMC_SRAM_DATA, data); 320 WREG32(SMC_SRAM_DATA, data);
319 321
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
328 330
329 ret = rv770_set_smc_sram_address(rdev, addr, limit); 331 ret = rv770_set_smc_sram_address(rdev, addr, limit);
330 if (ret) 332 if (ret)
331 return ret; 333 goto done;
332 334
333 original_data = RREG32(SMC_SRAM_DATA); 335 original_data = RREG32(SMC_SRAM_DATA);
334 336
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
346 348
347 ret = rv770_set_smc_sram_address(rdev, addr, limit); 349 ret = rv770_set_smc_sram_address(rdev, addr, limit);
348 if (ret) 350 if (ret)
349 return ret; 351 goto done;
350 352
351 WREG32(SMC_SRAM_DATA, data); 353 WREG32(SMC_SRAM_DATA, data);
352 } 354 }
353 355
354 return 0; 356done:
357 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
358
359 return ret;
355} 360}
356 361
357static int rv770_program_interrupt_vectors(struct radeon_device *rdev, 362static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
461 466
462static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 467static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
463{ 468{
469 unsigned long flags;
464 u16 i; 470 u16 i;
465 471
472 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
466 for (i = 0; i < limit; i += 4) { 473 for (i = 0; i < limit; i += 4) {
467 rv770_set_smc_sram_address(rdev, i, limit); 474 rv770_set_smc_sram_address(rdev, i, limit);
468 WREG32(SMC_SRAM_DATA, 0); 475 WREG32(SMC_SRAM_DATA, 0);
469 } 476 }
477 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
470} 478}
471 479
472int rv770_load_smc_ucode(struct radeon_device *rdev, 480int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
595int rv770_read_smc_sram_dword(struct radeon_device *rdev, 603int rv770_read_smc_sram_dword(struct radeon_device *rdev,
596 u16 smc_address, u32 *value, u16 limit) 604 u16 smc_address, u32 *value, u16 limit)
597{ 605{
606 unsigned long flags;
598 int ret; 607 int ret;
599 608
609 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
600 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 610 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
601 if (ret) 611 if (ret == 0)
602 return ret; 612 *value = RREG32(SMC_SRAM_DATA);
603 613 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
604 *value = RREG32(SMC_SRAM_DATA);
605 614
606 return 0; 615 return ret;
607} 616}
608 617
609int rv770_write_smc_sram_dword(struct radeon_device *rdev, 618int rv770_write_smc_sram_dword(struct radeon_device *rdev,
610 u16 smc_address, u32 value, u16 limit) 619 u16 smc_address, u32 value, u16 limit)
611{ 620{
621 unsigned long flags;
612 int ret; 622 int ret;
613 623
624 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
614 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 625 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
615 if (ret) 626 if (ret == 0)
616 return ret; 627 WREG32(SMC_SRAM_DATA, value);
628 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
617 629
618 WREG32(SMC_SRAM_DATA, value); 630 return ret;
619
620 return 0;
621} 631}
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a4b325..3b2c963c4880 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
189 189
190int rv770_set_smc_sram_address(struct radeon_device *rdev,
191 u16 smc_address, u16 limit);
192int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 190int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
193 u16 smc_start_address, const u8 *src, 191 u16 smc_start_address, const u8 *src,
194 u16 byte_count, u16 limit); 192 u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e542922..1ae277152cc7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
852#define AFMT_VBI_PACKET_CONTROL 0x7608 852#define AFMT_VBI_PACKET_CONTROL 0x7608
853# define AFMT_GENERIC0_UPDATE (1 << 2) 853# define AFMT_GENERIC0_UPDATE (1 << 2)
854#define AFMT_INFOFRAME_CONTROL0 0x760c 854#define AFMT_INFOFRAME_CONTROL0 0x760c
855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
856# define AFMT_AUDIO_INFO_UPDATE (1 << 7) 856# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
857# define AFMT_MPEG_INFO_UPDATE (1 << 10) 857# define AFMT_MPEG_INFO_UPDATE (1 << 10)
858#define AFMT_GENERIC0_7 0x7610 858#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..c354c1094967 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
83 uint64_t pe, 83 uint64_t pe,
84 uint64_t addr, unsigned count, 84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags); 85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable);
86 88
87static const u32 verde_rlc_save_restore_register_list[] = 89static const u32 verde_rlc_save_restore_register_list[] =
88{ 90{
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3386 u32 rb_bufsz; 3388 u32 rb_bufsz;
3387 int r; 3389 int r;
3388 3390
3391 si_enable_gui_idle_interrupt(rdev, false);
3392
3389 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3393 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3390 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3394 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3391 3395
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3501 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3505 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3502 } 3506 }
3503 3507
3508 si_enable_gui_idle_interrupt(rdev, true);
3509
3504 return 0; 3510 return 0;
3505} 3511}
3506 3512
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4888{ 4894{
4889 u32 tmp; 4895 u32 tmp;
4890 4896
4891 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 4897 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4898 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4893 WREG32(RLC_TTOP_D, tmp); 4899 WREG32(RLC_TTOP_D, tmp);
4894 4900
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
5250 u32 block, bool enable) 5256 u32 block, bool enable)
5251{ 5257{
5252 if (block & RADEON_CG_BLOCK_GFX) { 5258 if (block & RADEON_CG_BLOCK_GFX) {
5259 si_enable_gui_idle_interrupt(rdev, false);
5253 /* order matters! */ 5260 /* order matters! */
5254 if (enable) { 5261 if (enable) {
5255 si_enable_mgcg(rdev, true); 5262 si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
5258 si_enable_cgcg(rdev, false); 5265 si_enable_cgcg(rdev, false);
5259 si_enable_mgcg(rdev, false); 5266 si_enable_mgcg(rdev, false);
5260 } 5267 }
5268 si_enable_gui_idle_interrupt(rdev, true);
5261 } 5269 }
5262 5270
5263 if (block & RADEON_CG_BLOCK_MC) { 5271 if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
5408 si_init_dma_pg(rdev); 5416 si_init_dma_pg(rdev);
5409 } 5417 }
5410 si_init_ao_cu_mask(rdev); 5418 si_init_ao_cu_mask(rdev);
5411 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5419 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5412 si_init_gfx_cgpg(rdev); 5420 si_init_gfx_cgpg(rdev);
5413 } 5421 }
5414 si_enable_dma_pg(rdev, true); 5422 si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
5560{ 5568{
5561 u32 tmp; 5569 u32 tmp;
5562 5570
5563 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5571 tmp = RREG32(CP_INT_CNTL_RING0) &
5572 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5573 WREG32(CP_INT_CNTL_RING0, tmp);
5564 WREG32(CP_INT_CNTL_RING1, 0); 5574 WREG32(CP_INT_CNTL_RING1, 0);
5565 WREG32(CP_INT_CNTL_RING2, 0); 5575 WREG32(CP_INT_CNTL_RING2, 0);
5566 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5576 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
5685 5695
5686int si_irq_set(struct radeon_device *rdev) 5696int si_irq_set(struct radeon_device *rdev)
5687{ 5697{
5688 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 5698 u32 cp_int_cntl;
5689 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5699 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
5690 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5700 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5691 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; 5701 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
5706 return 0; 5716 return 0;
5707 } 5717 }
5708 5718
5719 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
5720 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5721
5709 if (!ASIC_IS_NODCE(rdev)) { 5722 if (!ASIC_IS_NODCE(rdev)) {
5710 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 5723 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
5711 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 5724 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..9ace28702c76 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2910 bool disable_sclk_switching = false; 2910 bool disable_sclk_switching = false;
2911 u32 mclk, sclk; 2911 u32 mclk, sclk;
2912 u16 vddc, vddci; 2912 u16 vddc, vddci;
2913 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2913 int i; 2914 int i;
2914 2915
2915 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2916 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2943 } 2944 }
2944 } 2945 }
2945 2946
2947 /* limit clocks to max supported clocks based on voltage dependency tables */
2948 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2949 &max_sclk_vddc);
2950 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2951 &max_mclk_vddci);
2952 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2953 &max_mclk_vddc);
2954
2955 for (i = 0; i < ps->performance_level_count; i++) {
2956 if (max_sclk_vddc) {
2957 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2958 ps->performance_levels[i].sclk = max_sclk_vddc;
2959 }
2960 if (max_mclk_vddci) {
2961 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2962 ps->performance_levels[i].mclk = max_mclk_vddci;
2963 }
2964 if (max_mclk_vddc) {
2965 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2966 ps->performance_levels[i].mclk = max_mclk_vddc;
2967 }
2968 }
2969
2946 /* XXX validate the min clocks required for display */ 2970 /* XXX validate the min clocks required for display */
2947 2971
2948 if (disable_mclk_switching) { 2972 if (disable_mclk_switching) {
@@ -6075,12 +6099,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
6075 return ret; 6099 return ret;
6076 } 6100 }
6077 6101
6078 ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
6079 if (ret) {
6080 DRM_ERROR("si_dpm_force_performance_level failed\n");
6081 return ret;
6082 }
6083
6084 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 6102 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
6085 RADEON_CG_BLOCK_MC | 6103 RADEON_CG_BLOCK_MC |
6086 RADEON_CG_BLOCK_SDMA | 6104 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
29#include "ppsmc.h" 29#include "ppsmc.h"
30#include "radeon_ucode.h" 30#include "radeon_ucode.h"
31 31
32int si_set_smc_sram_address(struct radeon_device *rdev, 32static int si_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit) 33 u32 smc_address, u32 limit)
34{ 34{
35 if (smc_address & 3) 35 if (smc_address & 3)
36 return -EINVAL; 36 return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address, 47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit) 48 const u8 *src, u32 byte_count, u32 limit)
49{ 49{
50 int ret; 50 unsigned long flags;
51 int ret = 0;
51 u32 data, original_data, addr, extra_shift; 52 u32 data, original_data, addr, extra_shift;
52 53
53 if (smc_start_address & 3) 54 if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
57 58
58 addr = smc_start_address; 59 addr = smc_start_address;
59 60
61 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
60 while (byte_count >= 4) { 62 while (byte_count >= 4) {
61 /* SMC address space is BE */ 63 /* SMC address space is BE */
62 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
63 65
64 ret = si_set_smc_sram_address(rdev, addr, limit); 66 ret = si_set_smc_sram_address(rdev, addr, limit);
65 if (ret) 67 if (ret)
66 return ret; 68 goto done;
67 69
68 WREG32(SMC_IND_DATA_0, data); 70 WREG32(SMC_IND_DATA_0, data);
69 71
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
78 80
79 ret = si_set_smc_sram_address(rdev, addr, limit); 81 ret = si_set_smc_sram_address(rdev, addr, limit);
80 if (ret) 82 if (ret)
81 return ret; 83 goto done;
82 84
83 original_data = RREG32(SMC_IND_DATA_0); 85 original_data = RREG32(SMC_IND_DATA_0);
84 86
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
96 98
97 ret = si_set_smc_sram_address(rdev, addr, limit); 99 ret = si_set_smc_sram_address(rdev, addr, limit);
98 if (ret) 100 if (ret)
99 return ret; 101 goto done;
100 102
101 WREG32(SMC_IND_DATA_0, data); 103 WREG32(SMC_IND_DATA_0, data);
102 } 104 }
103 return 0; 105
106done:
107 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
108
109 return ret;
104} 110}
105 111
106void si_start_smc(struct radeon_device *rdev) 112void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
203 209
204int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 210int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
205{ 211{
212 unsigned long flags;
206 u32 ucode_start_address; 213 u32 ucode_start_address;
207 u32 ucode_size; 214 u32 ucode_size;
208 const u8 *src; 215 const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
241 return -EINVAL; 248 return -EINVAL;
242 249
243 src = (const u8 *)rdev->smc_fw->data; 250 src = (const u8 *)rdev->smc_fw->data;
251 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
244 WREG32(SMC_IND_INDEX_0, ucode_start_address); 252 WREG32(SMC_IND_INDEX_0, ucode_start_address);
245 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 253 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
246 while (ucode_size >= 4) { 254 while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
253 ucode_size -= 4; 261 ucode_size -= 4;
254 } 262 }
255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 263 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
264 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
256 265
257 return 0; 266 return 0;
258} 267}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
260int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 269int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
261 u32 *value, u32 limit) 270 u32 *value, u32 limit)
262{ 271{
272 unsigned long flags;
263 int ret; 273 int ret;
264 274
275 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
265 ret = si_set_smc_sram_address(rdev, smc_address, limit); 276 ret = si_set_smc_sram_address(rdev, smc_address, limit);
266 if (ret) 277 if (ret == 0)
267 return ret; 278 *value = RREG32(SMC_IND_DATA_0);
279 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
268 280
269 *value = RREG32(SMC_IND_DATA_0); 281 return ret;
270 return 0;
271} 282}
272 283
273int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 284int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
274 u32 value, u32 limit) 285 u32 value, u32 limit)
275{ 286{
287 unsigned long flags;
276 int ret; 288 int ret;
277 289
290 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
278 ret = si_set_smc_sram_address(rdev, smc_address, limit); 291 ret = si_set_smc_sram_address(rdev, smc_address, limit);
279 if (ret) 292 if (ret == 0)
280 return ret; 293 WREG32(SMC_IND_DATA_0, value);
294 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
281 295
282 WREG32(SMC_IND_DATA_0, value); 296 return ret;
283 return 0;
284} 297}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
1319 if (pi->enable_dpm) 1319 if (pi->enable_dpm)
1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1321 1321
1322 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1323
1324 return 0; 1322 return 0;
1325} 1323}
1326 1324
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..7f998bf1cc9d 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1068 pi->requested_rps.ps_priv = &pi->requested_ps;
1069} 1069}
1070 1070
1071void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1072{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev);
1074
1075 if (pi->enable_bapm) {
1076 trinity_acquire_mutex(rdev);
1077 trinity_dpm_bapm_enable(rdev, enable);
1078 trinity_release_mutex(rdev);
1079 }
1080}
1081
1071int trinity_dpm_enable(struct radeon_device *rdev) 1082int trinity_dpm_enable(struct radeon_device *rdev)
1072{ 1083{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev); 1084 struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
1091 trinity_program_sclk_dpm(rdev); 1102 trinity_program_sclk_dpm(rdev);
1092 trinity_start_dpm(rdev); 1103 trinity_start_dpm(rdev);
1093 trinity_wait_for_dpm_enabled(rdev); 1104 trinity_wait_for_dpm_enabled(rdev);
1105 trinity_dpm_bapm_enable(rdev, false);
1094 trinity_release_mutex(rdev); 1106 trinity_release_mutex(rdev);
1095 1107
1096 if (rdev->irq.installed && 1108 if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
1116 trinity_release_mutex(rdev); 1128 trinity_release_mutex(rdev);
1117 return; 1129 return;
1118 } 1130 }
1131 trinity_dpm_bapm_enable(rdev, false);
1119 trinity_disable_clock_power_gating(rdev); 1132 trinity_disable_clock_power_gating(rdev);
1120 sumo_clear_vc(rdev); 1133 sumo_clear_vc(rdev);
1121 trinity_wait_for_level_0(rdev); 1134 trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1212 1225
1213 trinity_acquire_mutex(rdev); 1226 trinity_acquire_mutex(rdev);
1214 if (pi->enable_dpm) { 1227 if (pi->enable_dpm) {
1228 if (pi->enable_bapm)
1229 trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1215 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1230 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1216 trinity_enable_power_level_0(rdev); 1231 trinity_enable_power_level_0(rdev);
1217 trinity_force_level_0(rdev); 1232 trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1221 trinity_force_level_0(rdev); 1236 trinity_force_level_0(rdev);
1222 trinity_unforce_levels(rdev); 1237 trinity_unforce_levels(rdev);
1223 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1238 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1224 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1225 } 1239 }
1226 trinity_release_mutex(rdev); 1240 trinity_release_mutex(rdev);
1227 1241
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1854 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1855 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1856 1870
1871 pi->enable_bapm = true;
1857 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1858 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1859 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
108 bool enable_auto_thermal_throttling; 108 bool enable_auto_thermal_throttling;
109 bool enable_dpm; 109 bool enable_dpm;
110 bool enable_sclk_ds; 110 bool enable_sclk_ds;
111 bool enable_bapm;
111 bool uvd_dpm; 112 bool uvd_dpm;
112 struct radeon_ps current_rps; 113 struct radeon_ps current_rps;
113 struct trinity_ps current_ps; 114 struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
118#define TRINITY_AT_DFLT 30 119#define TRINITY_AT_DFLT 30
119 120
120/* trinity_smc.c */ 121/* trinity_smc.c */
122int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
121int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123int trinity_dpm_config(struct radeon_device *rdev, bool enable);
122int trinity_uvd_dpm_config(struct radeon_device *rdev); 124int trinity_uvd_dpm_config(struct radeon_device *rdev);
123int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); 125int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
56 return 0; 56 return 0;
57} 57}
58 58
59int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
60{
61 if (enable)
62 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
63 else
64 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
65}
66
59int trinity_dpm_config(struct radeon_device *rdev, bool enable) 67int trinity_dpm_config(struct radeon_device *rdev, bool enable)
60{ 68{
61 if (enable) 69 if (enable)
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC */ 215 /* enable UMC and NC0 */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 58a5f3261c0b..a868176c258a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
218 uint32_t key) 218 uint32_t key)
219{ 219{
220 struct ttm_object_device *tdev = tfile->tdev; 220 struct ttm_object_device *tdev = tfile->tdev;
221 struct ttm_base_object *base; 221 struct ttm_base_object *uninitialized_var(base);
222 struct drm_hash_item *hash; 222 struct drm_hash_item *hash;
223 int ret; 223 int ret;
224 224
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52d4f2c..210d50365162 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
170 ttm_tt_unbind(ttm); 170 ttm_tt_unbind(ttm);
171 } 171 }
172 172
173 if (likely(ttm->pages != NULL)) { 173 if (ttm->state == tt_unbound) {
174 ttm->bdev->driver->ttm_tt_unpopulate(ttm); 174 ttm->bdev->driver->ttm_tt_unpopulate(ttm);
175 } 175 }
176 176
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); 97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
98 switch (ret) { 98 switch (ret) {
99 case -EAGAIN: 99 case -EAGAIN:
100 set_need_resched();
101 case 0: 100 case 0:
102 case -ERESTARTSYS: 101 case -ERESTARTSYS:
103 return VM_FAULT_NOPAGE; 102 return VM_FAULT_NOPAGE;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ae88a97f976e..b8470b1a10fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -94,7 +94,6 @@ EXPORT_SYMBOL_GPL(hid_register_report);
94static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) 94static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
95{ 95{
96 struct hid_field *field; 96 struct hid_field *field;
97 int i;
98 97
99 if (report->maxfield == HID_MAX_FIELDS) { 98 if (report->maxfield == HID_MAX_FIELDS) {
100 hid_err(report->device, "too many fields in report\n"); 99 hid_err(report->device, "too many fields in report\n");
@@ -113,9 +112,6 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
113 field->value = (s32 *)(field->usage + usages); 112 field->value = (s32 *)(field->usage + usages);
114 field->report = report; 113 field->report = report;
115 114
116 for (i = 0; i < usages; i++)
117 field->usage[i].usage_index = i;
118
119 return field; 115 return field;
120} 116}
121 117
@@ -226,9 +222,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
226{ 222{
227 struct hid_report *report; 223 struct hid_report *report;
228 struct hid_field *field; 224 struct hid_field *field;
229 int usages; 225 unsigned usages;
230 unsigned offset; 226 unsigned offset;
231 int i; 227 unsigned i;
232 228
233 report = hid_register_report(parser->device, report_type, parser->global.report_id); 229 report = hid_register_report(parser->device, report_type, parser->global.report_id);
234 if (!report) { 230 if (!report) {
@@ -255,7 +251,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
255 if (!parser->local.usage_index) /* Ignore padding fields */ 251 if (!parser->local.usage_index) /* Ignore padding fields */
256 return 0; 252 return 0;
257 253
258 usages = max_t(int, parser->local.usage_index, parser->global.report_count); 254 usages = max_t(unsigned, parser->local.usage_index,
255 parser->global.report_count);
259 256
260 field = hid_register_field(report, usages, parser->global.report_count); 257 field = hid_register_field(report, usages, parser->global.report_count);
261 if (!field) 258 if (!field)
@@ -266,13 +263,14 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
266 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 263 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
267 264
268 for (i = 0; i < usages; i++) { 265 for (i = 0; i < usages; i++) {
269 int j = i; 266 unsigned j = i;
270 /* Duplicate the last usage we parsed if we have excess values */ 267 /* Duplicate the last usage we parsed if we have excess values */
271 if (i >= parser->local.usage_index) 268 if (i >= parser->local.usage_index)
272 j = parser->local.usage_index - 1; 269 j = parser->local.usage_index - 1;
273 field->usage[i].hid = parser->local.usage[j]; 270 field->usage[i].hid = parser->local.usage[j];
274 field->usage[i].collection_index = 271 field->usage[i].collection_index =
275 parser->local.collection_index[j]; 272 parser->local.collection_index[j];
273 field->usage[i].usage_index = i;
276 } 274 }
277 275
278 field->maxusage = usages; 276 field->maxusage = usages;
@@ -801,6 +799,64 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
801} 799}
802EXPORT_SYMBOL_GPL(hid_parse_report); 800EXPORT_SYMBOL_GPL(hid_parse_report);
803 801
802static const char * const hid_report_names[] = {
803 "HID_INPUT_REPORT",
804 "HID_OUTPUT_REPORT",
805 "HID_FEATURE_REPORT",
806};
807/**
808 * hid_validate_values - validate existing device report's value indexes
809 *
810 * @hid: hid device
811 * @type: which report type to examine
812 * @id: which report ID to examine (0 for first)
813 * @field_index: which report field to examine
814 * @report_counts: expected number of values
815 *
816 * Validate the number of values in a given field of a given report, after
817 * parsing.
818 */
819struct hid_report *hid_validate_values(struct hid_device *hid,
820 unsigned int type, unsigned int id,
821 unsigned int field_index,
822 unsigned int report_counts)
823{
824 struct hid_report *report;
825
826 if (type > HID_FEATURE_REPORT) {
827 hid_err(hid, "invalid HID report type %u\n", type);
828 return NULL;
829 }
830
831 if (id >= HID_MAX_IDS) {
832 hid_err(hid, "invalid HID report id %u\n", id);
833 return NULL;
834 }
835
836 /*
837 * Explicitly not using hid_get_report() here since it depends on
838 * ->numbered being checked, which may not always be the case when
839 * drivers go to access report values.
840 */
841 report = hid->report_enum[type].report_id_hash[id];
842 if (!report) {
843 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
844 return NULL;
845 }
846 if (report->maxfield <= field_index) {
847 hid_err(hid, "not enough fields in %s %u\n",
848 hid_report_names[type], id);
849 return NULL;
850 }
851 if (report->field[field_index]->report_count < report_counts) {
852 hid_err(hid, "not enough values in %s %u field %u\n",
853 hid_report_names[type], id, field_index);
854 return NULL;
855 }
856 return report;
857}
858EXPORT_SYMBOL_GPL(hid_validate_values);
859
804/** 860/**
805 * hid_open_report - open a driver-specific device report 861 * hid_open_report - open a driver-specific device report
806 * 862 *
@@ -1296,7 +1352,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1296 goto out; 1352 goto out;
1297 } 1353 }
1298 1354
1299 if (hid->claimed != HID_CLAIMED_HIDRAW) { 1355 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
1300 for (a = 0; a < report->maxfield; a++) 1356 for (a = 0; a < report->maxfield; a++)
1301 hid_input_field(hid, report->field[a], cdata, interrupt); 1357 hid_input_field(hid, report->field[a], cdata, interrupt);
1302 hdrv = hid->driver; 1358 hdrv = hid->driver;
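
The hid-core.c hunks above add hid_validate_values() as a single exported helper that the HID drivers further down this diff switch to instead of open-coded report checks. As a minimal sketch of the calling convention (the name example_ff_init, the report id 0, field 0 and the count of 7 are illustrative only; hid_validate_values(), hid_hw_request() and the HID_* constants are taken from the code above and below):

    #include <linux/hid.h>

    /* Hypothetical driver init: refuse to bind unless output report 0,
     * field 0 carries at least 7 values, then write to it safely. */
    static int example_ff_init(struct hid_device *hid)
    {
            struct hid_report *report;

            report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
            if (!report)
                    return -ENODEV; /* report missing, too few fields or values */

            report->field[0]->value[0] = 0x51;      /* indexes 0..6 are now known to exist */
            hid_hw_request(hid, report, HID_REQ_SET_REPORT);
            return 0;
    }

This is the same pattern the lg2ff, lg3ff, lg4ff, lgff, zpff, sony and steelseries conversions below follow.
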
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b420f4a0fd28..8741d953dcc8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -485,6 +485,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
485 if (field->flags & HID_MAIN_ITEM_CONSTANT) 485 if (field->flags & HID_MAIN_ITEM_CONSTANT)
486 goto ignore; 486 goto ignore;
487 487
488 /* Ignore if report count is out of bounds. */
489 if (field->report_count < 1)
490 goto ignore;
491
488 /* only LED usages are supported in output fields */ 492 /* only LED usages are supported in output fields */
489 if (field->report_type == HID_OUTPUT_REPORT && 493 if (field->report_type == HID_OUTPUT_REPORT &&
490 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) { 494 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
@@ -1236,7 +1240,11 @@ static void report_features(struct hid_device *hid)
1236 1240
1237 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1241 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1238 list_for_each_entry(rep, &rep_enum->report_list, list) 1242 list_for_each_entry(rep, &rep_enum->report_list, list)
1239 for (i = 0; i < rep->maxfield; i++) 1243 for (i = 0; i < rep->maxfield; i++) {
1244 /* Ignore if report count is out of bounds. */
1245 if (rep->field[i]->report_count < 1)
1246 continue;
1247
1240 for (j = 0; j < rep->field[i]->maxusage; j++) { 1248 for (j = 0; j < rep->field[i]->maxusage; j++) {
1241 /* Verify if Battery Strength feature is available */ 1249 /* Verify if Battery Strength feature is available */
1242 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]); 1250 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
@@ -1245,6 +1253,7 @@ static void report_features(struct hid_device *hid)
1245 drv->feature_mapping(hid, rep->field[i], 1253 drv->feature_mapping(hid, rep->field[i],
1246 rep->field[i]->usage + j); 1254 rep->field[i]->usage + j);
1247 } 1255 }
1256 }
1248} 1257}
1249 1258
1250static struct hid_input *hidinput_allocate(struct hid_device *hid) 1259static struct hid_input *hidinput_allocate(struct hid_device *hid)
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 07837f5a4eb8..31cf29a6ba17 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -339,7 +339,15 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
339 struct tpkbd_data_pointer *data_pointer; 339 struct tpkbd_data_pointer *data_pointer;
340 size_t name_sz = strlen(dev_name(dev)) + 16; 340 size_t name_sz = strlen(dev_name(dev)) + 16;
341 char *name_mute, *name_micmute; 341 char *name_mute, *name_micmute;
342 int ret; 342 int i, ret;
343
344 /* Validate required reports. */
345 for (i = 0; i < 4; i++) {
346 if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
347 return -ENODEV;
348 }
349 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
350 return -ENODEV;
343 351
344 if (sysfs_create_group(&hdev->dev.kobj, 352 if (sysfs_create_group(&hdev->dev.kobj,
345 &tpkbd_attr_group_pointer)) { 353 &tpkbd_attr_group_pointer)) {
@@ -406,22 +414,27 @@ static int tpkbd_probe(struct hid_device *hdev,
406 ret = hid_parse(hdev); 414 ret = hid_parse(hdev);
407 if (ret) { 415 if (ret) {
408 hid_err(hdev, "hid_parse failed\n"); 416 hid_err(hdev, "hid_parse failed\n");
409 goto err_free; 417 goto err;
410 } 418 }
411 419
412 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 420 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
413 if (ret) { 421 if (ret) {
414 hid_err(hdev, "hid_hw_start failed\n"); 422 hid_err(hdev, "hid_hw_start failed\n");
415 goto err_free; 423 goto err;
416 } 424 }
417 425
418 uhdev = (struct usbhid_device *) hdev->driver_data; 426 uhdev = (struct usbhid_device *) hdev->driver_data;
419 427
420 if (uhdev->ifnum == 1) 428 if (uhdev->ifnum == 1) {
421 return tpkbd_probe_tp(hdev); 429 ret = tpkbd_probe_tp(hdev);
430 if (ret)
431 goto err_hid;
432 }
422 433
423 return 0; 434 return 0;
424err_free: 435err_hid:
436 hid_hw_stop(hdev);
437err:
425 return ret; 438 return ret;
426} 439}
427 440
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index b3cd1507dda2..1a42eaa6ca02 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
64 struct hid_report *report; 64 struct hid_report *report;
65 struct hid_input *hidinput = list_entry(hid->inputs.next, 65 struct hid_input *hidinput = list_entry(hid->inputs.next,
66 struct hid_input, list); 66 struct hid_input, list);
67 struct list_head *report_list =
68 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
69 struct input_dev *dev = hidinput->input; 67 struct input_dev *dev = hidinput->input;
70 int error; 68 int error;
71 69
72 if (list_empty(report_list)) { 70 /* Check that the report looks ok */
73 hid_err(hid, "no output report found\n"); 71 report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
72 if (!report)
74 return -ENODEV; 73 return -ENODEV;
75 }
76
77 report = list_entry(report_list->next, struct hid_report, list);
78
79 if (report->maxfield < 1) {
80 hid_err(hid, "output report is empty\n");
81 return -ENODEV;
82 }
83 if (report->field[0]->report_count < 7) {
84 hid_err(hid, "not enough values in the field\n");
85 return -ENODEV;
86 }
87 74
88 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL); 75 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
89 if (!lg2ff) 76 if (!lg2ff)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index e52f181f6aa1..8c2da183d3bc 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
66 int x, y; 66 int x, y;
67 67
68/* 68/*
69 * Maxusage should always be 63 (maximum fields) 69 * Available values in the field should always be 63, but we only use up to
70 * likely a better way to ensure this data is clean 70 * 35. Instead, clear the entire area, however big it is.
71 */ 71 */
72 memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); 72 memset(report->field[0]->value, 0,
73 sizeof(__s32) * report->field[0]->report_count);
73 74
74 switch (effect->type) { 75 switch (effect->type) {
75 case FF_CONSTANT: 76 case FF_CONSTANT:
@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
129int lg3ff_init(struct hid_device *hid) 130int lg3ff_init(struct hid_device *hid)
130{ 131{
131 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 132 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
132 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
133 struct input_dev *dev = hidinput->input; 133 struct input_dev *dev = hidinput->input;
134 struct hid_report *report;
135 struct hid_field *field;
136 const signed short *ff_bits = ff3_joystick_ac; 134 const signed short *ff_bits = ff3_joystick_ac;
137 int error; 135 int error;
138 int i; 136 int i;
139 137
140 /* Find the report to use */
141 if (list_empty(report_list)) {
142 hid_err(hid, "No output report found\n");
143 return -1;
144 }
145
146 /* Check that the report looks ok */ 138 /* Check that the report looks ok */
147 report = list_entry(report_list->next, struct hid_report, list); 139 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
148 if (!report) { 140 return -ENODEV;
149 hid_err(hid, "NULL output report\n");
150 return -1;
151 }
152
153 field = report->field[0];
154 if (!field) {
155 hid_err(hid, "NULL field\n");
156 return -1;
157 }
158 141
159 /* Assume single fixed device G940 */ 142 /* Assume single fixed device G940 */
160 for (i = 0; ff_bits[i] >= 0; i++) 143 for (i = 0; ff_bits[i] >= 0; i++)
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 0ddae2a00d59..8782fe1aaa07 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
484int lg4ff_init(struct hid_device *hid) 484int lg4ff_init(struct hid_device *hid)
485{ 485{
486 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 486 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
487 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
488 struct input_dev *dev = hidinput->input; 487 struct input_dev *dev = hidinput->input;
489 struct hid_report *report;
490 struct hid_field *field;
491 struct lg4ff_device_entry *entry; 488 struct lg4ff_device_entry *entry;
492 struct lg_drv_data *drv_data; 489 struct lg_drv_data *drv_data;
493 struct usb_device_descriptor *udesc; 490 struct usb_device_descriptor *udesc;
494 int error, i, j; 491 int error, i, j;
495 __u16 bcdDevice, rev_maj, rev_min; 492 __u16 bcdDevice, rev_maj, rev_min;
496 493
497 /* Find the report to use */
498 if (list_empty(report_list)) {
499 hid_err(hid, "No output report found\n");
500 return -1;
501 }
502
503 /* Check that the report looks ok */ 494 /* Check that the report looks ok */
504 report = list_entry(report_list->next, struct hid_report, list); 495 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
505 if (!report) {
506 hid_err(hid, "NULL output report\n");
507 return -1; 496 return -1;
508 }
509
510 field = report->field[0];
511 if (!field) {
512 hid_err(hid, "NULL field\n");
513 return -1;
514 }
515 497
516 /* Check what wheel has been connected */ 498 /* Check what wheel has been connected */
517 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) { 499 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index d7ea8c845b40..e1394af0ae7b 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
128int lgff_init(struct hid_device* hid) 128int lgff_init(struct hid_device* hid)
129{ 129{
130 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 130 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
131 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
132 struct input_dev *dev = hidinput->input; 131 struct input_dev *dev = hidinput->input;
133 struct hid_report *report;
134 struct hid_field *field;
135 const signed short *ff_bits = ff_joystick; 132 const signed short *ff_bits = ff_joystick;
136 int error; 133 int error;
137 int i; 134 int i;
138 135
139 /* Find the report to use */
140 if (list_empty(report_list)) {
141 hid_err(hid, "No output report found\n");
142 return -1;
143 }
144
145 /* Check that the report looks ok */ 136 /* Check that the report looks ok */
146 report = list_entry(report_list->next, struct hid_report, list); 137 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
147 field = report->field[0]; 138 return -ENODEV;
148 if (!field) {
149 hid_err(hid, "NULL field\n");
150 return -1;
151 }
152 139
153 for (i = 0; i < ARRAY_SIZE(devices); i++) { 140 for (i = 0; i < ARRAY_SIZE(devices); i++) {
154 if (dev->id.vendor == devices[i].idVendor && 141 if (dev->id.vendor == devices[i].idVendor &&
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7800b1410562..2e5302462efb 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -461,7 +461,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
461 struct hid_report *report; 461 struct hid_report *report;
462 struct hid_report_enum *output_report_enum; 462 struct hid_report_enum *output_report_enum;
463 u8 *data = (u8 *)(&dj_report->device_index); 463 u8 *data = (u8 *)(&dj_report->device_index);
464 int i; 464 unsigned int i;
465 465
466 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; 466 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
467 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; 467 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
@@ -471,7 +471,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
471 return -ENODEV; 471 return -ENODEV;
472 } 472 }
473 473
474 for (i = 0; i < report->field[0]->report_count; i++) 474 for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
475 report->field[0]->value[i] = data[i]; 475 report->field[0]->value[i] = data[i];
476 476
477 hid_hw_request(hdev, report, HID_REQ_SET_REPORT); 477 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
791 goto hid_parse_fail; 791 goto hid_parse_fail;
792 } 792 }
793 793
794 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
795 0, DJREPORT_SHORT_LENGTH - 1)) {
796 retval = -ENODEV;
797 goto hid_parse_fail;
798 }
799
794 /* Starts the usb device and connects to upper interfaces hiddev and 800 /* Starts the usb device and connects to upper interfaces hiddev and
795 * hidraw */ 801 * hidraw */
796 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 802 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ac28f08c3866..5e5fe1b8eebb 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -101,9 +101,9 @@ struct mt_device {
101 unsigned last_slot_field; /* the last field of a slot */ 101 unsigned last_slot_field; /* the last field of a slot */
102 unsigned mt_report_id; /* the report ID of the multitouch device */ 102 unsigned mt_report_id; /* the report ID of the multitouch device */
103 unsigned pen_report_id; /* the report ID of the pen device */ 103 unsigned pen_report_id; /* the report ID of the pen device */
104 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ 104 __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
105 __s8 inputmode_index; /* InputMode HID feature index in the report */ 105 __s16 inputmode_index; /* InputMode HID feature index in the report */
106 __s8 maxcontact_report_id; /* Maximum Contact Number HID feature, 106 __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
107 -1 if non-existent */ 107 -1 if non-existent */
108 __u8 num_received; /* how many contacts we received */ 108 __u8 num_received; /* how many contacts we received */
109 __u8 num_expected; /* expected last contact index */ 109 __u8 num_expected; /* expected last contact index */
@@ -312,20 +312,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
312 struct hid_field *field, struct hid_usage *usage) 312 struct hid_field *field, struct hid_usage *usage)
313{ 313{
314 struct mt_device *td = hid_get_drvdata(hdev); 314 struct mt_device *td = hid_get_drvdata(hdev);
315 int i;
316 315
317 switch (usage->hid) { 316 switch (usage->hid) {
318 case HID_DG_INPUTMODE: 317 case HID_DG_INPUTMODE:
319 td->inputmode = field->report->id; 318 /* Ignore if value index is out of bounds. */
320 td->inputmode_index = 0; /* has to be updated below */ 319 if (usage->usage_index >= field->report_count) {
321 320 dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
322 for (i=0; i < field->maxusage; i++) { 321 break;
323 if (field->usage[i].hid == usage->hid) {
324 td->inputmode_index = i;
325 break;
326 }
327 } 322 }
328 323
324 td->inputmode = field->report->id;
325 td->inputmode_index = usage->usage_index;
326
329 break; 327 break;
330 case HID_DG_CONTACTMAX: 328 case HID_DG_CONTACTMAX:
331 td->maxcontact_report_id = field->report->id; 329 td->maxcontact_report_id = field->report->id;
@@ -511,6 +509,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
511 mt_store_field(usage, td, hi); 509 mt_store_field(usage, td, hi);
512 return 1; 510 return 1;
513 case HID_DG_CONTACTCOUNT: 511 case HID_DG_CONTACTCOUNT:
512 /* Ignore if indexes are out of bounds. */
513 if (field->index >= field->report->maxfield ||
514 usage->usage_index >= field->report_count)
515 return 1;
514 td->cc_index = field->index; 516 td->cc_index = field->index;
515 td->cc_value_index = usage->usage_index; 517 td->cc_value_index = usage->usage_index;
516 return 1; 518 return 1;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 30dbb6b40bbf..b18320db5f7d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -537,6 +537,10 @@ static int buzz_init(struct hid_device *hdev)
537 drv_data = hid_get_drvdata(hdev); 537 drv_data = hid_get_drvdata(hdev);
538 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER)); 538 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
539 539
540 /* Validate expected report characteristics. */
541 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
542 return -ENODEV;
543
540 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL); 544 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
541 if (!buzz) { 545 if (!buzz) {
542 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n"); 546 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d16491192112..29f328f411fb 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
249 goto err_free; 249 goto err_free;
250 } 250 }
251 251
252 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
253 ret = -ENODEV;
254 goto err_free;
255 }
256
252 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 257 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
253 if (ret) { 258 if (ret) {
254 hid_err(hdev, "hw start failed\n"); 259 hid_err(hdev, "hw start failed\n");
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 6ec28a37c146..a29756c6ca02 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -68,21 +68,13 @@ static int zpff_init(struct hid_device *hid)
68 struct hid_report *report; 68 struct hid_report *report;
69 struct hid_input *hidinput = list_entry(hid->inputs.next, 69 struct hid_input *hidinput = list_entry(hid->inputs.next,
70 struct hid_input, list); 70 struct hid_input, list);
71 struct list_head *report_list =
72 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
73 struct input_dev *dev = hidinput->input; 71 struct input_dev *dev = hidinput->input;
74 int error; 72 int i, error;
75 73
76 if (list_empty(report_list)) { 74 for (i = 0; i < 4; i++) {
77 hid_err(hid, "no output report found\n"); 75 report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
78 return -ENODEV; 76 if (!report)
79 } 77 return -ENODEV;
80
81 report = list_entry(report_list->next, struct hid_report, list);
82
83 if (report->maxfield < 4) {
84 hid_err(hid, "not enough fields in report\n");
85 return -ENODEV;
86 } 78 }
87 79
88 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL); 80 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
195 195
196 do { 196 do {
197 ret = vmbus_negotiate_version(msginfo, version); 197 ret = vmbus_negotiate_version(msginfo, version);
198 if (ret) 198 if (ret == -ETIMEDOUT)
199 goto cleanup; 199 goto cleanup;
200 200
201 if (vmbus_connection.conn_state == CONNECTED) 201 if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
32/* 32/*
33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) 33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
34 */ 34 */
35#define WS2008_SRV_MAJOR 1
36#define WS2008_SRV_MINOR 0
37#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
38
35#define WIN7_SRV_MAJOR 3 39#define WIN7_SRV_MAJOR 3
36#define WIN7_SRV_MINOR 0 40#define WIN7_SRV_MINOR 0
37#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) 41#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
38 42
39#define WIN8_SRV_MAJOR 4 43#define WIN8_SRV_MAJOR 4
40#define WIN8_SRV_MINOR 0 44#define WIN8_SRV_MINOR 0
41#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
42 46
43/* 47/*
44 * Global state maintained for transaction that is being processed. 48 * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
587 591
588 struct icmsg_hdr *icmsghdrp; 592 struct icmsg_hdr *icmsghdrp;
589 struct icmsg_negotiate *negop = NULL; 593 struct icmsg_negotiate *negop = NULL;
594 int util_fw_version;
595 int kvp_srv_version;
590 596
591 if (kvp_transaction.active) { 597 if (kvp_transaction.active) {
592 /* 598 /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
606 612
607 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 613 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
608 /* 614 /*
609 * We start with win8 version and if the host cannot 615 * Based on the host, select appropriate
610 * support that we use the previous version. 616 * framework and service versions we will
617 * negotiate.
611 */ 618 */
612 if (vmbus_prep_negotiate_resp(icmsghdrp, negop, 619 switch (vmbus_proto_version) {
613 recv_buffer, UTIL_FW_MAJOR_MINOR, 620 case (VERSION_WS2008):
614 WIN8_SRV_MAJOR_MINOR)) 621 util_fw_version = UTIL_WS2K8_FW_VERSION;
615 goto done; 622 kvp_srv_version = WS2008_SRV_VERSION;
616 623 break;
624 case (VERSION_WIN7):
625 util_fw_version = UTIL_FW_VERSION;
626 kvp_srv_version = WIN7_SRV_VERSION;
627 break;
628 default:
629 util_fw_version = UTIL_FW_VERSION;
630 kvp_srv_version = WIN8_SRV_VERSION;
631 }
617 vmbus_prep_negotiate_resp(icmsghdrp, negop, 632 vmbus_prep_negotiate_resp(icmsghdrp, negop,
618 recv_buffer, UTIL_FW_MAJOR_MINOR, 633 recv_buffer, util_fw_version,
619 WIN7_SRV_MAJOR_MINOR); 634 kvp_srv_version);
620 635
621 } else { 636 } else {
622 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 637 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
649 return; 664 return;
650 665
651 } 666 }
652done:
653 667
654 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION 668 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
655 | ICMSGHDRFLAG_RESPONSE; 669 | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
26 26
27#define VSS_MAJOR 5 27#define VSS_MAJOR 5
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31
32 32
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
190 190
191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
192 vmbus_prep_negotiate_resp(icmsghdrp, negop, 192 vmbus_prep_negotiate_resp(icmsghdrp, negop,
193 recv_buffer, UTIL_FW_MAJOR_MINOR, 193 recv_buffer, UTIL_FW_VERSION,
194 VSS_MAJOR_MINOR); 194 VSS_VERSION);
195 } else { 195 } else {
196 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 196 vss_msg = (struct hv_vss_msg *)&recv_buffer[
197 sizeof(struct vmbuspipe_hdr) + 197 sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#define SHUTDOWN_MAJOR 3
32#define SHUTDOWN_MINOR 0
33#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
34 31
35#define TIMESYNCH_MAJOR 3 32#define SD_MAJOR 3
36#define TIMESYNCH_MINOR 0 33#define SD_MINOR 0
37#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) 34#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
38 35
39#define HEARTBEAT_MAJOR 3 36#define SD_WS2008_MAJOR 1
40#define HEARTBEAT_MINOR 0 37#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
41#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) 38
39#define TS_MAJOR 3
40#define TS_MINOR 0
41#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
42
43#define TS_WS2008_MAJOR 1
44#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
45
46#define HB_MAJOR 3
47#define HB_MINOR 0
48#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
49
50#define HB_WS2008_MAJOR 1
51#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
52
53static int sd_srv_version;
54static int ts_srv_version;
55static int hb_srv_version;
56static int util_fw_version;
42 57
43static void shutdown_onchannelcallback(void *context); 58static void shutdown_onchannelcallback(void *context);
44static struct hv_util_service util_shutdown = { 59static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
99 114
100 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 115 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
101 vmbus_prep_negotiate_resp(icmsghdrp, negop, 116 vmbus_prep_negotiate_resp(icmsghdrp, negop,
102 shut_txf_buf, UTIL_FW_MAJOR_MINOR, 117 shut_txf_buf, util_fw_version,
103 SHUTDOWN_MAJOR_MINOR); 118 sd_srv_version);
104 } else { 119 } else {
105 shutdown_msg = 120 shutdown_msg =
106 (struct shutdown_msg_data *)&shut_txf_buf[ 121 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
216 struct icmsg_hdr *icmsghdrp; 231 struct icmsg_hdr *icmsghdrp;
217 struct ictimesync_data *timedatap; 232 struct ictimesync_data *timedatap;
218 u8 *time_txf_buf = util_timesynch.recv_buffer; 233 u8 *time_txf_buf = util_timesynch.recv_buffer;
234 struct icmsg_negotiate *negop = NULL;
219 235
220 vmbus_recvpacket(channel, time_txf_buf, 236 vmbus_recvpacket(channel, time_txf_buf,
221 PAGE_SIZE, &recvlen, &requestid); 237 PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
225 sizeof(struct vmbuspipe_hdr)]; 241 sizeof(struct vmbuspipe_hdr)];
226 242
227 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 243 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
228 vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, 244 vmbus_prep_negotiate_resp(icmsghdrp, negop,
229 UTIL_FW_MAJOR_MINOR, 245 time_txf_buf,
230 TIMESYNCH_MAJOR_MINOR); 246 util_fw_version,
247 ts_srv_version);
231 } else { 248 } else {
232 timedatap = (struct ictimesync_data *)&time_txf_buf[ 249 timedatap = (struct ictimesync_data *)&time_txf_buf[
233 sizeof(struct vmbuspipe_hdr) + 250 sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
257 struct icmsg_hdr *icmsghdrp; 274 struct icmsg_hdr *icmsghdrp;
258 struct heartbeat_msg_data *heartbeat_msg; 275 struct heartbeat_msg_data *heartbeat_msg;
259 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 276 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
277 struct icmsg_negotiate *negop = NULL;
260 278
261 vmbus_recvpacket(channel, hbeat_txf_buf, 279 vmbus_recvpacket(channel, hbeat_txf_buf,
262 PAGE_SIZE, &recvlen, &requestid); 280 PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
266 sizeof(struct vmbuspipe_hdr)]; 284 sizeof(struct vmbuspipe_hdr)];
267 285
268 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 286 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
269 vmbus_prep_negotiate_resp(icmsghdrp, NULL, 287 vmbus_prep_negotiate_resp(icmsghdrp, negop,
270 hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, 288 hbeat_txf_buf, util_fw_version,
271 HEARTBEAT_MAJOR_MINOR); 289 hb_srv_version);
272 } else { 290 } else {
273 heartbeat_msg = 291 heartbeat_msg =
274 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 292 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
321 goto error; 339 goto error;
322 340
323 hv_set_drvdata(dev, srv); 341 hv_set_drvdata(dev, srv);
342 /*
343 * Based on the host; initialize the framework and
344 * service version numbers we will negotiate.
345 */
346 switch (vmbus_proto_version) {
347 case (VERSION_WS2008):
348 util_fw_version = UTIL_WS2K8_FW_VERSION;
349 sd_srv_version = SD_WS2008_VERSION;
350 ts_srv_version = TS_WS2008_VERSION;
351 hb_srv_version = HB_WS2008_VERSION;
352 break;
353
354 default:
355 util_fw_version = UTIL_FW_VERSION;
356 sd_srv_version = SD_VERSION;
357 ts_srv_version = TS_VERSION;
358 hb_srv_version = HB_VERSION;
359 }
360
324 return 0; 361 return 0;
325 362
326error: 363error:
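
All of the WS2008/WIN7/WIN8-style *_VERSION macros introduced above pack the version with the major number in the upper 16 bits and the minor in the lower 16, and util_probe() plus the channel callbacks now hand those packed values to vmbus_prep_negotiate_resp(). A stand-alone illustration of the packing (not part of the patch; plain user-space C only to show the arithmetic):

    #include <stdio.h>

    int main(void)
    {
            /* SD_MAJOR = 3, SD_MINOR = 0 and SD_WS2008_MAJOR = 1, as defined above */
            unsigned int sd_version        = (3 << 16) | 0;  /* SD_VERSION */
            unsigned int sd_ws2008_version = (1 << 16) | 0;  /* SD_WS2008_VERSION */

            printf("SD_VERSION        = 0x%08x\n", sd_version);        /* prints 0x00030000 */
            printf("SD_WS2008_VERSION = 0x%08x\n", sd_ws2008_version); /* prints 0x00010000 */
            return 0;
    }
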
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..98814d12a604 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
525{ 525{
526 struct applesmc_registers *s = &smcreg; 526 struct applesmc_registers *s = &smcreg;
527 bool left_light_sensor, right_light_sensor; 527 bool left_light_sensor, right_light_sensor;
528 unsigned int count;
528 u8 tmp[1]; 529 u8 tmp[1];
529 int ret; 530 int ret;
530 531
531 if (s->init_complete) 532 if (s->init_complete)
532 return 0; 533 return 0;
533 534
534 ret = read_register_count(&s->key_count); 535 ret = read_register_count(&count);
535 if (ret) 536 if (ret)
536 return ret; 537 return ret;
537 538
539 if (s->cache && s->key_count != count) {
540 pr_warn("key count changed from %d to %d\n",
541 s->key_count, count);
542 kfree(s->cache);
543 s->cache = NULL;
544 }
545 s->key_count = count;
546
538 if (!s->cache) 547 if (!s->cache)
539 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); 548 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
540 if (!s->cache) 549 if (!s->cache)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
98 98
99#define DW_IC_ERR_TX_ABRT 0x1 99#define DW_IC_ERR_TX_ABRT 0x1
100 100
101#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
102
101/* 103/*
102 * status codes 104 * status codes
103 */ 105 */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
388static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 390static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
389{ 391{
390 struct i2c_msg *msgs = dev->msgs; 392 struct i2c_msg *msgs = dev->msgs;
391 u32 ic_con; 393 u32 ic_con, ic_tar = 0;
392 394
393 /* Disable the adapter */ 395 /* Disable the adapter */
394 __i2c_dw_enable(dev, false); 396 __i2c_dw_enable(dev, false);
395 397
396 /* set the slave (target) address */
397 dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
398
399 /* if the slave address is ten bit address, enable 10BITADDR */ 398 /* if the slave address is ten bit address, enable 10BITADDR */
400 ic_con = dw_readl(dev, DW_IC_CON); 399 ic_con = dw_readl(dev, DW_IC_CON);
401 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 400 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
402 ic_con |= DW_IC_CON_10BITADDR_MASTER; 401 ic_con |= DW_IC_CON_10BITADDR_MASTER;
403 else 402 /*
403 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
404 * mode has to be enabled via bit 12 of IC_TAR register.
405 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
406 * detected from registers.
407 */
408 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
409 } else {
404 ic_con &= ~DW_IC_CON_10BITADDR_MASTER; 410 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
411 }
412
405 dw_writel(dev, ic_con, DW_IC_CON); 413 dw_writel(dev, ic_con, DW_IC_CON);
406 414
415 /*
416 * Set the slave (target) address and enable 10-bit addressing mode
417 * if applicable.
418 */
419 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
420
407 /* Enable the adapter */ 421 /* Enable the adapter */
408 __i2c_dw_enable(dev, true); 422 __i2c_dw_enable(dev, true);
409 423
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
393 393
394 desc = &priv->hw[priv->head]; 394 desc = &priv->hw[priv->head];
395 395
396 /* Initialize the DMA buffer */
397 memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
398
396 /* Initialize the descriptor */ 399 /* Initialize the descriptor */
397 memset(desc, 0, sizeof(struct ismt_desc)); 400 memset(desc, 0, sizeof(struct ismt_desc));
398 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); 401 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | 234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; 235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
236 236
237 writel_relaxed(data_reg_lo, 237 writel(data_reg_lo,
238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); 238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
239 writel_relaxed(data_reg_hi, 239 writel(data_reg_hi,
240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); 240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
241 241
242 } else { 242 } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); 697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
698 698
699#ifdef CONFIG_OF 699#ifdef CONFIG_OF
700#ifdef CONFIG_HAVE_CLK
700static int 701static int
701mv64xxx_calc_freq(const int tclk, const int n, const int m) 702mv64xxx_calc_freq(const int tclk, const int n, const int m)
702{ 703{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
726 return false; 727 return false;
727 return true; 728 return true;
728} 729}
730#endif /* CONFIG_HAVE_CLK */
729 731
730static int 732static int
731mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, 733mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
732 struct device *dev) 734 struct device *dev)
733{ 735{
734 const struct of_device_id *device;
735 struct device_node *np = dev->of_node;
736 u32 bus_freq, tclk;
737 int rc = 0;
738
739 /* CLK is mandatory when using DT to describe the i2c bus. We 736 /* CLK is mandatory when using DT to describe the i2c bus. We
740 * need to know tclk in order to calculate bus clock 737 * need to know tclk in order to calculate bus clock
741 * factors. 738 * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
744 /* Have OF but no CLK */ 741 /* Have OF but no CLK */
745 return -ENODEV; 742 return -ENODEV;
746#else 743#else
744 const struct of_device_id *device;
745 struct device_node *np = dev->of_node;
746 u32 bus_freq, tclk;
747 int rc = 0;
748
747 if (IS_ERR(drv_data->clk)) { 749 if (IS_ERR(drv_data->clk)) {
748 rc = -ENODEV; 750 rc = -ENODEV;
749 goto out; 751 goto out;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1178 1178
1179 i2c_del_adapter(&i2c->adap); 1179 i2c_del_adapter(&i2c->adap);
1180 1180
1181 clk_disable_unprepare(i2c->clk);
1182
1183 if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) 1181 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1184 s3c24xx_i2c_dt_gpio_free(i2c); 1182 s3c24xx_i2c_dt_gpio_free(i2c);
1185 1183
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 12e32e6b4103..81e3dc260993 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -620,7 +620,7 @@ static int bma180_remove(struct i2c_client *client)
620#ifdef CONFIG_PM_SLEEP 620#ifdef CONFIG_PM_SLEEP
621static int bma180_suspend(struct device *dev) 621static int bma180_suspend(struct device *dev)
622{ 622{
623 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 623 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
624 struct bma180_data *data = iio_priv(indio_dev); 624 struct bma180_data *data = iio_priv(indio_dev);
625 int ret; 625 int ret;
626 626
@@ -633,7 +633,7 @@ static int bma180_suspend(struct device *dev)
633 633
634static int bma180_resume(struct device *dev) 634static int bma180_resume(struct device *dev)
635{ 635{
636 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 636 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
637 struct bma180_data *data = iio_priv(indio_dev); 637 struct bma180_data *data = iio_priv(indio_dev);
638 int ret; 638 int ret;
639 639
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 84be63bdf038..0f16b553e063 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -556,7 +556,7 @@ static const struct iio_info at91_adc_info = {
556 556
557static int at91_adc_probe(struct platform_device *pdev) 557static int at91_adc_probe(struct platform_device *pdev)
558{ 558{
559 unsigned int prsc, mstrclk, ticks, adc_clk, shtim; 559 unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
560 int ret; 560 int ret;
561 struct iio_dev *idev; 561 struct iio_dev *idev;
562 struct at91_adc_state *st; 562 struct at91_adc_state *st;
@@ -649,6 +649,7 @@ static int at91_adc_probe(struct platform_device *pdev)
649 */ 649 */
650 mstrclk = clk_get_rate(st->clk); 650 mstrclk = clk_get_rate(st->clk);
651 adc_clk = clk_get_rate(st->adc_clk); 651 adc_clk = clk_get_rate(st->adc_clk);
652 adc_clk_khz = adc_clk / 1000;
652 prsc = (mstrclk / (2 * adc_clk)) - 1; 653 prsc = (mstrclk / (2 * adc_clk)) - 1;
653 654
654 if (!st->startup_time) { 655 if (!st->startup_time) {
@@ -662,15 +663,15 @@ static int at91_adc_probe(struct platform_device *pdev)
662 * defined in the electrical characteristics of the board, divided by 8. 663 * defined in the electrical characteristics of the board, divided by 8.
663 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock 664 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
664 */ 665 */
665 ticks = round_up((st->startup_time * adc_clk / 666 ticks = round_up((st->startup_time * adc_clk_khz /
666 1000000) - 1, 8) / 8; 667 1000) - 1, 8) / 8;
667 /* 668 /*
668 * a minimal Sample and Hold Time is necessary for the ADC to guarantee 669 * a minimal Sample and Hold Time is necessary for the ADC to guarantee
669 * the best converted final value between two channels selection 670 * the best converted final value between two channels selection
670 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock 671 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
671 */ 672 */
672 shtim = round_up((st->sample_hold_time * adc_clk / 673 shtim = round_up((st->sample_hold_time * adc_clk_khz /
673 1000000) - 1, 1); 674 1000) - 1, 1);
674 675
675 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask; 676 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
676 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask; 677 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d0a79a4bce1c..ba6f6a91dfff 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
185 185
186 iio_device_unregister(indio_dev); 186 iio_device_unregister(indio_dev);
187 187
188 if (!IS_ERR(reg)) { 188 if (!IS_ERR(reg))
189 regulator_disable(reg); 189 regulator_disable(reg);
190 regulator_put(reg);
191 }
192 190
193 return 0; 191 return 0;
194} 192}
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 9d19ba74f22b..415f3c6efd72 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -41,6 +41,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
41 goto error_ret; 41 goto error_ret;
42 } 42 }
43 43
44 iio_buffer_init(&cb_buff->buffer);
45
44 cb_buff->private = private; 46 cb_buff->private = private;
45 cb_buff->cb = cb; 47 cb_buff->cb = cb;
46 cb_buff->buffer.access = &iio_cb_access; 48 cb_buff->buffer.access = &iio_cb_access;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 1f4a48e6a82c..1397b6e0e414 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -37,21 +37,21 @@ struct mcp4725_data {
37 37
38static int mcp4725_suspend(struct device *dev) 38static int mcp4725_suspend(struct device *dev)
39{ 39{
40 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 40 struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
41 struct mcp4725_data *data = iio_priv(indio_dev); 41 to_i2c_client(dev)));
42 u8 outbuf[2]; 42 u8 outbuf[2];
43 43
44 outbuf[0] = (data->powerdown_mode + 1) << 4; 44 outbuf[0] = (data->powerdown_mode + 1) << 4;
45 outbuf[1] = 0; 45 outbuf[1] = 0;
46 data->powerdown = true; 46 data->powerdown = true;
47 47
48 return i2c_master_send(to_i2c_client(dev), outbuf, 2); 48 return i2c_master_send(data->client, outbuf, 2);
49} 49}
50 50
51static int mcp4725_resume(struct device *dev) 51static int mcp4725_resume(struct device *dev)
52{ 52{
53 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 53 struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
54 struct mcp4725_data *data = iio_priv(indio_dev); 54 to_i2c_client(dev)));
55 u8 outbuf[2]; 55 u8 outbuf[2];
56 56
57 /* restore previous DAC value */ 57 /* restore previous DAC value */
@@ -59,7 +59,7 @@ static int mcp4725_resume(struct device *dev)
59 outbuf[1] = data->dac_value & 0xff; 59 outbuf[1] = data->dac_value & 0xff;
60 data->powerdown = false; 60 data->powerdown = false;
61 61
62 return i2c_master_send(to_i2c_client(dev), outbuf, 2); 62 return i2c_master_send(data->client, outbuf, 2);
63} 63}
64 64
65#ifdef CONFIG_PM_SLEEP 65#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 05c1b74502a3..9b32253b824b 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -49,11 +49,15 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
49#define iio_buffer_poll_addr (&iio_buffer_poll) 49#define iio_buffer_poll_addr (&iio_buffer_poll)
50#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer) 50#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
51 51
52void iio_disable_all_buffers(struct iio_dev *indio_dev);
53
52#else 54#else
53 55
54#define iio_buffer_poll_addr NULL 56#define iio_buffer_poll_addr NULL
55#define iio_buffer_read_first_n_outer_addr NULL 57#define iio_buffer_read_first_n_outer_addr NULL
56 58
59static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
60
57#endif 61#endif
58 62
59int iio_device_register_eventset(struct iio_dev *indio_dev); 63int iio_device_register_eventset(struct iio_dev *indio_dev);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e73033f3839a..2710f7245c3b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -460,6 +460,25 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
460 return bytes; 460 return bytes;
461} 461}
462 462
463void iio_disable_all_buffers(struct iio_dev *indio_dev)
464{
465 struct iio_buffer *buffer, *_buffer;
466
467 if (list_empty(&indio_dev->buffer_list))
468 return;
469
470 if (indio_dev->setup_ops->predisable)
471 indio_dev->setup_ops->predisable(indio_dev);
472
473 list_for_each_entry_safe(buffer, _buffer,
474 &indio_dev->buffer_list, buffer_list)
475 list_del_init(&buffer->buffer_list);
476
477 indio_dev->currentmode = INDIO_DIRECT_MODE;
478 if (indio_dev->setup_ops->postdisable)
479 indio_dev->setup_ops->postdisable(indio_dev);
480}
481
463int iio_update_buffers(struct iio_dev *indio_dev, 482int iio_update_buffers(struct iio_dev *indio_dev,
464 struct iio_buffer *insert_buffer, 483 struct iio_buffer *insert_buffer,
465 struct iio_buffer *remove_buffer) 484 struct iio_buffer *remove_buffer)
@@ -528,8 +547,15 @@ int iio_update_buffers(struct iio_dev *indio_dev,
528 * Note can only occur when adding a buffer. 547 * Note can only occur when adding a buffer.
529 */ 548 */
530 list_del(&insert_buffer->buffer_list); 549 list_del(&insert_buffer->buffer_list);
531 indio_dev->active_scan_mask = old_mask; 550 if (old_mask) {
532 success = -EINVAL; 551 indio_dev->active_scan_mask = old_mask;
552 success = -EINVAL;
553 }
554 else {
555 kfree(compound_mask);
556 ret = -EINVAL;
557 goto error_ret;
558 }
533 } 559 }
534 } else { 560 } else {
535 indio_dev->active_scan_mask = compound_mask; 561 indio_dev->active_scan_mask = compound_mask;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 97f0297b120f..f95c6979efd8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -848,13 +848,10 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
848static void iio_dev_release(struct device *device) 848static void iio_dev_release(struct device *device)
849{ 849{
850 struct iio_dev *indio_dev = dev_to_iio_dev(device); 850 struct iio_dev *indio_dev = dev_to_iio_dev(device);
851 if (indio_dev->chrdev.dev)
852 cdev_del(&indio_dev->chrdev);
853 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 851 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
854 iio_device_unregister_trigger_consumer(indio_dev); 852 iio_device_unregister_trigger_consumer(indio_dev);
855 iio_device_unregister_eventset(indio_dev); 853 iio_device_unregister_eventset(indio_dev);
856 iio_device_unregister_sysfs(indio_dev); 854 iio_device_unregister_sysfs(indio_dev);
857 iio_device_unregister_debugfs(indio_dev);
858 855
859 ida_simple_remove(&iio_ida, indio_dev->id); 856 ida_simple_remove(&iio_ida, indio_dev->id);
860 kfree(indio_dev); 857 kfree(indio_dev);
@@ -970,6 +967,8 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
970 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags)) 967 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
971 return -EBUSY; 968 return -EBUSY;
972 969
970 iio_device_get(indio_dev);
971
973 filp->private_data = indio_dev; 972 filp->private_data = indio_dev;
974 973
975 return 0; 974 return 0;
@@ -983,6 +982,8 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
983 struct iio_dev *indio_dev = container_of(inode->i_cdev, 982 struct iio_dev *indio_dev = container_of(inode->i_cdev,
984 struct iio_dev, chrdev); 983 struct iio_dev, chrdev);
985 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); 984 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
985 iio_device_put(indio_dev);
986
986 return 0; 987 return 0;
987} 988}
988 989
@@ -1052,18 +1053,20 @@ int iio_device_register(struct iio_dev *indio_dev)
1052 indio_dev->setup_ops == NULL) 1053 indio_dev->setup_ops == NULL)
1053 indio_dev->setup_ops = &noop_ring_setup_ops; 1054 indio_dev->setup_ops = &noop_ring_setup_ops;
1054 1055
1055 ret = device_add(&indio_dev->dev);
1056 if (ret < 0)
1057 goto error_unreg_eventset;
1058 cdev_init(&indio_dev->chrdev, &iio_buffer_fileops); 1056 cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
1059 indio_dev->chrdev.owner = indio_dev->info->driver_module; 1057 indio_dev->chrdev.owner = indio_dev->info->driver_module;
1058 indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
1060 ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1); 1059 ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
1061 if (ret < 0) 1060 if (ret < 0)
1062 goto error_del_device; 1061 goto error_unreg_eventset;
1063 return 0;
1064 1062
1065error_del_device: 1063 ret = device_add(&indio_dev->dev);
1066 device_del(&indio_dev->dev); 1064 if (ret < 0)
1065 goto error_cdev_del;
1066
1067 return 0;
1068error_cdev_del:
1069 cdev_del(&indio_dev->chrdev);
1067error_unreg_eventset: 1070error_unreg_eventset:
1068 iio_device_unregister_eventset(indio_dev); 1071 iio_device_unregister_eventset(indio_dev);
1069error_free_sysfs: 1072error_free_sysfs:
@@ -1078,9 +1081,17 @@ EXPORT_SYMBOL(iio_device_register);
1078void iio_device_unregister(struct iio_dev *indio_dev) 1081void iio_device_unregister(struct iio_dev *indio_dev)
1079{ 1082{
1080 mutex_lock(&indio_dev->info_exist_lock); 1083 mutex_lock(&indio_dev->info_exist_lock);
1084
1085 device_del(&indio_dev->dev);
1086
1087 if (indio_dev->chrdev.dev)
1088 cdev_del(&indio_dev->chrdev);
1089 iio_device_unregister_debugfs(indio_dev);
1090
1091 iio_disable_all_buffers(indio_dev);
1092
1081 indio_dev->info = NULL; 1093 indio_dev->info = NULL;
1082 mutex_unlock(&indio_dev->info_exist_lock); 1094 mutex_unlock(&indio_dev->info_exist_lock);
1083 device_del(&indio_dev->dev);
1084} 1095}
1085EXPORT_SYMBOL(iio_device_unregister); 1096EXPORT_SYMBOL(iio_device_unregister);
1086subsys_initcall(iio_init); 1097subsys_initcall(iio_init);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 10aa9ef86cec..6be65ef5faa9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -72,7 +72,8 @@ EXPORT_SYMBOL(iio_push_event);
72static unsigned int iio_event_poll(struct file *filep, 72static unsigned int iio_event_poll(struct file *filep,
73 struct poll_table_struct *wait) 73 struct poll_table_struct *wait)
74{ 74{
75 struct iio_event_interface *ev_int = filep->private_data; 75 struct iio_dev *indio_dev = filep->private_data;
76 struct iio_event_interface *ev_int = indio_dev->event_interface;
76 unsigned int events = 0; 77 unsigned int events = 0;
77 78
78 poll_wait(filep, &ev_int->wait, wait); 79 poll_wait(filep, &ev_int->wait, wait);
@@ -90,7 +91,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
90 size_t count, 91 size_t count,
91 loff_t *f_ps) 92 loff_t *f_ps)
92{ 93{
93 struct iio_event_interface *ev_int = filep->private_data; 94 struct iio_dev *indio_dev = filep->private_data;
95 struct iio_event_interface *ev_int = indio_dev->event_interface;
94 unsigned int copied; 96 unsigned int copied;
95 int ret; 97 int ret;
96 98
@@ -121,7 +123,8 @@ error_unlock:
121 123
122static int iio_event_chrdev_release(struct inode *inode, struct file *filep) 124static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
123{ 125{
124 struct iio_event_interface *ev_int = filep->private_data; 126 struct iio_dev *indio_dev = filep->private_data;
127 struct iio_event_interface *ev_int = indio_dev->event_interface;
125 128
126 spin_lock_irq(&ev_int->wait.lock); 129 spin_lock_irq(&ev_int->wait.lock);
127 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 130 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -133,6 +136,8 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
133 kfifo_reset_out(&ev_int->det_events); 136 kfifo_reset_out(&ev_int->det_events);
134 spin_unlock_irq(&ev_int->wait.lock); 137 spin_unlock_irq(&ev_int->wait.lock);
135 138
139 iio_device_put(indio_dev);
140
136 return 0; 141 return 0;
137} 142}
138 143
@@ -158,12 +163,15 @@ int iio_event_getfd(struct iio_dev *indio_dev)
158 return -EBUSY; 163 return -EBUSY;
159 } 164 }
160 spin_unlock_irq(&ev_int->wait.lock); 165 spin_unlock_irq(&ev_int->wait.lock);
161 fd = anon_inode_getfd("iio:event", 166 iio_device_get(indio_dev);
162 &iio_event_chrdev_fileops, ev_int, O_RDONLY); 167
168 fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
169 indio_dev, O_RDONLY);
163 if (fd < 0) { 170 if (fd < 0) {
164 spin_lock_irq(&ev_int->wait.lock); 171 spin_lock_irq(&ev_int->wait.lock);
165 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 172 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
166 spin_unlock_irq(&ev_int->wait.lock); 173 spin_unlock_irq(&ev_int->wait.lock);
174 iio_device_put(indio_dev);
167 } 175 }
168 return fd; 176 return fd;
169} 177}
@@ -276,7 +284,7 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
276 goto error_ret; 284 goto error_ret;
277 } 285 }
278 if (chan->modified) 286 if (chan->modified)
279 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel, 287 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
280 i/IIO_EV_DIR_MAX, 288 i/IIO_EV_DIR_MAX,
281 i%IIO_EV_DIR_MAX); 289 i%IIO_EV_DIR_MAX);
282 else if (chan->differential) 290 else if (chan->differential)
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e8d2849cc81d..cab3bc7494a2 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -29,9 +29,9 @@
29#define ST_MAGN_NUMBER_DATA_CHANNELS 3 29#define ST_MAGN_NUMBER_DATA_CHANNELS 3
30 30
31/* DEFAULT VALUE FOR SENSORS */ 31/* DEFAULT VALUE FOR SENSORS */
32#define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 32#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
33#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 33#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
34#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 34#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
35 35
36/* FULLSCALE */ 36/* FULLSCALE */
37#define ST_MAGN_FS_AVL_1300MG 1300 37#define ST_MAGN_FS_AVL_1300MG 1300
@@ -117,16 +117,16 @@
117static const struct iio_chan_spec st_magn_16bit_channels[] = { 117static const struct iio_chan_spec st_magn_16bit_channels[] = {
118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, 120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
121 ST_MAGN_DEFAULT_OUT_X_L_ADDR), 121 ST_MAGN_DEFAULT_OUT_X_H_ADDR),
122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, 124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
125 ST_MAGN_DEFAULT_OUT_Y_L_ADDR), 125 ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, 128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
129 ST_MAGN_DEFAULT_OUT_Z_L_ADDR), 129 ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
130 IIO_CHAN_SOFT_TIMESTAMP(3) 130 IIO_CHAN_SOFT_TIMESTAMP(3)
131}; 131};
132 132
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 64ccde3f1f7a..6d63883da1ab 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -255,12 +255,14 @@ static int tmp006_remove(struct i2c_client *client)
255#ifdef CONFIG_PM_SLEEP 255#ifdef CONFIG_PM_SLEEP
256static int tmp006_suspend(struct device *dev) 256static int tmp006_suspend(struct device *dev)
257{ 257{
258 return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev))); 258 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
259 return tmp006_powerdown(iio_priv(indio_dev));
259} 260}
260 261
261static int tmp006_resume(struct device *dev) 262static int tmp006_resume(struct device *dev)
262{ 263{
263 struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev)); 264 struct tmp006_data *data = iio_priv(i2c_get_clientdata(
265 to_i2c_client(dev)));
264 return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG, 266 return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
265 data->config | TMP006_CONFIG_MOD_MASK); 267 data->config | TMP006_CONFIG_MOD_MASK);
266} 268}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 653ac6bfc57a..6c923c7039a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1588 int resp_data_len; 1588 int resp_data_len;
1589 int resp_len; 1589 int resp_len;
1590 1590
1591 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; 1591 resp_data_len = 4;
1592 resp_len = sizeof(*srp_rsp) + resp_data_len; 1592 resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 1593
1594 srp_rsp = ioctx->ioctx.buf; 1594 srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1600 + atomic_xchg(&ch->req_lim_delta, 0)); 1600 + atomic_xchg(&ch->req_lim_delta, 0));
1601 srp_rsp->tag = tag; 1601 srp_rsp->tag = tag;
1602 1602
1603 if (rsp_code != SRP_TSK_MGMT_SUCCESS) { 1603 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1604 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1604 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1605 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); 1605 srp_rsp->data[3] = rsp_code;
1606 srp_rsp->data[3] = rsp_code;
1607 }
1608 1606
1609 return resp_len; 1607 return resp_len;
1610} 1608}
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
2358 transport_deregister_session(se_sess); 2356 transport_deregister_session(se_sess);
2359 ch->sess = NULL; 2357 ch->sess = NULL;
2360 2358
2359 ib_destroy_cm_id(ch->cm_id);
2360
2361 srpt_destroy_ch_ib(ch); 2361 srpt_destroy_ch_ib(ch);
2362 2362
2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
2368 list_del(&ch->list); 2368 list_del(&ch->list);
2369 spin_unlock_irq(&sdev->spinlock); 2369 spin_unlock_irq(&sdev->spinlock);
2370 2370
2371 ib_destroy_cm_id(ch->cm_id);
2372
2373 if (ch->release_done) 2371 if (ch->release_done)
2374 complete(ch->release_done); 2372 complete(ch->release_done);
2375 2373
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..181c9ba929cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
377 u32 cbar; 377 u32 cbar;
378 pgd_t *pgd; 378 pgd_t *pgd;
379}; 379};
380#define INVALID_IRPTNDX 0xff
380 381
381#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 382#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
382#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 383#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
840 if (IS_ERR_VALUE(ret)) { 841 if (IS_ERR_VALUE(ret)) {
841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 842 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
842 root_cfg->irptndx, irq); 843 root_cfg->irptndx, irq);
843 root_cfg->irptndx = -1; 844 root_cfg->irptndx = INVALID_IRPTNDX;
844 goto out_free_context; 845 goto out_free_context;
845 } 846 }
846 847
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
869 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); 870 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
870 arm_smmu_tlb_inv_context(root_cfg); 871 arm_smmu_tlb_inv_context(root_cfg);
871 872
872 if (root_cfg->irptndx != -1) { 873 if (root_cfg->irptndx != INVALID_IRPTNDX) {
873 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 874 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
874 free_irq(irq, domain); 875 free_irq(irq, domain);
875 } 876 }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1857 goto out_put_parent; 1858 goto out_put_parent;
1858 } 1859 }
1859 1860
1860 arm_smmu_device_reset(smmu);
1861
1862 for (i = 0; i < smmu->num_global_irqs; ++i) { 1861 for (i = 0; i < smmu->num_global_irqs; ++i) {
1863 err = request_irq(smmu->irqs[i], 1862 err = request_irq(smmu->irqs[i],
1864 arm_smmu_global_fault, 1863 arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1876 spin_lock(&arm_smmu_devices_lock); 1875 spin_lock(&arm_smmu_devices_lock);
1877 list_add(&smmu->list, &arm_smmu_devices); 1876 list_add(&smmu->list, &arm_smmu_devices);
1878 spin_unlock(&arm_smmu_devices_lock); 1877 spin_unlock(&arm_smmu_devices_lock);
1878
1879 arm_smmu_device_reset(smmu);
1879 return 0; 1880 return 0;
1880 1881
1881out_free_irqs: 1882out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
1966 return ret; 1967 return ret;
1967 1968
1968 /* Oh, for a proper bus abstraction */ 1969 /* Oh, for a proper bus abstraction */
1969 if (!iommu_present(&platform_bus_type)); 1970 if (!iommu_present(&platform_bus_type))
1970 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1971 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1971 1972
1972 if (!iommu_present(&amba_bustype)); 1973 if (!iommu_present(&amba_bustype))
1973 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1974 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1974 1975
1975 return 0; 1976 return 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 7f910c76ca0a..3c92780bda09 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
2295static void 2295static void
2296hfcpci_softirq(void *arg) 2296hfcpci_softirq(void *arg)
2297{ 2297{
2298 (void) driver_for_each_device(&hfc_driver.driver, NULL, arg, 2298 WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
2299 _hfcpci_softirq); 2299 _hfcpci_softirq) != 0);
2300 2300
2301 /* if next event would be in the past ... */ 2301 /* if next event would be in the past ... */
2302 if ((s32)(hfc_jiffies + tics - jiffies) <= 0) 2302 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 1063babe1d3a..36817e0a0b94 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -314,7 +314,7 @@ Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
314 314
315 t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx); 315 t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
316 QuickHex(t, cs->rcvbuf, cs->rcvidx); 316 QuickHex(t, cs->rcvbuf, cs->rcvidx);
317 debugl1(cs, cs->dlog); 317 debugl1(cs, "%s", cs->dlog);
318 } 318 }
319 /* moves received data in sk-buffer */ 319 /* moves received data in sk-buffer */
320 memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx); 320 memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
@@ -406,7 +406,7 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
406 406
407 t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count); 407 t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
408 QuickHex(t, deb_ptr, count); 408 QuickHex(t, deb_ptr, count);
409 debugl1(cs, cs->dlog); 409 debugl1(cs, "%s", cs->dlog);
410 } 410 }
411 /* AMD interrupts on */ 411 /* AMD interrupts on */
412 AmdIrqOn(cs); 412 AmdIrqOn(cs);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index ee9b9a03cffa..d1427bd6452d 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -285,7 +285,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
285 t += sprintf(t, "hdlc_empty_fifo %c cnt %d", 285 t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
286 bcs->channel ? 'B' : 'A', count); 286 bcs->channel ? 'B' : 'A', count);
287 QuickHex(t, p, count); 287 QuickHex(t, p, count);
288 debugl1(cs, bcs->blog); 288 debugl1(cs, "%s", bcs->blog);
289 } 289 }
290} 290}
291 291
@@ -345,7 +345,7 @@ hdlc_fill_fifo(struct BCState *bcs)
345 t += sprintf(t, "hdlc_fill_fifo %c cnt %d", 345 t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
346 bcs->channel ? 'B' : 'A', count); 346 bcs->channel ? 'B' : 'A', count);
347 QuickHex(t, p, count); 347 QuickHex(t, p, count);
348 debugl1(cs, bcs->blog); 348 debugl1(cs, "%s", bcs->blog);
349 } 349 }
350} 350}
351 351
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..b33f53b3ca93 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1896 ptr--; 1896 ptr--;
1897 *ptr++ = '\n'; 1897 *ptr++ = '\n';
1898 *ptr = 0; 1898 *ptr = 0;
1899 HiSax_putstatus(cs, NULL, cs->dlog); 1899 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1900 } else 1900 } else
1901 HiSax_putstatus(cs, "LogEcho: ", 1901 HiSax_putstatus(cs, "LogEcho: ",
1902 "warning Frame too big (%d)", 1902 "warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 8d0cf6e4dc00..4fc90de68d18 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -427,7 +427,7 @@ Memhscx_empty_fifo(struct BCState *bcs, int count)
427 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 427 t += sprintf(t, "hscx_empty_fifo %c cnt %d",
428 bcs->hw.hscx.hscx ? 'B' : 'A', count); 428 bcs->hw.hscx.hscx ? 'B' : 'A', count);
429 QuickHex(t, ptr, count); 429 QuickHex(t, ptr, count);
430 debugl1(cs, bcs->blog); 430 debugl1(cs, "%s", bcs->blog);
431 } 431 }
432} 432}
433 433
@@ -469,7 +469,7 @@ Memhscx_fill_fifo(struct BCState *bcs)
469 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 469 t += sprintf(t, "hscx_fill_fifo %c cnt %d",
470 bcs->hw.hscx.hscx ? 'B' : 'A', count); 470 bcs->hw.hscx.hscx ? 'B' : 'A', count);
471 QuickHex(t, ptr, count); 471 QuickHex(t, ptr, count);
472 debugl1(cs, bcs->blog); 472 debugl1(cs, "%s", bcs->blog);
473 } 473 }
474} 474}
475 475
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 1df6f9a56ca2..2be1c8a3bb5f 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -535,7 +535,7 @@ check_arcofi(struct IsdnCardState *cs)
535 t = tmp; 535 t = tmp;
536 t += sprintf(tmp, "Arcofi data"); 536 t += sprintf(tmp, "Arcofi data");
537 QuickHex(t, p, cs->dc.isac.mon_rxp); 537 QuickHex(t, p, cs->dc.isac.mon_rxp);
538 debugl1(cs, tmp); 538 debugl1(cs, "%s", tmp);
539 if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) { 539 if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
540 switch (cs->dc.isac.mon_rx[1]) { 540 switch (cs->dc.isac.mon_rx[1]) {
541 case 0x80: 541 case 0x80:
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index d4c98d330bfe..3f84dd8f1757 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
344 344
345 t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt); 345 t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
346 QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt); 346 QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
347 debugl1(cs, tmp); 347 debugl1(cs, "%s", tmp);
348 } 348 }
349 cs->hw.elsa.rcvcnt = 0; 349 cs->hw.elsa.rcvcnt = 0;
350} 350}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3ccd724ff8c2..497bd026c237 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
901 ptr--; 901 ptr--;
902 *ptr++ = '\n'; 902 *ptr++ = '\n';
903 *ptr = 0; 903 *ptr = 0;
904 HiSax_putstatus(cs, NULL, cs->dlog); 904 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
905 } else 905 } else
906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); 906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
907 } 907 }
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index dc4574f735ef..fa1fefd711cd 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
674 ptr--; 674 ptr--;
675 *ptr++ = '\n'; 675 *ptr++ = '\n';
676 *ptr = 0; 676 *ptr = 0;
677 HiSax_putstatus(cs, NULL, cs->dlog); 677 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
678 } else 678 } else
679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); 679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
680 } 680 }
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index f398d4838937..a8d6188402c6 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -75,7 +75,7 @@ hscx_empty_fifo(struct BCState *bcs, int count)
75 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 75 t += sprintf(t, "hscx_empty_fifo %c cnt %d",
76 bcs->hw.hscx.hscx ? 'B' : 'A', count); 76 bcs->hw.hscx.hscx ? 'B' : 'A', count);
77 QuickHex(t, ptr, count); 77 QuickHex(t, ptr, count);
78 debugl1(cs, bcs->blog); 78 debugl1(cs, "%s", bcs->blog);
79 } 79 }
80} 80}
81 81
@@ -115,7 +115,7 @@ hscx_fill_fifo(struct BCState *bcs)
115 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 115 t += sprintf(t, "hscx_fill_fifo %c cnt %d",
116 bcs->hw.hscx.hscx ? 'B' : 'A', count); 116 bcs->hw.hscx.hscx ? 'B' : 'A', count);
117 QuickHex(t, ptr, count); 117 QuickHex(t, ptr, count);
118 debugl1(cs, bcs->blog); 118 debugl1(cs, "%s", bcs->blog);
119 } 119 }
120} 120}
121 121
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index db5321f6379b..51dae9167238 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -134,7 +134,7 @@ icc_empty_fifo(struct IsdnCardState *cs, int count)
134 134
135 t += sprintf(t, "icc_empty_fifo cnt %d", count); 135 t += sprintf(t, "icc_empty_fifo cnt %d", count);
136 QuickHex(t, ptr, count); 136 QuickHex(t, ptr, count);
137 debugl1(cs, cs->dlog); 137 debugl1(cs, "%s", cs->dlog);
138 } 138 }
139} 139}
140 140
@@ -176,7 +176,7 @@ icc_fill_fifo(struct IsdnCardState *cs)
176 176
177 t += sprintf(t, "icc_fill_fifo cnt %d", count); 177 t += sprintf(t, "icc_fill_fifo cnt %d", count);
178 QuickHex(t, ptr, count); 178 QuickHex(t, ptr, count);
179 debugl1(cs, cs->dlog); 179 debugl1(cs, "%s", cs->dlog);
180 } 180 }
181} 181}
182 182
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 74feb5c83067..5faa5de24305 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -260,7 +260,7 @@ dch_empty_fifo(struct IsdnCardState *cs, int count)
260 260
261 t += sprintf(t, "dch_empty_fifo() cnt %d", count); 261 t += sprintf(t, "dch_empty_fifo() cnt %d", count);
262 QuickHex(t, ptr, count); 262 QuickHex(t, ptr, count);
263 debugl1(cs, cs->dlog); 263 debugl1(cs, "%s", cs->dlog);
264 } 264 }
265} 265}
266 266
@@ -307,7 +307,7 @@ dch_fill_fifo(struct IsdnCardState *cs)
307 307
308 t += sprintf(t, "dch_fill_fifo() cnt %d", count); 308 t += sprintf(t, "dch_fill_fifo() cnt %d", count);
309 QuickHex(t, ptr, count); 309 QuickHex(t, ptr, count);
310 debugl1(cs, cs->dlog); 310 debugl1(cs, "%s", cs->dlog);
311 } 311 }
312} 312}
313 313
@@ -539,7 +539,7 @@ bch_empty_fifo(struct BCState *bcs, int count)
539 539
540 t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count); 540 t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
541 QuickHex(t, ptr, count); 541 QuickHex(t, ptr, count);
542 debugl1(cs, bcs->blog); 542 debugl1(cs, "%s", bcs->blog);
543 } 543 }
544} 544}
545 545
@@ -582,7 +582,7 @@ bch_fill_fifo(struct BCState *bcs)
582 582
583 t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count); 583 t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
584 QuickHex(t, ptr, count); 584 QuickHex(t, ptr, count);
585 debugl1(cs, bcs->blog); 585 debugl1(cs, "%s", bcs->blog);
586 } 586 }
587} 587}
588 588
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index a365ccc1c99c..7fdf78f46433 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -137,7 +137,7 @@ isac_empty_fifo(struct IsdnCardState *cs, int count)
137 137
138 t += sprintf(t, "isac_empty_fifo cnt %d", count); 138 t += sprintf(t, "isac_empty_fifo cnt %d", count);
139 QuickHex(t, ptr, count); 139 QuickHex(t, ptr, count);
140 debugl1(cs, cs->dlog); 140 debugl1(cs, "%s", cs->dlog);
141 } 141 }
142} 142}
143 143
@@ -179,7 +179,7 @@ isac_fill_fifo(struct IsdnCardState *cs)
179 179
180 t += sprintf(t, "isac_fill_fifo cnt %d", count); 180 t += sprintf(t, "isac_fill_fifo cnt %d", count);
181 QuickHex(t, ptr, count); 181 QuickHex(t, ptr, count);
182 debugl1(cs, cs->dlog); 182 debugl1(cs, "%s", cs->dlog);
183 } 183 }
184} 184}
185 185
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 7fdf34704fe5..f4956c73aa11 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -74,7 +74,7 @@ sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
74 t = tmp; 74 t = tmp;
75 t += sprintf(t, "sendmbox cnt %d", len); 75 t += sprintf(t, "sendmbox cnt %d", len);
76 QuickHex(t, &msg[len-i], (i > 64) ? 64 : i); 76 QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
77 debugl1(cs, tmp); 77 debugl1(cs, "%s", tmp);
78 i -= 64; 78 i -= 64;
79 } 79 }
80 } 80 }
@@ -105,7 +105,7 @@ rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
105 t = tmp; 105 t = tmp;
106 t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb); 106 t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
107 QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i); 107 QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
108 debugl1(cs, tmp); 108 debugl1(cs, "%s", tmp);
109 i -= 64; 109 i -= 64;
110 } 110 }
111 } 111 }
@@ -1248,7 +1248,7 @@ isar_int_main(struct IsdnCardState *cs)
1248 tp += sprintf(debbuf, "msg iis(%x) msb(%x)", 1248 tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
1249 ireg->iis, ireg->cmsb); 1249 ireg->iis, ireg->cmsb);
1250 QuickHex(tp, (u_char *)ireg->par, ireg->clsb); 1250 QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
1251 debugl1(cs, debbuf); 1251 debugl1(cs, "%s", debbuf);
1252 } 1252 }
1253 break; 1253 break;
1254 case ISAR_IIS_INVMSG: 1254 case ISAR_IIS_INVMSG:
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index f946c58d8ab1..e2ae7871a209 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -81,10 +81,7 @@ modejade(struct BCState *bcs, int mode, int bc)
81 int jade = bcs->hw.hscx.hscx; 81 int jade = bcs->hw.hscx.hscx;
82 82
83 if (cs->debug & L1_DEB_HSCX) { 83 if (cs->debug & L1_DEB_HSCX) {
84 char tmp[40]; 84 debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
85 sprintf(tmp, "jade %c mode %d ichan %d",
86 'A' + jade, mode, bc);
87 debugl1(cs, tmp);
88 } 85 }
89 bcs->mode = mode; 86 bcs->mode = mode;
90 bcs->channel = bc; 87 bcs->channel = bc;
@@ -257,23 +254,18 @@ void
257clear_pending_jade_ints(struct IsdnCardState *cs) 254clear_pending_jade_ints(struct IsdnCardState *cs)
258{ 255{
259 int val; 256 int val;
260 char tmp[64];
261 257
262 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00); 258 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
263 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00); 259 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
264 260
265 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR); 261 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
266 sprintf(tmp, "jade B ISTA %x", val); 262 debugl1(cs, "jade B ISTA %x", val);
267 debugl1(cs, tmp);
268 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR); 263 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
269 sprintf(tmp, "jade A ISTA %x", val); 264 debugl1(cs, "jade A ISTA %x", val);
270 debugl1(cs, tmp);
271 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR); 265 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
272 sprintf(tmp, "jade B STAR %x", val); 266 debugl1(cs, "jade B STAR %x", val);
273 debugl1(cs, tmp);
274 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR); 267 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
275 sprintf(tmp, "jade A STAR %x", val); 268 debugl1(cs, "jade A STAR %x", val);
276 debugl1(cs, tmp);
277 /* Unmask ints */ 269 /* Unmask ints */
278 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8); 270 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
279 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8); 271 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index f521fc83dc76..b930da9b5aa6 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -65,7 +65,7 @@ jade_empty_fifo(struct BCState *bcs, int count)
65 t += sprintf(t, "jade_empty_fifo %c cnt %d", 65 t += sprintf(t, "jade_empty_fifo %c cnt %d",
66 bcs->hw.hscx.hscx ? 'B' : 'A', count); 66 bcs->hw.hscx.hscx ? 'B' : 'A', count);
67 QuickHex(t, ptr, count); 67 QuickHex(t, ptr, count);
68 debugl1(cs, bcs->blog); 68 debugl1(cs, "%s", bcs->blog);
69 } 69 }
70} 70}
71 71
@@ -105,7 +105,7 @@ jade_fill_fifo(struct BCState *bcs)
105 t += sprintf(t, "jade_fill_fifo %c cnt %d", 105 t += sprintf(t, "jade_fill_fifo %c cnt %d",
106 bcs->hw.hscx.hscx ? 'B' : 'A', count); 106 bcs->hw.hscx.hscx ? 'B' : 'A', count);
107 QuickHex(t, ptr, count); 107 QuickHex(t, ptr, count);
108 debugl1(cs, bcs->blog); 108 debugl1(cs, "%s", bcs->blog);
109 } 109 }
110} 110}
111 111
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 4c1bca5caa1d..875402e76d0a 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -63,7 +63,7 @@ l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
63{ 63{
64 dev_kfree_skb(skb); 64 dev_kfree_skb(skb);
65 if (pc->st->l3.debug & L3_DEB_WARN) 65 if (pc->st->l3.debug & L3_DEB_WARN)
66 l3_debug(pc->st, msg); 66 l3_debug(pc->st, "%s", msg);
67 l3_1tr6_release_req(pc, 0, NULL); 67 l3_1tr6_release_req(pc, 0, NULL);
68} 68}
69 69
@@ -161,7 +161,6 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
161{ 161{
162 u_char *p; 162 u_char *p;
163 int bcfound = 0; 163 int bcfound = 0;
164 char tmp[80];
165 struct sk_buff *skb = arg; 164 struct sk_buff *skb = arg;
166 165
167 /* Channel Identification */ 166 /* Channel Identification */
@@ -214,10 +213,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
214 /* Signal all services, linklevel takes care of Service-Indicator */ 213 /* Signal all services, linklevel takes care of Service-Indicator */
215 if (bcfound) { 214 if (bcfound) {
216 if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) { 215 if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
217 sprintf(tmp, "non-digital call: %s -> %s", 216 l3_debug(pc->st, "non-digital call: %s -> %s",
218 pc->para.setup.phone, 217 pc->para.setup.phone,
219 pc->para.setup.eazmsn); 218 pc->para.setup.eazmsn);
220 l3_debug(pc->st, tmp);
221 } 219 }
222 newl3state(pc, 6); 220 newl3state(pc, 6);
223 pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc); 221 pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
@@ -301,7 +299,7 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
301{ 299{
302 u_char *p; 300 u_char *p;
303 int i, tmpcharge = 0; 301 int i, tmpcharge = 0;
304 char a_charge[8], tmp[32]; 302 char a_charge[8];
305 struct sk_buff *skb = arg; 303 struct sk_buff *skb = arg;
306 304
307 p = skb->data; 305 p = skb->data;
@@ -316,8 +314,8 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
316 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 314 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
317 } 315 }
318 if (pc->st->l3.debug & L3_DEB_CHARGE) { 316 if (pc->st->l3.debug & L3_DEB_CHARGE) {
319 sprintf(tmp, "charging info %d", pc->para.chargeinfo); 317 l3_debug(pc->st, "charging info %d",
320 l3_debug(pc->st, tmp); 318 pc->para.chargeinfo);
321 } 319 }
322 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 320 } else if (pc->st->l3.debug & L3_DEB_CHARGE)
323 l3_debug(pc->st, "charging info not found"); 321 l3_debug(pc->st, "charging info not found");
@@ -399,7 +397,7 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
399 struct sk_buff *skb = arg; 397 struct sk_buff *skb = arg;
400 u_char *p; 398 u_char *p;
401 int i, tmpcharge = 0; 399 int i, tmpcharge = 0;
402 char a_charge[8], tmp[32]; 400 char a_charge[8];
403 401
404 StopAllL3Timer(pc); 402 StopAllL3Timer(pc);
405 p = skb->data; 403 p = skb->data;
@@ -414,8 +412,8 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
414 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 412 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
415 } 413 }
416 if (pc->st->l3.debug & L3_DEB_CHARGE) { 414 if (pc->st->l3.debug & L3_DEB_CHARGE) {
417 sprintf(tmp, "charging info %d", pc->para.chargeinfo); 415 l3_debug(pc->st, "charging info %d",
418 l3_debug(pc->st, tmp); 416 pc->para.chargeinfo);
419 } 417 }
420 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 418 } else if (pc->st->l3.debug & L3_DEB_CHARGE)
421 l3_debug(pc->st, "charging info not found"); 419 l3_debug(pc->st, "charging info not found");
@@ -746,7 +744,6 @@ up1tr6(struct PStack *st, int pr, void *arg)
746 int i, mt, cr; 744 int i, mt, cr;
747 struct l3_process *proc; 745 struct l3_process *proc;
748 struct sk_buff *skb = arg; 746 struct sk_buff *skb = arg;
749 char tmp[80];
750 747
751 switch (pr) { 748 switch (pr) {
752 case (DL_DATA | INDICATION): 749 case (DL_DATA | INDICATION):
@@ -762,26 +759,23 @@ up1tr6(struct PStack *st, int pr, void *arg)
762 } 759 }
763 if (skb->len < 4) { 760 if (skb->len < 4) {
764 if (st->l3.debug & L3_DEB_PROTERR) { 761 if (st->l3.debug & L3_DEB_PROTERR) {
765 sprintf(tmp, "up1tr6 len only %d", skb->len); 762 l3_debug(st, "up1tr6 len only %d", skb->len);
766 l3_debug(st, tmp);
767 } 763 }
768 dev_kfree_skb(skb); 764 dev_kfree_skb(skb);
769 return; 765 return;
770 } 766 }
771 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) { 767 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
772 if (st->l3.debug & L3_DEB_PROTERR) { 768 if (st->l3.debug & L3_DEB_PROTERR) {
773 sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d", 769 l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
774 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 770 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
775 skb->data[0], skb->len); 771 skb->data[0], skb->len);
776 l3_debug(st, tmp);
777 } 772 }
778 dev_kfree_skb(skb); 773 dev_kfree_skb(skb);
779 return; 774 return;
780 } 775 }
781 if (skb->data[1] != 1) { 776 if (skb->data[1] != 1) {
782 if (st->l3.debug & L3_DEB_PROTERR) { 777 if (st->l3.debug & L3_DEB_PROTERR) {
783 sprintf(tmp, "up1tr6 CR len not 1"); 778 l3_debug(st, "up1tr6 CR len not 1");
784 l3_debug(st, tmp);
785 } 779 }
786 dev_kfree_skb(skb); 780 dev_kfree_skb(skb);
787 return; 781 return;
@@ -791,9 +785,8 @@ up1tr6(struct PStack *st, int pr, void *arg)
791 if (skb->data[0] == PROTO_DIS_N0) { 785 if (skb->data[0] == PROTO_DIS_N0) {
792 dev_kfree_skb(skb); 786 dev_kfree_skb(skb);
793 if (st->l3.debug & L3_DEB_STATE) { 787 if (st->l3.debug & L3_DEB_STATE) {
794 sprintf(tmp, "up1tr6%s N0 mt %x unhandled", 788 l3_debug(st, "up1tr6%s N0 mt %x unhandled",
795 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt); 789 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
796 l3_debug(st, tmp);
797 } 790 }
798 } else if (skb->data[0] == PROTO_DIS_N1) { 791 } else if (skb->data[0] == PROTO_DIS_N1) {
799 if (!(proc = getl3proc(st, cr))) { 792 if (!(proc = getl3proc(st, cr))) {
@@ -801,8 +794,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
801 if (cr < 128) { 794 if (cr < 128) {
802 if (!(proc = new_l3_process(st, cr))) { 795 if (!(proc = new_l3_process(st, cr))) {
803 if (st->l3.debug & L3_DEB_PROTERR) { 796 if (st->l3.debug & L3_DEB_PROTERR) {
804 sprintf(tmp, "up1tr6 no roc mem"); 797 l3_debug(st, "up1tr6 no roc mem");
805 l3_debug(st, tmp);
806 } 798 }
807 dev_kfree_skb(skb); 799 dev_kfree_skb(skb);
808 return; 800 return;
@@ -821,8 +813,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
821 } else { 813 } else {
822 if (!(proc = new_l3_process(st, cr))) { 814 if (!(proc = new_l3_process(st, cr))) {
823 if (st->l3.debug & L3_DEB_PROTERR) { 815 if (st->l3.debug & L3_DEB_PROTERR) {
824 sprintf(tmp, "up1tr6 no roc mem"); 816 l3_debug(st, "up1tr6 no roc mem");
825 l3_debug(st, tmp);
826 } 817 }
827 dev_kfree_skb(skb); 818 dev_kfree_skb(skb);
828 return; 819 return;
@@ -837,18 +828,16 @@ up1tr6(struct PStack *st, int pr, void *arg)
837 if (i == ARRAY_SIZE(datastln1)) { 828 if (i == ARRAY_SIZE(datastln1)) {
838 dev_kfree_skb(skb); 829 dev_kfree_skb(skb);
839 if (st->l3.debug & L3_DEB_STATE) { 830 if (st->l3.debug & L3_DEB_STATE) {
840 sprintf(tmp, "up1tr6%sstate %d mt %x unhandled", 831 l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
841 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 832 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
842 proc->state, mt); 833 proc->state, mt);
843 l3_debug(st, tmp);
844 } 834 }
845 return; 835 return;
846 } else { 836 } else {
847 if (st->l3.debug & L3_DEB_STATE) { 837 if (st->l3.debug & L3_DEB_STATE) {
848 sprintf(tmp, "up1tr6%sstate %d mt %x", 838 l3_debug(st, "up1tr6%sstate %d mt %x",
849 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 839 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
850 proc->state, mt); 840 proc->state, mt);
851 l3_debug(st, tmp);
852 } 841 }
853 datastln1[i].rout(proc, pr, skb); 842 datastln1[i].rout(proc, pr, skb);
854 } 843 }
@@ -861,7 +850,6 @@ down1tr6(struct PStack *st, int pr, void *arg)
861 int i, cr; 850 int i, cr;
862 struct l3_process *proc; 851 struct l3_process *proc;
863 struct Channel *chan; 852 struct Channel *chan;
864 char tmp[80];
865 853
866 if ((DL_ESTABLISH | REQUEST) == pr) { 854 if ((DL_ESTABLISH | REQUEST) == pr) {
867 l3_msg(st, pr, NULL); 855 l3_msg(st, pr, NULL);
@@ -888,15 +876,13 @@ down1tr6(struct PStack *st, int pr, void *arg)
888 break; 876 break;
889 if (i == ARRAY_SIZE(downstl)) { 877 if (i == ARRAY_SIZE(downstl)) {
890 if (st->l3.debug & L3_DEB_STATE) { 878 if (st->l3.debug & L3_DEB_STATE) {
891 sprintf(tmp, "down1tr6 state %d prim %d unhandled", 879 l3_debug(st, "down1tr6 state %d prim %d unhandled",
892 proc->state, pr); 880 proc->state, pr);
893 l3_debug(st, tmp);
894 } 881 }
895 } else { 882 } else {
896 if (st->l3.debug & L3_DEB_STATE) { 883 if (st->l3.debug & L3_DEB_STATE) {
897 sprintf(tmp, "down1tr6 state %d prim %d", 884 l3_debug(st, "down1tr6 state %d prim %d",
898 proc->state, pr); 885 proc->state, pr);
899 l3_debug(st, tmp);
900 } 886 }
901 downstl[i].rout(proc, pr, arg); 887 downstl[i].rout(proc, pr, arg);
902 } 888 }
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index b646eed379df..233e432e06f6 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -176,7 +176,7 @@ static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s
176 else 176 else
177 j = i; 177 j = i;
178 QuickHex(t, p, j); 178 QuickHex(t, p, j);
179 debugl1(cs, tmp); 179 debugl1(cs, "%s", tmp);
180 p += j; 180 p += j;
181 i -= j; 181 i -= j;
182 t = tmp; 182 t = tmp;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 041bf52d9d0a..af1b020a81f1 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
1179 dp--; 1179 dp--;
1180 *dp++ = '\n'; 1180 *dp++ = '\n';
1181 *dp = 0; 1181 *dp = 0;
1182 HiSax_putstatus(cs, NULL, cs->dlog); 1182 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1183 } else 1183 } else
1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); 1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
1185} 1185}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1246 } 1246 }
1247 if (finish) { 1247 if (finish) {
1248 *dp = 0; 1248 *dp = 0;
1249 HiSax_putstatus(cs, NULL, cs->dlog); 1249 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1250 return; 1250 return;
1251 } 1251 }
1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ 1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]); 1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
1510 } 1510 }
1511 *dp = 0; 1511 *dp = 0;
1512 HiSax_putstatus(cs, NULL, cs->dlog); 1512 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1513} 1513}
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index d8cac6935818..a85895585d90 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -154,7 +154,7 @@ W6692_empty_fifo(struct IsdnCardState *cs, int count)
154 154
155 t += sprintf(t, "W6692_empty_fifo cnt %d", count); 155 t += sprintf(t, "W6692_empty_fifo cnt %d", count);
156 QuickHex(t, ptr, count); 156 QuickHex(t, ptr, count);
157 debugl1(cs, cs->dlog); 157 debugl1(cs, "%s", cs->dlog);
158 } 158 }
159} 159}
160 160
@@ -196,7 +196,7 @@ W6692_fill_fifo(struct IsdnCardState *cs)
196 196
197 t += sprintf(t, "W6692_fill_fifo cnt %d", count); 197 t += sprintf(t, "W6692_fill_fifo cnt %d", count);
198 QuickHex(t, ptr, count); 198 QuickHex(t, ptr, count);
199 debugl1(cs, cs->dlog); 199 debugl1(cs, "%s", cs->dlog);
200 } 200 }
201} 201}
202 202
@@ -226,7 +226,7 @@ W6692B_empty_fifo(struct BCState *bcs, int count)
226 t += sprintf(t, "W6692B_empty_fifo %c cnt %d", 226 t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
227 bcs->channel + '1', count); 227 bcs->channel + '1', count);
228 QuickHex(t, ptr, count); 228 QuickHex(t, ptr, count);
229 debugl1(cs, bcs->blog); 229 debugl1(cs, "%s", bcs->blog);
230 } 230 }
231} 231}
232 232
@@ -264,7 +264,7 @@ W6692B_fill_fifo(struct BCState *bcs)
264 t += sprintf(t, "W6692B_fill_fifo %c cnt %d", 264 t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
265 bcs->channel + '1', count); 265 bcs->channel + '1', count);
266 QuickHex(t, ptr, count); 266 QuickHex(t, ptr, count);
267 debugl1(cs, bcs->blog); 267 debugl1(cs, "%s", bcs->blog);
268 } 268 }
269} 269}
270 270
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
index eba380d7b17f..42d2b893ea67 100644
--- a/drivers/mailbox/mailbox-omap2.c
+++ b/drivers/mailbox/mailbox-omap2.c
@@ -325,7 +325,6 @@ static int omap2_mbox_remove(struct platform_device *pdev)
325 kfree(privblk); 325 kfree(privblk);
326 kfree(mboxblk); 326 kfree(mboxblk);
327 kfree(list); 327 kfree(list);
328 platform_set_drvdata(pdev, NULL);
329 328
330 return 0; 329 return 0;
331} 330}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
498 */ 498 */
499 atomic_t has_dirty; 499 atomic_t has_dirty;
500 500
501 struct ratelimit writeback_rate; 501 struct bch_ratelimit writeback_rate;
502 struct delayed_work writeback_rate_update; 502 struct delayed_work writeback_rate_update;
503 503
504 /* 504 /*
@@ -507,10 +507,9 @@ struct cached_dev {
507 */ 507 */
508 sector_t last_read; 508 sector_t last_read;
509 509
510 /* Number of writeback bios in flight */ 510 /* Limit number of writeback bios in flight */
511 atomic_t in_flight; 511 struct semaphore in_flight;
512 struct closure_with_timer writeback; 512 struct closure_with_timer writeback;
513 struct closure_waitlist writeback_wait;
514 513
515 struct keybuf writeback_keys; 514 struct keybuf writeback_keys;
516 515
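
The bcache.h hunk above swaps the atomic in-flight counter and its closure wait list for a counting semaphore ("Limit number of writeback bios in flight"); the writeback.c changes later in this diff initialise it to 64 with sema_init() and pair it as down() before submitting a dirty I/O and up() when the I/O finishes. For illustration only (not part of the patch), a minimal userspace sketch of the same throttling pattern using POSIX semaphores; the LIMIT of 4, the job count and the sleep are assumptions made for the demo.

/* Illustrative userspace analogue of the "struct semaphore in_flight"
 * pattern: wait on the semaphore before submitting work, post it on
 * completion, so at most LIMIT submissions are outstanding at once.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

#define LIMIT 4         /* the kernel patch uses 64; smaller for the demo */
#define JOBS  16

static sem_t in_flight;

static void *do_io(void *arg)
{
        long id = (long)arg;

        usleep(100000);                 /* pretend to do the writeback I/O */
        printf("job %ld done\n", id);
        sem_post(&in_flight);           /* like up(&dc->in_flight) */
        return NULL;
}

int main(void)
{
        pthread_t tid[JOBS];
        long i;

        sem_init(&in_flight, 0, LIMIT);

        for (i = 0; i < JOBS; i++) {
                /* like down(&dc->in_flight): blocks once LIMIT jobs are out */
                sem_wait(&in_flight);
                pthread_create(&tid[i], NULL, do_io, (void *)i);
        }
        for (i = 0; i < JOBS; i++)
                pthread_join(tid[i], NULL);

        sem_destroy(&in_flight);
        return 0;
}

Build with "cc -pthread sketch.c"; the observable effect is that submissions stall whenever four jobs are already in flight, which is exactly the back-pressure the patch wants from the semaphore.
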
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
926 926
927/* Mergesort */ 927/* Mergesort */
928 928
929static void sort_key_next(struct btree_iter *iter,
930 struct btree_iter_set *i)
931{
932 i->k = bkey_next(i->k);
933
934 if (i->k == i->end)
935 *i = iter->data[--iter->used];
936}
937
929static void btree_sort_fixup(struct btree_iter *iter) 938static void btree_sort_fixup(struct btree_iter *iter)
930{ 939{
931 while (iter->used > 1) { 940 while (iter->used > 1) {
932 struct btree_iter_set *top = iter->data, *i = top + 1; 941 struct btree_iter_set *top = iter->data, *i = top + 1;
933 struct bkey *k;
934 942
935 if (iter->used > 2 && 943 if (iter->used > 2 &&
936 btree_iter_cmp(i[0], i[1])) 944 btree_iter_cmp(i[0], i[1]))
937 i++; 945 i++;
938 946
939 for (k = i->k; 947 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
940 k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
941 k = bkey_next(k))
942 if (top->k > i->k)
943 __bch_cut_front(top->k, k);
944 else if (KEY_SIZE(k))
945 bch_cut_back(&START_KEY(k), top->k);
946
947 if (top->k < i->k || k == i->k)
948 break; 948 break;
949 949
950 heap_sift(iter, i - top, btree_iter_cmp); 950 if (!KEY_SIZE(i->k)) {
951 sort_key_next(iter, i);
952 heap_sift(iter, i - top, btree_iter_cmp);
953 continue;
954 }
955
956 if (top->k > i->k) {
957 if (bkey_cmp(top->k, i->k) >= 0)
958 sort_key_next(iter, i);
959 else
960 bch_cut_front(top->k, i->k);
961
962 heap_sift(iter, i - top, btree_iter_cmp);
963 } else {
964 /* can't happen because of comparison func */
965 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
966 bch_cut_back(&START_KEY(i->k), top->k);
967 }
951 } 968 }
952} 969}
953 970
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
255 255
256 return; 256 return;
257err: 257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu", 258 bch_cache_set_error(b->c, "io error reading bucket %zu",
259 PTR_BUCKET_NR(b->c, &b->key, 0)); 259 PTR_BUCKET_NR(b->c, &b->key, 0));
260} 260}
261 261
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
612 return SHRINK_STOP; 612 return SHRINK_STOP;
613 613
614 /* Return -1 if we can't do anything right now */ 614 /* Return -1 if we can't do anything right now */
615 if (sc->gfp_mask & __GFP_WAIT) 615 if (sc->gfp_mask & __GFP_IO)
616 mutex_lock(&c->bucket_lock); 616 mutex_lock(&c->bucket_lock);
617 else if (!mutex_trylock(&c->bucket_lock)) 617 else if (!mutex_trylock(&c->bucket_lock))
618 return -1; 618 return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155 155
156 /* Read journal buckets ordered by golden ratio hash to quickly 156 /*
157 * Read journal buckets ordered by golden ratio hash to quickly
157 * find a sequence of buckets with valid journal entries 158 * find a sequence of buckets with valid journal entries
158 */ 159 */
159 for (i = 0; i < ca->sb.njournal_buckets; i++) { 160 for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
166 goto bsearch; 167 goto bsearch;
167 } 168 }
168 169
169 /* If that fails, check all the buckets we haven't checked 170 /*
171 * If that fails, check all the buckets we haven't checked
170 * already 172 * already
171 */ 173 */
172 pr_debug("falling back to linear search"); 174 pr_debug("falling back to linear search");
173 175
174 for (l = 0; l < ca->sb.njournal_buckets; l++) { 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
175 if (test_bit(l, bitmap)) 177 l < ca->sb.njournal_buckets;
176 continue; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
177
178 if (read_bucket(l)) 179 if (read_bucket(l))
179 goto bsearch; 180 goto bsearch;
180 } 181
182 if (list_empty(list))
183 continue;
181bsearch: 184bsearch:
182 /* Binary search */ 185 /* Binary search */
183 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
197 r = m; 200 r = m;
198 } 201 }
199 202
200 /* Read buckets in reverse order until we stop finding more 203 /*
204 * Read buckets in reverse order until we stop finding more
201 * journal entries 205 * journal entries
202 */ 206 */
203 pr_debug("finishing up"); 207 pr_debug("finishing up: m %u njournal_buckets %u",
208 m, ca->sb.njournal_buckets);
204 l = m; 209 l = m;
205 210
206 while (1) { 211 while (1) {
@@ -228,9 +233,10 @@ bsearch:
228 } 233 }
229 } 234 }
230 235
231 c->journal.seq = list_entry(list->prev, 236 if (!list_empty(list))
232 struct journal_replay, 237 c->journal.seq = list_entry(list->prev,
233 list)->j.seq; 238 struct journal_replay,
239 list)->j.seq;
234 240
235 return 0; 241 return 0;
236#undef read_bucket 242#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
428 return; 434 return;
429 } 435 }
430 436
431 switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { 437 switch (atomic_read(&ja->discard_in_flight)) {
432 case DISCARD_IN_FLIGHT: 438 case DISCARD_IN_FLIGHT:
433 return; 439 return;
434 440
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
689 if (cl) 695 if (cl)
690 BUG_ON(!closure_wait(&w->wait, cl)); 696 BUG_ON(!closure_wait(&w->wait, cl));
691 697
698 closure_flush(&c->journal.io);
692 __journal_try_write(c, true); 699 __journal_try_write(c, true);
693 } 700 }
694} 701}
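
The comments added in the journal.c hunks above describe a two-phase search for journal entries: probe buckets in golden-ratio-hash order while marking each probe in a bitmap, then fall back to the buckets whose bit is still clear (the reworked loop walks those with find_first_zero_bit()/find_next_zero_bit()). A rough standalone sketch of that probing pattern, for illustration only; the bucket count, the probe count, the target bucket and the read_bucket() stand-in are assumptions, not taken from the patch.

/* Two-phase bucket search: a handful of hash-order probes first, then a
 * linear pass over the buckets whose bitmap bit is still clear.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 16
#define NPROBES  8

static unsigned long bitmap;            /* one bit per bucket probed so far */

/* Stand-in for read_bucket(): pretend only bucket 5 holds journal data. */
static bool read_bucket(unsigned b)
{
        bitmap |= 1UL << b;
        return b == 5;
}

int main(void)
{
        unsigned i, b;

        /* Phase 1: golden-ratio order (2654435769 is roughly 2^32 / phi). */
        for (i = 0; i < NPROBES; i++) {
                b = (uint32_t)(i * 2654435769u) % NBUCKETS;
                if (read_bucket(b)) {
                        printf("journal found in bucket %u (hash order)\n", b);
                        return 0;
                }
        }

        /* Phase 2: buckets whose bit is still clear were never probed. */
        for (b = 0; b < NBUCKETS; b++) {
                if (bitmap & (1UL << b))
                        continue;
                if (read_bucket(b)) {
                        printf("journal found in bucket %u (fallback)\n", b);
                        return 0;
                }
        }

        printf("no journal entries found\n");
        return 0;
}
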
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..71eb233b9ace 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 999
1000 if (s->op.flush_journal) { 1000 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1001 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 1002 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
1003 dc->disk.bio_split); 1003 dc->disk.bio_split);
1004 1004
1005 bio->bi_size = 0; 1005 flush->bi_rw = WRITE_FLUSH;
1006 bio->bi_vcnt = 0; 1006 flush->bi_bdev = bio->bi_bdev;
1007 closure_bio_submit(bio, cl, s->d); 1007 flush->bi_end_io = request_endio;
1008 flush->bi_private = cl;
1009
1010 closure_bio_submit(flush, cl, s->d);
1008 } else { 1011 } else {
1009 s->op.cache_bio = bio; 1012 s->op.cache_bio = bio;
1010 } 1013 }
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
223 } 223 }
224 224
225 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */ 226 if (size > SB_LABEL_SIZE)
227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
228 bch_write_bdev_super(dc, NULL); 233 bch_write_bdev_super(dc, NULL);
229 if (dc->disk.c) { 234 if (dc->disk.c) {
230 memcpy(dc->disk.c->uuids[dc->disk.id].label, 235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
190 stats->last = now ?: 1; 190 stats->last = now ?: 1;
191} 191}
192 192
193unsigned bch_next_delay(struct ratelimit *d, uint64_t done) 193/**
194 * bch_next_delay() - increment @d by the amount of work done, and return how
195 * long to delay until the next time to do some work.
196 *
197 * @d - the struct bch_ratelimit to update
198 * @done - the amount of work done, in arbitrary units
199 *
200 * Returns the amount of time to delay by, in jiffies
201 */
202uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
194{ 203{
195 uint64_t now = local_clock(); 204 uint64_t now = local_clock();
196 205
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
450 (ewma) >> factor; \ 450 (ewma) >> factor; \
451}) 451})
452 452
453struct ratelimit { 453struct bch_ratelimit {
454 /* Next time we want to do some work, in nanoseconds */
454 uint64_t next; 455 uint64_t next;
456
457 /*
458 * Rate at which we want to do work, in units per nanosecond
459 * The units here correspond to the units passed to bch_next_delay()
460 */
455 unsigned rate; 461 unsigned rate;
456}; 462};
457 463
458static inline void ratelimit_reset(struct ratelimit *d) 464static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
459{ 465{
460 d->next = local_clock(); 466 d->next = local_clock();
461} 467}
462 468
463unsigned bch_next_delay(struct ratelimit *d, uint64_t done); 469uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
464 470
465#define __DIV_SAFE(n, d, zero) \ 471#define __DIV_SAFE(n, d, zero) \
466({ \ 472({ \
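
The bch_next_delay() kernel-doc added in util.c and the field comments added to struct bch_ratelimit in util.h spell out the pacing scheme: each call credits the work just done against a "next time to do work" timestamp and returns how long the caller should wait, and the writeback.c changes below cap that delay at HZ and sleep with schedule_timeout_uninterruptible(). A standalone userspace sketch of the same idea, for illustration only: clock_gettime() stands in for local_clock(), plain nanoseconds stand in for jiffies, and the rate of 100 units per second is an arbitrary assumption.

/* Userspace sketch of the bch_ratelimit idea: pace work so that on average
 * "rate" units complete per second, by tracking the next allowed time.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
        uint64_t next_ns;       /* next time we want to do work, in ns */
        uint64_t rate;          /* target rate, in units per second */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Credit "done" units of work and return how many ns to wait before more. */
static uint64_t next_delay(struct ratelimit *d, uint64_t done)
{
        uint64_t now = now_ns();

        d->next_ns += done * 1000000000ull / d->rate;
        return d->next_ns > now ? d->next_ns - now : 0;
}

int main(void)
{
        struct ratelimit rl = { .next_ns = now_ns(), .rate = 100 };
        int i;

        for (i = 0; i < 10; i++) {
                uint64_t delay = next_delay(&rl, 25);   /* 25 units of work */
                struct timespec ts = {
                        .tv_sec = delay / 1000000000ull,
                        .tv_nsec = delay % 1000000000ull,
                };

                if (delay)
                        nanosleep(&ts, NULL);
                printf("batch %d paced, slept %llu ms\n", i,
                       (unsigned long long)(delay / 1000000));
        }
        return 0;
}

Returning a delay instead of sleeping internally is the point of the interface: as the new writeback_delay() shows, the caller is free to clamp the value (min_t against HZ) or skip the sleep entirely when it has a reason to.
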
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
94 94
95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
96{ 96{
97 uint64_t ret;
98
97 if (atomic_read(&dc->disk.detaching) || 99 if (atomic_read(&dc->disk.detaching) ||
98 !dc->writeback_percent) 100 !dc->writeback_percent)
99 return 0; 101 return 0;
100 102
101 return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 103 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
104
105 return min_t(uint64_t, ret, HZ);
102} 106}
103 107
104/* Background writeback */ 108/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
208 212
209 up_write(&dc->writeback_lock); 213 up_write(&dc->writeback_lock);
210 214
211 ratelimit_reset(&dc->writeback_rate); 215 bch_ratelimit_reset(&dc->writeback_rate);
212 216
213 /* Punt to workqueue only so we don't recurse and blow the stack */ 217 /* Punt to workqueue only so we don't recurse and blow the stack */
214 continue_at(cl, read_dirty, dirty_wq); 218 continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
318 } 322 }
319 323
320 bch_keybuf_del(&dc->writeback_keys, w); 324 bch_keybuf_del(&dc->writeback_keys, w);
321 atomic_dec_bug(&dc->in_flight); 325 up(&dc->in_flight);
322
323 closure_wake_up(&dc->writeback_wait);
324 326
325 closure_return_with_destructor(cl, dirty_io_destructor); 327 closure_return_with_destructor(cl, dirty_io_destructor);
326} 328}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
349 351
350 closure_bio_submit(&io->bio, cl, &io->dc->disk); 352 closure_bio_submit(&io->bio, cl, &io->dc->disk);
351 353
352 continue_at(cl, write_dirty_finish, dirty_wq); 354 continue_at(cl, write_dirty_finish, system_wq);
353} 355}
354 356
355static void read_dirty_endio(struct bio *bio, int error) 357static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
369 371
370 closure_bio_submit(&io->bio, cl, &io->dc->disk); 372 closure_bio_submit(&io->bio, cl, &io->dc->disk);
371 373
372 continue_at(cl, write_dirty, dirty_wq); 374 continue_at(cl, write_dirty, system_wq);
373} 375}
374 376
375static void read_dirty(struct closure *cl) 377static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
394 396
395 if (delay > 0 && 397 if (delay > 0 &&
396 (KEY_START(&w->key) != dc->last_read || 398 (KEY_START(&w->key) != dc->last_read ||
397 jiffies_to_msecs(delay) > 50)) { 399 jiffies_to_msecs(delay) > 50))
398 w->private = NULL; 400 delay = schedule_timeout_uninterruptible(delay);
399
400 closure_delay(&dc->writeback, delay);
401 continue_at(cl, read_dirty, dirty_wq);
402 }
403 401
404 dc->last_read = KEY_OFFSET(&w->key); 402 dc->last_read = KEY_OFFSET(&w->key);
405 403
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
424 422
425 trace_bcache_writeback(&w->key); 423 trace_bcache_writeback(&w->key);
426 424
427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 425 down(&dc->in_flight);
426 closure_call(&io->cl, read_dirty_submit, NULL, cl);
428 427
429 delay = writeback_delay(dc, KEY_SIZE(&w->key)); 428 delay = writeback_delay(dc, KEY_SIZE(&w->key));
430
431 atomic_inc(&dc->in_flight);
432
433 if (!closure_wait_event(&dc->writeback_wait, cl,
434 atomic_read(&dc->in_flight) < 64))
435 continue_at(cl, read_dirty, dirty_wq);
436 } 429 }
437 430
438 if (0) { 431 if (0) {
@@ -442,7 +435,11 @@ err:
442 bch_keybuf_del(&dc->writeback_keys, w); 435 bch_keybuf_del(&dc->writeback_keys, w);
443 } 436 }
444 437
445 refill_dirty(cl); 438 /*
439 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
440 * freed) before refilling again
441 */
442 continue_at(cl, refill_dirty, dirty_wq);
446} 443}
447 444
448/* Init */ 445/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
484 481
485void bch_cached_dev_writeback_init(struct cached_dev *dc) 482void bch_cached_dev_writeback_init(struct cached_dev *dc)
486{ 483{
484 sema_init(&dc->in_flight, 64);
487 closure_init_unlocked(&dc->writeback); 485 closure_init_unlocked(&dc->writeback);
488 init_rwsem(&dc->writeback_lock); 486 init_rwsem(&dc->writeback_lock);
489 487
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
513 511
514int __init bch_writeback_init(void) 512int __init bch_writeback_init(void)
515{ 513{
516 dirty_wq = create_singlethread_workqueue("bcache_writeback"); 514 dirty_wq = create_workqueue("bcache_writeback");
517 if (!dirty_wq) 515 if (!dirty_wq)
518 return -ENOMEM; 516 return -ENOMEM;
519 517
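[Editor's note] The writeback.c hunks replace the open-coded atomic counter plus closure wait with a plain semaphore: sema_init(&dc->in_flight, 64) in init, down() before each dirty IO is issued in read_dirty() and up() in write_dirty_finish(), so at most 64 writeback IOs are ever outstanding, and delays are now plain schedule_timeout_uninterruptible() calls. A hedged userspace analogue of that throttle follows; POSIX semaphores and threads stand in for the kernel primitives, all names are illustrative and error handling is elided.

#include <pthread.h>
#include <semaphore.h>

#define MAX_IN_FLIGHT 64

static sem_t in_flight;

static void *issue_io(void *arg)
{
	(void)arg;
	/* ... submit one writeback IO and wait for it to complete ... */
	sem_post(&in_flight);	/* like up(&dc->in_flight) on completion */
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int i;

	sem_init(&in_flight, 0, MAX_IN_FLIGHT);	/* like sema_init(..., 64) */

	for (i = 0; i < 1000; i++) {
		sem_wait(&in_flight);	/* like down(&dc->in_flight) */
		pthread_create(&tid, NULL, issue_io, NULL);
		pthread_detach(tid);
	}
	return 0;
}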
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
24 22
25struct dm_io_client { 23struct dm_io_client {
26 mempool_t *pool; 24 mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
50struct dm_io_client *dm_io_client_create(void) 48struct dm_io_client *dm_io_client_create(void)
51{ 49{
52 struct dm_io_client *client; 50 struct dm_io_client *client;
51 unsigned min_ios = dm_get_reserved_bio_based_ios();
53 52
54 client = kmalloc(sizeof(*client), GFP_KERNEL); 53 client = kmalloc(sizeof(*client), GFP_KERNEL);
55 if (!client) 54 if (!client)
56 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
57 56
58 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
59 if (!client->pool) 58 if (!client->pool)
60 goto bad; 59 goto bad;
61 60
62 client->bios = bioset_create(MIN_BIOS, 0); 61 client->bios = bioset_create(min_ios, 0);
63 if (!client->bios) 62 if (!client->bios)
64 goto bad; 63 goto bad;
65 64
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm.h"
10#include "dm-path-selector.h" 11#include "dm-path-selector.h"
11#include "dm-uevent.h" 12#include "dm-uevent.h"
12 13
@@ -116,8 +117,6 @@ struct dm_mpath_io {
116 117
117typedef int (*action_fn) (struct pgpath *pgpath); 118typedef int (*action_fn) (struct pgpath *pgpath);
118 119
119#define MIN_IOS 256 /* Mempool size */
120
121static struct kmem_cache *_mpio_cache; 120static struct kmem_cache *_mpio_cache;
122 121
123static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
190static struct multipath *alloc_multipath(struct dm_target *ti) 189static struct multipath *alloc_multipath(struct dm_target *ti)
191{ 190{
192 struct multipath *m; 191 struct multipath *m;
192 unsigned min_ios = dm_get_reserved_rq_based_ios();
193 193
194 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
195 if (m) { 195 if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
202 INIT_WORK(&m->trigger_event, trigger_event); 202 INIT_WORK(&m->trigger_event, trigger_event);
203 init_waitqueue_head(&m->pg_init_wait); 203 init_waitqueue_head(&m->pg_init_wait);
204 mutex_init(&m->work_mutex); 204 mutex_init(&m->work_mutex);
205 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 205 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
206 if (!m->mpio_pool) { 206 if (!m->mpio_pool) {
207 kfree(m); 207 kfree(m);
208 return NULL; 208 return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
1268 case -EREMOTEIO: 1268 case -EREMOTEIO:
1269 case -EILSEQ: 1269 case -EILSEQ:
1270 case -ENODATA: 1270 case -ENODATA:
1271 case -ENOSPC:
1271 return 1; 1272 return 1;
1272 } 1273 }
1273 1274
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
1298 if (!error && !clone->errors) 1299 if (!error && !clone->errors)
1299 return 0; /* I/O complete */ 1300 return 0; /* I/O complete */
1300 1301
1301 if (noretry_error(error)) 1302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
1302 return error; 1311 return error;
1312 }
1303 1313
1304 if (mpio->pgpath) 1314 if (mpio->pgpath)
1305 fail_path(mpio->pgpath); 1315 fail_path(mpio->pgpath);
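[Editor's note] The dm-mpath hunks above add -ENOSPC to the no-retry errors and, on such an error, disable WRITE SAME for the whole mapped device the first time a path reports it cannot honour it, by zeroing max_write_same_sectors through dm_get_queue_limits(). A compressed userspace sketch of that "disable the feature on first rejection" pattern; the sketch_* types and the boolean parameters are illustrative, the real code inspects the cloned request and the underlying queue.

#include <errno.h>
#include <stdbool.h>

struct sketch_limits {
	unsigned max_write_same_sectors;
};

struct sketch_device {
	struct sketch_limits limits;
};

static bool is_noretry_error(int error)
{
	switch (error) {
	case -EOPNOTSUPP:
	case -EREMOTEIO:
	case -EILSEQ:
	case -ENODATA:
	case -ENOSPC:		/* newly treated as fatal in the hunk above */
		return true;
	}
	return false;
}

static int end_write_same(struct sketch_device *dev, int error,
			  bool was_write_same, bool path_supports_ws)
{
	if (is_noretry_error(error)) {
		if (was_write_same && !path_supports_ws)
			/* device doesn't really do WRITE SAME: turn it off */
			dev->limits.max_write_same_sectors = 0;
		return error;	/* pass the failure up, no retry */
	}
	return 0;		/* retryable: caller fails the path instead */
}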
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..4caa8e6d59d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_work(&req.work); 259 flush_workqueue(ps->metadata_wq);
260 260
261 return req.result; 261 return req.result;
262} 262}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
725 */ 725 */
726static int init_hash_tables(struct dm_snapshot *s) 726static int init_hash_tables(struct dm_snapshot *s)
727{ 727{
728 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 728 sector_t hash_size, cow_dev_size, max_buckets;
729 729
730 /* 730 /*
731 * Calculate based on the size of the original volume or 731 * Calculate based on the size of the original volume or
732 * the COW volume... 732 * the COW volume...
733 */ 733 */
734 cow_dev_size = get_dev_size(s->cow->bdev); 734 cow_dev_size = get_dev_size(s->cow->bdev);
735 origin_dev_size = get_dev_size(s->origin->bdev);
736 max_buckets = calc_max_buckets(); 735 max_buckets = calc_max_buckets();
737 736
738 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 737 hash_size = cow_dev_size >> s->store->chunk_shift;
739 hash_size = min(hash_size, max_buckets); 738 hash_size = min(hash_size, max_buckets);
740 739
741 if (hash_size < 64) 740 if (hash_size < 64)
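[Editor's note] The dm-snap hunk sizes the exception hash table from the COW device alone instead of min(origin, COW), since the COW size bounds how many exceptions can exist. A small sketch of the resulting sizing rule; the floor applied when the result drops below 64 lies outside the hunk and is assumed here, and the names are illustrative.

#include <stdint.h>

static uint64_t sketch_hash_size(uint64_t cow_sectors, unsigned chunk_shift,
				 uint64_t max_buckets)
{
	uint64_t hash_size = cow_sectors >> chunk_shift;	/* one bucket per chunk */

	if (hash_size > max_buckets)
		hash_size = max_buckets;	/* cap by available memory */
	if (hash_size < 64)
		hash_size = 64;			/* assumed floor, see above */

	return hash_size;
}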
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
451 struct dm_stat_percpu *p; 451 struct dm_stat_percpu *p;
452 452
453 /* 453 /*
454 * For strict correctness we should use local_irq_disable/enable 454 * For strict correctness we should use local_irq_save/restore
455 * instead of preempt_disable/enable. 455 * instead of preempt_disable/enable.
456 * 456 *
457 * This is racy if the driver finishes bios from non-interrupt 457 * preempt_disable/enable is racy if the driver finishes bios
458 * context as well as from interrupt context or from more different 458 * from non-interrupt context as well as from interrupt context
459 * interrupts. 459 * or from more different interrupts.
460 * 460 *
461 * However, the race only results in not counting some events, 461 * On 64-bit architectures the race only results in not counting some
462 * so it is acceptable. 462 * events, so it is acceptable. On 32-bit architectures the race could
463 * cause the counter going off by 2^32, so we need to do proper locking
464 * there.
463 * 465 *
464 * part_stat_lock()/part_stat_unlock() have this race too. 466 * part_stat_lock()/part_stat_unlock() have this race too.
465 */ 467 */
468#if BITS_PER_LONG == 32
469 unsigned long flags;
470 local_irq_save(flags);
471#else
466 preempt_disable(); 472 preempt_disable();
473#endif
467 p = &s->stat_percpu[smp_processor_id()][entry]; 474 p = &s->stat_percpu[smp_processor_id()][entry];
468 475
469 if (!end) { 476 if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
478 p->ticks[idx] += duration; 485 p->ticks[idx] += duration;
479 } 486 }
480 487
488#if BITS_PER_LONG == 32
489 local_irq_restore(flags);
490#else
481 preempt_enable(); 491 preempt_enable();
492#endif
482} 493}
483 494
484static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, 495static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
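[Editor's note] The dm-stats hunks explain why preempt_disable() is no longer enough on 32-bit: a 64-bit counter update there is not a single store, so an interrupt between the two halves could leave the counter off by 2^32, hence local_irq_save()/restore() under BITS_PER_LONG == 32. A userspace sketch of the same word-size split; a mutex stands in for disabling interrupts and the 64-bit path assumes a single word store, which is an illustrative simplification.

#include <pthread.h>
#include <stdint.h>

static uint64_t nr_ios;
static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;

static void stat_inc(void)
{
#if UINTPTR_MAX == 0xffffffffu		/* 32-bit build: update is two stores */
	pthread_mutex_lock(&stat_lock);
	nr_ios++;
	pthread_mutex_unlock(&stat_lock);
#else					/* 64-bit build: single word store */
	nr_ios++;
#endif
}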
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2095 * them down to the data device. The thin device's discard 2095 * them down to the data device. The thin device's discard
2096 * processing will cause mappings to be removed from the btree. 2096 * processing will cause mappings to be removed from the btree.
2097 */ 2097 */
2098 ti->discard_zeroes_data_unsupported = true;
2098 if (pf.discard_enabled && pf.discard_passdown) { 2099 if (pf.discard_enabled && pf.discard_passdown) {
2099 ti->num_discard_bios = 1; 2100 ti->num_discard_bios = 1;
2100 2101
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2104 * thin devices' discard limits consistent). 2105 * thin devices' discard limits consistent).
2105 */ 2106 */
2106 ti->discards_supported = true; 2107 ti->discards_supported = true;
2107 ti->discard_zeroes_data_unsupported = true;
2108 } 2108 }
2109 ti->private = pt; 2109 ti->private = pt;
2110 2110
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2689 * They get transferred to the live pool in bind_control_target() 2689 * They get transferred to the live pool in bind_control_target()
2690 * called from pool_preresume(). 2690 * called from pool_preresume().
2691 */ 2691 */
2692 if (!pt->adjusted_pf.discard_enabled) 2692 if (!pt->adjusted_pf.discard_enabled) {
2693 /*
2694 * Must explicitly disallow stacking discard limits otherwise the
2695 * block layer will stack them if pool's data device has support.
2696 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2697 * user to see that, so make sure to set all discard limits to 0.
2698 */
2699 limits->discard_granularity = 0;
2693 return; 2700 return;
2701 }
2694 2702
2695 disable_passdown_if_not_supported(pt); 2703 disable_passdown_if_not_supported(pt);
2696 2704
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2826 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2834 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2827 2835
2828 /* In case the pool supports discards, pass them on. */ 2836 /* In case the pool supports discards, pass them on. */
2837 ti->discard_zeroes_data_unsupported = true;
2829 if (tc->pool->pf.discard_enabled) { 2838 if (tc->pool->pf.discard_enabled) {
2830 ti->discards_supported = true; 2839 ti->discards_supported = true;
2831 ti->num_discard_bios = 1; 2840 ti->num_discard_bios = 1;
2832 ti->discard_zeroes_data_unsupported = true;
2833 /* Discard bios must be split on a block boundary */ 2841 /* Discard bios must be split on a block boundary */
2834 ti->split_discard_bios = true; 2842 ti->split_discard_bios = true;
2835 } 2843 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
211 struct bio_set *bs; 211 struct bio_set *bs;
212}; 212};
213 213
214#define MIN_IOS 256 214#define RESERVED_BIO_BASED_IOS 16
215#define RESERVED_REQUEST_BASED_IOS 256
216#define RESERVED_MAX_IOS 1024
215static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
216static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
217 219
220/*
221 * Bio-based DM's mempools' reserved IOs set by the user.
222 */
223static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
224
225/*
226 * Request-based DM's mempools' reserved IOs set by the user.
227 */
228static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
229
230static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
231 unsigned def, unsigned max)
232{
233 unsigned ios = ACCESS_ONCE(*reserved_ios);
234 unsigned modified_ios = 0;
235
236 if (!ios)
237 modified_ios = def;
238 else if (ios > max)
239 modified_ios = max;
240
241 if (modified_ios) {
242 (void)cmpxchg(reserved_ios, ios, modified_ios);
243 ios = modified_ios;
244 }
245
246 return ios;
247}
248
249unsigned dm_get_reserved_bio_based_ios(void)
250{
251 return __dm_get_reserved_ios(&reserved_bio_based_ios,
252 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
253}
254EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
255
256unsigned dm_get_reserved_rq_based_ios(void)
257{
258 return __dm_get_reserved_ios(&reserved_rq_based_ios,
259 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
260}
261EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
262
218static int __init local_init(void) 263static int __init local_init(void)
219{ 264{
220 int r = -ENOMEM; 265 int r = -ENOMEM;
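[Editor's note] The hunk above introduces tunable mempool sizes: __dm_get_reserved_ios() reads the module parameter once, substitutes the default when it is zero, caps it at RESERVED_MAX_IOS, and writes the corrected value back best-effort with cmpxchg() so sysfs reflects what is actually used. A userspace sketch of that clamp-and-writeback; C11 atomics stand in for ACCESS_ONCE()/cmpxchg() and all names are illustrative.

#include <stdatomic.h>

static _Atomic unsigned reserved_ios = 16;

static unsigned get_reserved_ios(unsigned def, unsigned max)
{
	unsigned ios = atomic_load(&reserved_ios);
	unsigned fixed = 0;

	if (!ios)
		fixed = def;
	else if (ios > max)
		fixed = max;

	if (fixed) {
		/* racing writers may win; that is fine, as in the original */
		atomic_compare_exchange_strong(&reserved_ios, &ios, fixed);
		ios = fixed;
	}

	return ios;
}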
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278} 2323}
2279 2324
2280/* 2325/*
2326 * The queue_limits are only valid as long as you have a reference
2327 * count on 'md'.
2328 */
2329struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2330{
2331 BUG_ON(!atomic_read(&md->holders));
2332 return &md->queue->limits;
2333}
2334EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2335
2336/*
2281 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2337 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2282 */ 2338 */
2283static int dm_init_request_based_queue(struct mapped_device *md) 2339static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
2862 2918
2863 if (type == DM_TYPE_BIO_BASED) { 2919 if (type == DM_TYPE_BIO_BASED) {
2864 cachep = _io_cache; 2920 cachep = _io_cache;
2865 pool_size = 16; 2921 pool_size = dm_get_reserved_bio_based_ios();
2866 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2922 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2867 } else if (type == DM_TYPE_REQUEST_BASED) { 2923 } else if (type == DM_TYPE_REQUEST_BASED) {
2868 cachep = _rq_tio_cache; 2924 cachep = _rq_tio_cache;
2869 pool_size = MIN_IOS; 2925 pool_size = dm_get_reserved_rq_based_ios();
2870 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2871 /* per_bio_data_size is not used. See __bind_mempools(). */ 2927 /* per_bio_data_size is not used. See __bind_mempools(). */
2872 WARN_ON(per_bio_data_size != 0); 2928 WARN_ON(per_bio_data_size != 0);
2873 } else 2929 } else
2874 goto out; 2930 goto out;
2875 2931
2876 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); 2932 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2877 if (!pools->io_pool) 2933 if (!pools->io_pool)
2878 goto out; 2934 goto out;
2879 2935
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
2924 2980
2925module_param(major, uint, 0); 2981module_param(major, uint, 0);
2926MODULE_PARM_DESC(major, "The major number of the device mapper"); 2982MODULE_PARM_DESC(major, "The major number of the device mapper");
2983
2984module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2985MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2986
2987module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2988MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2989
2927MODULE_DESCRIPTION(DM_NAME " driver"); 2990MODULE_DESCRIPTION(DM_NAME " driver");
2928MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2991MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2929MODULE_LICENSE("GPL"); 2992MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
184/* 184/*
185 * Helpers that are used by DM core 185 * Helpers that are used by DM core
186 */ 186 */
187unsigned dm_get_reserved_bio_based_ios(void);
188unsigned dm_get_reserved_rq_based_ios(void);
189
187static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) 190static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
188{ 191{
189 return !maxlen || strlen(result) + 1 >= maxlen; 192 return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
57 dev->iamthif_ioctl = false; 57 dev->iamthif_ioctl = false;
58 dev->iamthif_state = MEI_IAMTHIF_IDLE; 58 dev->iamthif_state = MEI_IAMTHIF_IDLE;
59 dev->iamthif_timer = 0; 59 dev->iamthif_timer = 0;
60 dev->iamthif_stall_timer = 0;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
297 297
298 if (cl->reading_state != MEI_READ_COMPLETE && 298 if (cl->reading_state != MEI_READ_COMPLETE &&
299 !waitqueue_active(&cl->rx_wait)) { 299 !waitqueue_active(&cl->rx_wait)) {
300
300 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
301 302
302 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
303 (MEI_READ_COMPLETE == cl->reading_state))) { 304 cl->reading_state == MEI_READ_COMPLETE ||
305 mei_cl_is_transitioning(cl))) {
306
304 if (signal_pending(current)) 307 if (signal_pending(current))
305 return -EINTR; 308 return -EINTR;
306 return -ERESTARTSYS; 309 return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
90 cl->dev->dev_state == MEI_DEV_ENABLED && 90 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 91 cl->state == MEI_FILE_CONNECTED);
92} 92}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{
95 return (MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state);
98}
93 99
94bool mei_cl_is_other_connecting(struct mei_cl *cl); 100bool mei_cl_is_other_connecting(struct mei_cl *cl);
95int mei_cl_disconnect(struct mei_cl *cl); 101int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
37 37
38 dev->me_clients_num = 0;
39 dev->me_client_presentation_num = 0;
40 dev->me_client_index = 0;
41
38 /* count how many ME clients we have */ 42 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) 43 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++; 44 dev->me_clients_num++;
41 45
42 if (dev->me_clients_num <= 0) 46 if (dev->me_clients_num == 0)
43 return; 47 return;
44 48
45 kfree(dev->me_clients); 49 kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
221 struct hbm_props_request *prop_req; 225 struct hbm_props_request *prop_req;
222 const size_t len = sizeof(struct hbm_props_request); 226 const size_t len = sizeof(struct hbm_props_request);
223 unsigned long next_client_index; 227 unsigned long next_client_index;
224 u8 client_num; 228 unsigned long client_num;
225 229
226 230
227 client_num = dev->me_client_presentation_num; 231 client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
677 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 681 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
678 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 682 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
679 dev->init_clients_timer = 0; 683 dev->init_clients_timer = 0;
680 dev->me_client_presentation_num = 0;
681 dev->me_client_index = 0;
682 mei_hbm_me_cl_allocate(dev); 684 mei_hbm_me_cl_allocate(dev);
683 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 685 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
684 686
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
176 } 176 }
177 177
178 /* we're already in reset, cancel the init timer */
179 dev->init_clients_timer = 0;
180
178 dev->me_clients_num = 0; 181 dev->me_clients_num = 0;
179 dev->rd_msg_hdr = 0; 182 dev->rd_msg_hdr = 0;
180 dev->wd_pending = false; 183 dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
249 mutex_unlock(&dev->device_lock); 249 mutex_unlock(&dev->device_lock);
250 250
251 if (wait_event_interruptible(cl->rx_wait, 251 if (wait_event_interruptible(cl->rx_wait,
252 (MEI_READ_COMPLETE == cl->reading_state || 252 MEI_READ_COMPLETE == cl->reading_state ||
253 MEI_FILE_INITIALIZING == cl->state || 253 mei_cl_is_transitioning(cl))) {
254 MEI_FILE_DISCONNECTED == cl->state || 254
255 MEI_FILE_DISCONNECTING == cl->state))) {
256 if (signal_pending(current)) 255 if (signal_pending(current))
257 return -EINTR; 256 return -EINTR;
258 return -ERESTARTSYS; 257 return -ERESTARTSYS;
259 } 258 }
260 259
261 mutex_lock(&dev->device_lock); 260 mutex_lock(&dev->device_lock);
262 if (MEI_FILE_INITIALIZING == cl->state || 261 if (mei_cl_is_transitioning(cl)) {
263 MEI_FILE_DISCONNECTED == cl->state ||
264 MEI_FILE_DISCONNECTING == cl->state) {
265 rets = -EBUSY; 262 rets = -EBUSY;
266 goto out; 263 goto out;
267 } 264 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
399 u8 me_clients_num; 399 unsigned long me_clients_num;
400 u8 me_client_presentation_num; 400 unsigned long me_client_presentation_num;
401 u8 me_client_index; 401 unsigned long me_client_index;
402 402
403 struct mei_cl wd_cl; 403 struct mei_cl wd_cl;
404 enum mei_wd_states wd_state; 404 enum mei_wd_states wd_state;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 87ed3fb5149a..f344659dceac 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
113}; 113};
114 114
115static const struct of_device_id sh_mobile_sdhi_of_match[] = { 115static const struct of_device_id sh_mobile_sdhi_of_match[] = {
116 { .compatible = "renesas,shmobile-sdhi" }, 116 { .compatible = "renesas,sdhi-shmobile" },
117 { .compatible = "renesas,sh7372-sdhi" }, 117 { .compatible = "renesas,sdhi-sh7372" },
118 { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 118 { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
119 { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 119 { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
120 { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 120 { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
121 { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 121 { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
122 { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 122 { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
123 { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 123 { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
124 {}, 124 {},
125}; 125};
126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5db900d917f9..dd03dfdfb0d6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1236,7 +1236,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1236 return 0; 1236 return 0;
1237} 1237}
1238 1238
1239#ifdef CONFIG_OF
1240static struct of_device_id pxa3xx_nand_dt_ids[] = { 1239static struct of_device_id pxa3xx_nand_dt_ids[] = {
1241 { 1240 {
1242 .compatible = "marvell,pxa3xx-nand", 1241 .compatible = "marvell,pxa3xx-nand",
@@ -1284,12 +1283,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1284 1283
1285 return 0; 1284 return 0;
1286} 1285}
1287#else
1288static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1289{
1290 return 0;
1291}
1292#endif
1293 1286
1294static int pxa3xx_nand_probe(struct platform_device *pdev) 1287static int pxa3xx_nand_probe(struct platform_device *pdev)
1295{ 1288{
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91f179d5135c..f428ef574372 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1472,7 +1472,7 @@ void bond_alb_monitor(struct work_struct *work)
1472 bond_info->lp_counter++; 1472 bond_info->lp_counter++;
1473 1473
1474 /* send learning packets */ 1474 /* send learning packets */
1475 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) { 1475 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1476 /* change of curr_active_slave involves swapping of mac addresses. 1476 /* change of curr_active_slave involves swapping of mac addresses.
1477 * in order to avoid this swapping from happening while 1477 * in order to avoid this swapping from happening while
1478 * sending the learning packets, the curr_slave_lock must be held for 1478 * sending the learning packets, the curr_slave_lock must be held for
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 28d8e4c7dc06..c5eff5dafdfe 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -36,14 +36,15 @@ struct slave;
36 * Used for division - never set 36 * Used for division - never set
37 * to zero !!! 37 * to zero !!!
38 */ 38 */
39#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of 39#define BOND_ALB_DEFAULT_LP_INTERVAL 1
40 * learning packets to the switch 40#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
41 */ 41 * learning packets to the switch
42 */
42 43
43#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ 44#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
44 * ALB_TIMER_TICKS_PER_SEC) 45 * ALB_TIMER_TICKS_PER_SEC)
45 46
46#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \ 47#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
47 * ALB_TIMER_TICKS_PER_SEC) 48 * ALB_TIMER_TICKS_PER_SEC)
48 49
49#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. 50#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 72df399c4ab3..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1724 struct bonding *bond = netdev_priv(bond_dev); 1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent; 1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr; 1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1727 netdev_features_t old_features = bond_dev->features; 1728 netdev_features_t old_features = bond_dev->features;
1728 1729
1729 /* slave is not a slave or master is not master of this slave */ 1730 /* slave is not a slave or master is not master of this slave */
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev,
1855 * bond_change_active_slave(..., NULL) 1856 * bond_change_active_slave(..., NULL)
1856 */ 1857 */
1857 if (!USES_PRIMARY(bond->params.mode)) { 1858 if (!USES_PRIMARY(bond->params.mode)) {
1858 /* unset promiscuity level from slave */ 1859 /* unset promiscuity level from slave
1859 if (bond_dev->flags & IFF_PROMISC) 1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
1863 * when this slave was attached, so we cache at the start of the
1864 * function and use it here. Same goes for ALLMULTI below
1865 */
1866 if (old_flags & IFF_PROMISC)
1860 dev_set_promiscuity(slave_dev, -1); 1867 dev_set_promiscuity(slave_dev, -1);
1861 1868
1862 /* unset allmulti level from slave */ 1869 /* unset allmulti level from slave */
1863 if (bond_dev->flags & IFF_ALLMULTI) 1870 if (old_flags & IFF_ALLMULTI)
1864 dev_set_allmulti(slave_dev, -1); 1871 dev_set_allmulti(slave_dev, -1);
1865 1872
1866 bond_hw_addr_flush(bond_dev, slave_dev); 1873 bond_hw_addr_flush(bond_dev, slave_dev);
@@ -4416,6 +4423,7 @@ static int bond_check_params(struct bond_params *params)
4416 params->all_slaves_active = all_slaves_active; 4423 params->all_slaves_active = all_slaves_active;
4417 params->resend_igmp = resend_igmp; 4424 params->resend_igmp = resend_igmp;
4418 params->min_links = min_links; 4425 params->min_links = min_links;
4426 params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
4419 4427
4420 if (primary) { 4428 if (primary) {
4421 strncpy(params->primary, primary, IFNAMSIZ); 4429 strncpy(params->primary, primary, IFNAMSIZ);
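[Editor's note] The __bond_release_one() hunk caches bond_dev->flags at function entry because the NETDEV_CHANGEADDR notifier issued earlier in the function may clear IFF_PROMISC/IFF_ALLMULTI; the cleanup must mirror what was done when the slave was attached, so it uses the snapshot. A minimal sketch of that capture-before-notify pattern; the types, flag values and notify callback are illustrative stand-ins.

#define SK_IFF_PROMISC	0x1
#define SK_IFF_ALLMULTI	0x2

struct sketch_dev {
	unsigned flags;
	int promisc_refs;
	int allmulti_refs;
};

static void release_slave(struct sketch_dev *bond, struct sketch_dev *slave,
			  void (*notify)(struct sketch_dev *bond))
{
	unsigned old_flags = bond->flags;	/* snapshot on entry */

	notify(bond);				/* may rewrite bond->flags */

	if (old_flags & SK_IFF_PROMISC)
		slave->promisc_refs--;		/* undo what attach did */
	if (old_flags & SK_IFF_ALLMULTI)
		slave->allmulti_refs--;
}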
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index eeab40b01b7a..c29b836749b6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1699,6 +1699,44 @@ out:
1699static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, 1699static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
1700 bonding_show_resend_igmp, bonding_store_resend_igmp); 1700 bonding_show_resend_igmp, bonding_store_resend_igmp);
1701 1701
1702
1703static ssize_t bonding_show_lp_interval(struct device *d,
1704 struct device_attribute *attr,
1705 char *buf)
1706{
1707 struct bonding *bond = to_bond(d);
1708 return sprintf(buf, "%d\n", bond->params.lp_interval);
1709}
1710
1711static ssize_t bonding_store_lp_interval(struct device *d,
1712 struct device_attribute *attr,
1713 const char *buf, size_t count)
1714{
1715 struct bonding *bond = to_bond(d);
1716 int new_value, ret = count;
1717
1718 if (sscanf(buf, "%d", &new_value) != 1) {
1719 pr_err("%s: no lp interval value specified.\n",
1720 bond->dev->name);
1721 ret = -EINVAL;
1722 goto out;
1723 }
1724
1725 if (new_value <= 0) {
1726 pr_err ("%s: lp_interval must be between 1 and %d\n",
1727 bond->dev->name, INT_MAX);
1728 ret = -EINVAL;
1729 goto out;
1730 }
1731
1732 bond->params.lp_interval = new_value;
1733out:
1734 return ret;
1735}
1736
1737static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
1738 bonding_show_lp_interval, bonding_store_lp_interval);
1739
1702static struct attribute *per_bond_attrs[] = { 1740static struct attribute *per_bond_attrs[] = {
1703 &dev_attr_slaves.attr, 1741 &dev_attr_slaves.attr,
1704 &dev_attr_mode.attr, 1742 &dev_attr_mode.attr,
@@ -1729,6 +1767,7 @@ static struct attribute *per_bond_attrs[] = {
1729 &dev_attr_all_slaves_active.attr, 1767 &dev_attr_all_slaves_active.attr,
1730 &dev_attr_resend_igmp.attr, 1768 &dev_attr_resend_igmp.attr,
1731 &dev_attr_min_links.attr, 1769 &dev_attr_min_links.attr,
1770 &dev_attr_lp_interval.attr,
1732 NULL, 1771 NULL,
1733}; 1772};
1734 1773
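[Editor's note] The bond_sysfs.c hunk adds an lp_interval attribute whose store handler parses a decimal value and rejects anything that is not strictly positive. A userspace sketch of the same parse-and-validate step; strtol() stands in for sscanf()/kstrto*, the function name is illustrative, and the sysfs plumbing is omitted.

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

static int store_lp_interval(const char *buf, int *lp_interval)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(buf, &end, 10);
	if (errno || end == buf || val <= 0 || val > INT_MAX)
		return -EINVAL;		/* "must be between 1 and INT_MAX" */

	*lp_interval = (int)val;
	return 0;
}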
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 7ad8bd5cc947..03cf3fd14490 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -176,6 +176,7 @@ struct bond_params {
176 int tx_queues; 176 int tx_queues;
177 int all_slaves_active; 177 int all_slaves_active;
178 int resend_igmp; 178 int resend_igmp;
179 int lp_interval;
179}; 180};
180 181
181struct bond_parm_tbl { 182struct bond_parm_tbl {
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..3f21142138b7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
702{ 702{
703 struct flexcan_priv *priv = netdev_priv(dev); 703 struct flexcan_priv *priv = netdev_priv(dev);
704 struct flexcan_regs __iomem *regs = priv->base; 704 struct flexcan_regs __iomem *regs = priv->base;
705 unsigned int i;
706 int err; 705 int err;
707 u32 reg_mcr, reg_ctrl; 706 u32 reg_mcr, reg_ctrl;
708 707
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev)
772 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 771 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
773 flexcan_write(reg_ctrl, &regs->ctrl); 772 flexcan_write(reg_ctrl, &regs->ctrl);
774 773
775 for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
776 flexcan_write(0, &regs->cantxfg[i].can_ctrl);
777 flexcan_write(0, &regs->cantxfg[i].can_id);
778 flexcan_write(0, &regs->cantxfg[i].data[0]);
779 flexcan_write(0, &regs->cantxfg[i].data[1]);
780
781 /* put MB into rx queue */
782 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
783 &regs->cantxfg[i].can_ctrl);
784 }
785
786 /* acceptance mask/acceptance code (accept everything) */ 774 /* acceptance mask/acceptance code (accept everything) */
787 flexcan_write(0x0, &regs->rxgmask); 775 flexcan_write(0x0, &regs->rxgmask);
788 flexcan_write(0x0, &regs->rx14mask); 776 flexcan_write(0x0, &regs->rx14mask);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76/* maximum rx buffer len: extended CAN frame with timestamp */ 76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) 77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78 78
79#define SLC_CMD_LEN 1
80#define SLC_SFF_ID_LEN 3
81#define SLC_EFF_ID_LEN 8
82
79struct slcan { 83struct slcan {
80 int magic; 84 int magic;
81 85
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
142{ 146{
143 struct sk_buff *skb; 147 struct sk_buff *skb;
144 struct can_frame cf; 148 struct can_frame cf;
145 int i, dlc_pos, tmp; 149 int i, tmp;
146 unsigned long ultmp; 150 u32 tmpid;
147 char cmd = sl->rbuff[0]; 151 char *cmd = sl->rbuff;
148 152
149 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) 153 cf.can_id = 0;
154
155 switch (*cmd) {
156 case 'r':
157 cf.can_id = CAN_RTR_FLAG;
158 /* fallthrough */
159 case 't':
160 /* store dlc ASCII value and terminate SFF CAN ID string */
161 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
162 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
163 /* point to payload data behind the dlc */
164 cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
165 break;
166 case 'R':
167 cf.can_id = CAN_RTR_FLAG;
168 /* fallthrough */
169 case 'T':
170 cf.can_id |= CAN_EFF_FLAG;
171 /* store dlc ASCII value and terminate EFF CAN ID string */
172 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
173 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
174 /* point to payload data behind the dlc */
175 cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
176 break;
177 default:
150 return; 178 return;
179 }
151 180
152 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ 181 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
153 dlc_pos = 4; /* dlc position tiiid */
154 else
155 dlc_pos = 9; /* dlc position Tiiiiiiiid */
156
157 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
158 return; 182 return;
159 183
160 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ 184 cf.can_id |= tmpid;
161 185
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 186 /* get can_dlc from sanitized ASCII value */
163 187 if (cf.can_dlc >= '0' && cf.can_dlc < '9')
164 if (kstrtoul(sl->rbuff+1, 16, &ultmp)) 188 cf.can_dlc -= '0';
189 else
165 return; 190 return;
166 191
167 cf.can_id = ultmp;
168
169 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
170 cf.can_id |= CAN_EFF_FLAG;
171
172 if ((cmd | 0x20) == 'r') /* RTR frame */
173 cf.can_id |= CAN_RTR_FLAG;
174
175 *(u64 *) (&cf.data) = 0; /* clear payload */ 192 *(u64 *) (&cf.data) = 0; /* clear payload */
176 193
177 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { 194 /* RTR frames may have a dlc > 0 but they never have any data bytes */
178 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 195 if (!(cf.can_id & CAN_RTR_FLAG)) {
179 if (tmp < 0) 196 for (i = 0; i < cf.can_dlc; i++) {
180 return; 197 tmp = hex_to_bin(*cmd++);
181 cf.data[i] = (tmp << 4); 198 if (tmp < 0)
182 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 199 return;
183 if (tmp < 0) 200 cf.data[i] = (tmp << 4);
184 return; 201 tmp = hex_to_bin(*cmd++);
185 cf.data[i] |= tmp; 202 if (tmp < 0)
203 return;
204 cf.data[i] |= tmp;
205 }
186 } 206 }
187 207
188 skb = dev_alloc_skb(sizeof(struct can_frame) + 208 skb = dev_alloc_skb(sizeof(struct can_frame) +
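[Editor's note] The reworked slc_bump() above parses the SLCAN ASCII frame by command letter: 't'/'r' carry a 3-digit standard identifier, 'T'/'R' an 8-digit extended one, 'r'/'R' set the RTR flag, a single ASCII digit carries the dlc, and data bytes follow only for non-RTR frames. A self-contained userspace sketch of that decoder; the sk_* names, flag values and use of sscanf() instead of kstrtou32()/hex_to_bin() are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_EFF_FLAG	0x80000000U
#define SK_RTR_FLAG	0x40000000U

struct sk_frame {
	uint32_t id;
	uint8_t dlc;
	uint8_t data[8];
};

static int sk_parse(const char *s, struct sk_frame *f)
{
	unsigned int raw = 0;
	int id_len, i;
	char d;

	memset(f, 0, sizeof(*f));

	switch (s[0]) {
	case 'r': f->id = SK_RTR_FLAG;	/* fallthrough */
	case 't': id_len = 3; break;
	case 'R': f->id = SK_RTR_FLAG;	/* fallthrough */
	case 'T': f->id |= SK_EFF_FLAG; id_len = 8; break;
	default: return -1;
	}

	/* fixed-width hex identifier right after the command letter */
	if (sscanf(s + 1, id_len == 3 ? "%3x" : "%8x", &raw) != 1)
		return -1;
	f->id |= raw;

	/* single ASCII digit dlc, 0..8 */
	d = s[1 + id_len];
	if (d < '0' || d > '8')
		return -1;
	f->dlc = (uint8_t)(d - '0');

	/* RTR frames may carry a dlc > 0 but never any data bytes */
	if (!(f->id & SK_RTR_FLAG)) {
		const char *p = s + 1 + id_len + 1;

		for (i = 0; i < f->dlc; i++, p += 2)
			if (sscanf(p, "%2hhx", &f->data[i]) != 1)
				return -1;
	}
	return 0;
}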
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
209/* parse tty input stream */ 229/* parse tty input stream */
210static void slcan_unesc(struct slcan *sl, unsigned char s) 230static void slcan_unesc(struct slcan *sl, unsigned char s)
211{ 231{
212
213 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ 232 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
214 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 233 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
215 (sl->rcount > 4)) { 234 (sl->rcount > 4)) {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
236/* Encapsulate one can_frame and stuff into a TTY queue. */ 255/* Encapsulate one can_frame and stuff into a TTY queue. */
237static void slc_encaps(struct slcan *sl, struct can_frame *cf) 256static void slc_encaps(struct slcan *sl, struct can_frame *cf)
238{ 257{
239 int actual, idx, i; 258 int actual, i;
240 char cmd; 259 unsigned char *pos;
260 unsigned char *endpos;
261 canid_t id = cf->can_id;
262
263 pos = sl->xbuff;
241 264
242 if (cf->can_id & CAN_RTR_FLAG) 265 if (cf->can_id & CAN_RTR_FLAG)
243 cmd = 'R'; /* becomes 'r' in standard frame format */ 266 *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
244 else 267 else
245 cmd = 'T'; /* becomes 't' in standard frame format */ 268 *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
246 269
247 if (cf->can_id & CAN_EFF_FLAG) 270 /* determine number of chars for the CAN-identifier */
248 sprintf(sl->xbuff, "%c%08X%d", cmd, 271 if (cf->can_id & CAN_EFF_FLAG) {
249 cf->can_id & CAN_EFF_MASK, cf->can_dlc); 272 id &= CAN_EFF_MASK;
250 else 273 endpos = pos + SLC_EFF_ID_LEN;
251 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, 274 } else {
252 cf->can_id & CAN_SFF_MASK, cf->can_dlc); 275 *pos |= 0x20; /* convert R/T to lower case for SFF */
276 id &= CAN_SFF_MASK;
277 endpos = pos + SLC_SFF_ID_LEN;
278 }
253 279
254 idx = strlen(sl->xbuff); 280 /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
281 pos++;
282 while (endpos >= pos) {
283 *endpos-- = hex_asc_upper[id & 0xf];
284 id >>= 4;
285 }
286
287 pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
255 288
256 for (i = 0; i < cf->can_dlc; i++) 289 *pos++ = cf->can_dlc + '0';
257 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); 290
291 /* RTR frames may have a dlc > 0 but they never have any data bytes */
292 if (!(cf->can_id & CAN_RTR_FLAG)) {
293 for (i = 0; i < cf->can_dlc; i++)
294 pos = hex_byte_pack_upper(pos, cf->data[i]);
295 }
258 296
259 strcat(sl->xbuff, "\r"); /* add terminating character */ 297 *pos++ = '\r';
260 298
261 /* Order of next two lines is *very* important. 299 /* Order of next two lines is *very* important.
262 * When we are sending a little amount of data, 300 * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
267 * 14 Oct 1994 Dmitry Gorodchanin. 305 * 14 Oct 1994 Dmitry Gorodchanin.
268 */ 306 */
269 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); 307 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
270 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); 308 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
271 sl->xleft = strlen(sl->xbuff) - actual; 309 sl->xleft = (pos - sl->xbuff) - actual;
272 sl->xhead = sl->xbuff + actual; 310 sl->xhead = sl->xbuff + actual;
273 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
274} 312}
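[Editor's note] The reworked slc_encaps() above builds the outgoing ASCII frame directly into the buffer with fixed-width uppercase hex (3 digits for standard identifiers, 8 for extended) instead of the old sprintf()/strlen()/strcat() chain, and emits data bytes only for non-RTR frames. A userspace sketch of that encoder; it reuses struct sk_frame and the flag macros from the decoder sketch earlier, the masks are illustrative, and the caller is assumed to supply a buffer of at least 27 bytes.

#include <stddef.h>
#include <stdint.h>

static size_t sk_encode(const struct sk_frame *f, char *out)
{
	static const char hex[] = "0123456789ABCDEF";
	char *pos = out;
	uint32_t id = f->id;
	int id_len, i;

	*pos = (id & SK_RTR_FLAG) ? 'R' : 'T';
	if (id & SK_EFF_FLAG) {
		id_len = 8;
		id &= 0x1fffffffU;	/* extended identifier bits */
	} else {
		*pos |= 0x20;		/* lower case for standard frames */
		id_len = 3;
		id &= 0x7ffU;		/* standard identifier bits */
	}
	pos++;

	/* fixed-width identifier, most significant digit first */
	for (i = id_len - 1; i >= 0; i--, id >>= 4)
		pos[i] = hex[id & 0xf];
	pos += id_len;

	*pos++ = '0' + f->dlc;

	/* RTR frames may have a dlc > 0 but never carry data bytes */
	if (!(f->id & SK_RTR_FLAG))
		for (i = 0; i < f->dlc; i++) {
			*pos++ = hex[f->data[i] >> 4];
			*pos++ = hex[f->data[i] & 0xf];
		}

	*pos++ = '\r';
	return pos - out;	/* length replaces the old strlen() */
}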
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
286 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 324 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
287 return; 325 return;
288 326
327 spin_lock(&sl->lock);
289 if (sl->xleft <= 0) { 328 if (sl->xleft <= 0) {
290 /* Now serial buffer is almost free & we can start 329 /* Now serial buffer is almost free & we can start
291 * transmission of another packet */ 330 * transmission of another packet */
292 sl->dev->stats.tx_packets++; 331 sl->dev->stats.tx_packets++;
293 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 332 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
333 spin_unlock(&sl->lock);
294 netif_wake_queue(sl->dev); 334 netif_wake_queue(sl->dev);
295 return; 335 return;
296 } 336 }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
298 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 338 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
299 sl->xleft -= actual; 339 sl->xleft -= actual;
300 sl->xhead += actual; 340 sl->xhead += actual;
341 spin_unlock(&sl->lock);
301} 342}
302 343
303/* Send a can_frame to a TTY queue. */ 344/* Send a can_frame to a TTY queue. */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
463 if (i < PCAN_USB_MAX_TX_URBS) { 463 if (i < PCAN_USB_MAX_TX_URBS) {
464 if (i == 0) { 464 if (i == 0) {
465 netdev_err(netdev, "couldn't setup any tx URB\n"); 465 netdev_err(netdev, "couldn't setup any tx URB\n");
466 return err; 466 goto err_tx;
467 } 467 }
468 468
469 netdev_warn(netdev, "tx performance may be slow\n"); 469 netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
472 if (dev->adapter->dev_start) { 472 if (dev->adapter->dev_start) {
473 err = dev->adapter->dev_start(dev); 473 err = dev->adapter->dev_start(dev);
474 if (err) 474 if (err)
475 goto failed; 475 goto err_adapter;
476 } 476 }
477 477
478 dev->state |= PCAN_USB_STATE_STARTED; 478 dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
481 if (dev->adapter->dev_set_bus) { 481 if (dev->adapter->dev_set_bus) {
482 err = dev->adapter->dev_set_bus(dev, 1); 482 err = dev->adapter->dev_set_bus(dev, 1);
483 if (err) 483 if (err)
484 goto failed; 484 goto err_adapter;
485 } 485 }
486 486
487 dev->can.state = CAN_STATE_ERROR_ACTIVE; 487 dev->can.state = CAN_STATE_ERROR_ACTIVE;
488 488
489 return 0; 489 return 0;
490 490
491failed: 491err_adapter:
492 if (err == -ENODEV) 492 if (err == -ENODEV)
493 netif_device_detach(dev->netdev); 493 netif_device_detach(dev->netdev);
494 494
495 netdev_warn(netdev, "couldn't submit control: %d\n", err); 495 netdev_warn(netdev, "couldn't submit control: %d\n", err);
496 496
497 for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
498 usb_free_urb(dev->tx_contexts[i].urb);
499 dev->tx_contexts[i].urb = NULL;
500 }
501err_tx:
502 usb_kill_anchored_urbs(&dev->rx_submitted);
503
497 return err; 504 return err;
498} 505}
499 506
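[Editor's note] The peak_usb_start() hunks split the single "failed" label into err_adapter/err_tx so each failure point unwinds exactly what has already been set up: tx URBs are freed, then the anchored rx URBs are killed, in reverse order of allocation. A compressed sketch of that goto-based unwinding; the stub functions and the forced failure are illustrative.

#include <stdio.h>

static int setup_rx(void)  { return 0; }
static int setup_tx(void)  { return 0; }
static int start_dev(void) { return -1; }	/* force the error path */
static void kill_rx(void)  { puts("kill anchored rx urbs"); }
static void free_tx(void)  { puts("free tx urbs"); }

static int sketch_start(void)
{
	int err;

	err = setup_rx();
	if (err)
		return err;

	err = setup_tx();
	if (err)
		goto err_tx;

	err = start_dev();
	if (err)
		goto err_adapter;

	return 0;

err_adapter:
	free_tx();	/* undo setup_tx() */
err_tx:
	kill_rx();	/* undo setup_rx() */
	return err;
}

int main(void)
{
	return sketch_start() ? 1 : 0;
}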
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e66684a438f5..75fb1d20d6fd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
530 if (lp->wol && !lp->irq_wake_requested) { 530 if (lp->wol && !lp->irq_wake_requested) {
531 /* register wake irq handler */ 531 /* register wake irq handler */
532 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, 532 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
533 IRQF_DISABLED, "EMAC_WAKE", dev); 533 0, "EMAC_WAKE", dev);
534 if (rc) 534 if (rc)
535 return rc; 535 return rc;
536 lp->irq_wake_requested = true; 536 lp->irq_wake_requested = true;
@@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
1686 /* now, enable interrupts */ 1686 /* now, enable interrupts */
1687 /* register irq handler */ 1687 /* register irq handler */
1688 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, 1688 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1689 IRQF_DISABLED, "EMAC_RX", ndev); 1689 0, "EMAC_RX", ndev);
1690 if (rc) { 1690 if (rc) {
1691 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); 1691 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1692 rc = -EBUSY; 1692 rc = -EBUSY;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index d6b20296b8e4..3d8c6b2cdea4 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
358 358
359 REGA(CSR0) = CSR0_STOP; 359 REGA(CSR0) = CSR0_STOP;
360 360
361 if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { 361 if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
362#ifdef CONFIG_SUN3 362#ifdef CONFIG_SUN3
363 iounmap((void __iomem *)ioaddr); 363 iounmap((void __iomem *)ioaddr);
364#endif 364#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398ebbba6..fc95b235e210 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1188 struct alx_priv *alx; 1188 struct alx_priv *alx;
1189 struct alx_hw *hw; 1189 struct alx_hw *hw;
1190 bool phy_configured; 1190 bool phy_configured;
1191 int bars, pm_cap, err; 1191 int bars, err;
1192 1192
1193 err = pci_enable_device_mem(pdev); 1193 err = pci_enable_device_mem(pdev);
1194 if (err) 1194 if (err)
@@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1225 pci_enable_pcie_error_reporting(pdev); 1225 pci_enable_pcie_error_reporting(pdev);
1226 pci_set_master(pdev); 1226 pci_set_master(pdev);
1227 1227
1228 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 1228 if (!pdev->pm_cap) {
1229 if (pm_cap == 0) {
1230 dev_err(&pdev->dev, 1229 dev_err(&pdev->dev,
1231 "Can't find power management capability, aborting\n"); 1230 "Can't find power management capability, aborting\n");
1232 err = -EIO; 1231 err = -EIO;
1233 goto out_pci_release; 1232 goto out_pci_release;
1234 } 1233 }
1235 1234
1236 err = pci_set_power_state(pdev, PCI_D0);
1237 if (err)
1238 goto out_pci_release;
1239
1240 netdev = alloc_etherdev(sizeof(*alx)); 1235 netdev = alloc_etherdev(sizeof(*alx));
1241 if (!netdev) { 1236 if (!netdev) {
1242 err = -ENOMEM; 1237 err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af45b859..249468f95365 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
157 if (++ring->end >= BGMAC_TX_RING_SLOTS) 157 if (++ring->end >= BGMAC_TX_RING_SLOTS)
158 ring->end = 0; 158 ring->end = 0;
159 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, 159 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
160 ring->index_base +
160 ring->end * sizeof(struct bgmac_dma_desc)); 161 ring->end * sizeof(struct bgmac_dma_desc));
161 162
162 /* Always keep one slot free to allow detecting bugged calls. */ 163 /* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
181 /* The last slot that hardware didn't consume yet */ 182 /* The last slot that hardware didn't consume yet */
182 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); 183 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
183 empty_slot &= BGMAC_DMA_TX_STATDPTR; 184 empty_slot &= BGMAC_DMA_TX_STATDPTR;
185 empty_slot -= ring->index_base;
186 empty_slot &= BGMAC_DMA_TX_STATDPTR;
184 empty_slot /= sizeof(struct bgmac_dma_desc); 187 empty_slot /= sizeof(struct bgmac_dma_desc);
185 188
186 while (ring->start != empty_slot) { 189 while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
274 277
275 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); 278 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
276 end_slot &= BGMAC_DMA_RX_STATDPTR; 279 end_slot &= BGMAC_DMA_RX_STATDPTR;
280 end_slot -= ring->index_base;
281 end_slot &= BGMAC_DMA_RX_STATDPTR;
277 end_slot /= sizeof(struct bgmac_dma_desc); 282 end_slot /= sizeof(struct bgmac_dma_desc);
278 283
279 ring->end = end_slot; 284 ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
418 ring = &bgmac->tx_ring[i]; 423 ring = &bgmac->tx_ring[i];
419 ring->num_slots = BGMAC_TX_RING_SLOTS; 424 ring->num_slots = BGMAC_TX_RING_SLOTS;
420 ring->mmio_base = ring_base[i]; 425 ring->mmio_base = ring_base[i];
421 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
422 bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
423 ring->mmio_base);
424 426
425 /* Alloc ring of descriptors */ 427 /* Alloc ring of descriptors */
426 size = ring->num_slots * sizeof(struct bgmac_dma_desc); 428 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
435 if (ring->dma_base & 0xC0000000) 437 if (ring->dma_base & 0xC0000000)
436 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 438 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
437 439
440 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
441 BGMAC_DMA_RING_TX);
442 if (ring->unaligned)
443 ring->index_base = lower_32_bits(ring->dma_base);
444 else
445 ring->index_base = 0;
446
438 /* No need to alloc TX slots yet */ 447 /* No need to alloc TX slots yet */
439 } 448 }
440 449
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
444 ring = &bgmac->rx_ring[i]; 453 ring = &bgmac->rx_ring[i];
445 ring->num_slots = BGMAC_RX_RING_SLOTS; 454 ring->num_slots = BGMAC_RX_RING_SLOTS;
446 ring->mmio_base = ring_base[i]; 455 ring->mmio_base = ring_base[i];
447 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
448 bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
449 ring->mmio_base);
450 456
451 /* Alloc ring of descriptors */ 457 /* Alloc ring of descriptors */
452 size = ring->num_slots * sizeof(struct bgmac_dma_desc); 458 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
462 if (ring->dma_base & 0xC0000000) 468 if (ring->dma_base & 0xC0000000)
463 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 469 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
464 470
471 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
472 BGMAC_DMA_RING_RX);
473 if (ring->unaligned)
474 ring->index_base = lower_32_bits(ring->dma_base);
475 else
476 ring->index_base = 0;
477
465 /* Alloc RX slots */ 478 /* Alloc RX slots */
466 for (j = 0; j < ring->num_slots; j++) { 479 for (j = 0; j < ring->num_slots; j++) {
467 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); 480 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
489 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { 502 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
490 ring = &bgmac->tx_ring[i]; 503 ring = &bgmac->tx_ring[i];
491 504
492 /* We don't implement unaligned addressing, so enable first */ 505 if (!ring->unaligned)
493 bgmac_dma_tx_enable(bgmac, ring); 506 bgmac_dma_tx_enable(bgmac, ring);
494 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, 507 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
495 lower_32_bits(ring->dma_base)); 508 lower_32_bits(ring->dma_base));
496 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, 509 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
497 upper_32_bits(ring->dma_base)); 510 upper_32_bits(ring->dma_base));
511 if (ring->unaligned)
512 bgmac_dma_tx_enable(bgmac, ring);
498 513
499 ring->start = 0; 514 ring->start = 0;
500 ring->end = 0; /* Points the slot that should *not* be read */ 515 ring->end = 0; /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
505 520
506 ring = &bgmac->rx_ring[i]; 521 ring = &bgmac->rx_ring[i];
507 522
508 /* We don't implement unaligned addressing, so enable first */ 523 if (!ring->unaligned)
509 bgmac_dma_rx_enable(bgmac, ring); 524 bgmac_dma_rx_enable(bgmac, ring);
510 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, 525 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
511 lower_32_bits(ring->dma_base)); 526 lower_32_bits(ring->dma_base));
512 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, 527 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
513 upper_32_bits(ring->dma_base)); 528 upper_32_bits(ring->dma_base));
529 if (ring->unaligned)
530 bgmac_dma_rx_enable(bgmac, ring);
514 531
515 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; 532 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
516 j++, dma_desc++) { 533 j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
531 } 548 }
532 549
533 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, 550 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
551 ring->index_base +
534 ring->num_slots * sizeof(struct bgmac_dma_desc)); 552 ring->num_slots * sizeof(struct bgmac_dma_desc));
535 553
536 ring->start = 0; 554 ring->start = 0;
@@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
908 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; 926 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
909 u8 et_swtype = 0; 927 u8 et_swtype = 0;
910 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | 928 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
911 BGMAC_CHIPCTL_1_IF_TYPE_RMII; 929 BGMAC_CHIPCTL_1_IF_TYPE_MII;
912 char buf[2]; 930 char buf[4];
913 931
914 if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) { 932 if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
915 if (kstrtou8(buf, 0, &et_swtype)) 933 if (kstrtou8(buf, 0, &et_swtype))
916 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", 934 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
917 buf); 935 buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5fcc070..66c8afbdc8c7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
333 333
334#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030 334#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
335#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000 335#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
336#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010 336#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
337#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020 337#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
338#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0 338#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
339#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000 339#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
384 u16 mmio_base; 384 u16 mmio_base;
385 struct bgmac_dma_desc *cpu_base; 385 struct bgmac_dma_desc *cpu_base;
386 dma_addr_t dma_base; 386 dma_addr_t dma_base;
387 u32 index_base; /* Used for unaligned rings only, otherwise 0 */
388 bool unaligned;
387 389
388 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; 390 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
389}; 391};
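The bgmac hunks above replace the old "unaligned addressing is not implemented" warning with real support: an unaligned ring records index_base = lower_32_bits(dma_base), the DMA enable is deferred until after the ring base registers are programmed, and the hardware status pointer is rebased by index_base before it is turned into a slot index. Below is a minimal user-space sketch of that last conversion only; the STATDPTR mask value and the 16-byte descriptor layout are restated here from memory and should be read as illustrative, not authoritative.

#include <stdint.h>
#include <stdio.h>

#define BGMAC_DMA_TX_STATDPTR 0x00001fff  /* assumed field width of the status pointer */

struct bgmac_dma_desc {
    uint32_t ctl0, ctl1, addr_low, addr_high;  /* 16 bytes, mirroring the driver's layout */
};

static unsigned int status_to_slot(uint32_t status, uint32_t index_base)
{
    uint32_t empty_slot = status & BGMAC_DMA_TX_STATDPTR;

    /* For unaligned rings the hardware reports the pointer relative to the
     * low 32 bits of the ring base, so subtract that base back out and
     * re-mask before converting a byte offset into a descriptor index.
     */
    empty_slot -= index_base;
    empty_slot &= BGMAC_DMA_TX_STATDPTR;
    return empty_slot / sizeof(struct bgmac_dma_desc);
}

int main(void)
{
    uint32_t index_base = 0x240;  /* low bits of an unaligned ring base (example value) */
    uint32_t status = 0x02a0;     /* raw DMA status register value (example value) */

    /* (0x2a0 - 0x240) / 16 = 6 descriptors consumed by the hardware */
    printf("empty slot = %u\n", status_to_slot(status, index_base));
    return 0;
}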
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0c338026ce01..97b3d32a98bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -246,8 +246,37 @@ enum {
246 BNX2X_MAX_CNIC_ETH_CL_ID_IDX, 246 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
247}; 247};
248 248
249#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\ 249/* use a value high enough to be above all the PFs, which has least significant
250 * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
251 * calculate doorbell address according to old doorbell configuration scheme
 252 * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number.
 253 * We must avoid coming up with cid 8 for iscsi since according to this method
 254 * the designated UIO cid will come out 0, and there is special handling for that
 255 * case which doesn't suit us. Therefore we will round up to the closest cid which
 256 * has least significant nibble 8, and if it is 8 we will move forward to 0x18.
257 */
258
259#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
250 (bp)->max_cos) 260 (bp)->max_cos)
261/* amount of cids traversed by UIO's DPM addition to doorbell */
262#define UIO_DPM 8
263/* roundup to DPM offset */
264#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
265 UIO_DPM))
266/* offset to nearest value which has lsb nibble matching DPM */
267#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
268 (UIO_DPM * 2))
269/* add offset to rounded-up cid to get a value which could be used with UIO */
270#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
271/* but wait - avoid UIO special case for cid 0 */
272#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
273 (UIO_DPM_ALIGN(bp) == UIO_DPM))
 274/* Properly DPM-aligned CID adjusted for the cid 0 special case */
275#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
276 (UIO_DPM_CID0_OFFSET(bp)))
277/* how many cids were wasted - need this value for cid allocation */
278#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
279 BNX2X_1st_NON_L2_ETH_CID(bp))
251 /* iSCSI L2 */ 280 /* iSCSI L2 */
252#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) 281#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
253 /* FCoE L2 */ 282 /* FCoE L2 */
@@ -1542,7 +1571,6 @@ struct bnx2x {
1542 */ 1571 */
1543 bool fcoe_init; 1572 bool fcoe_init;
1544 1573
1545 int pm_cap;
1546 int mrrs; 1574 int mrrs;
1547 1575
1548 struct delayed_work sp_task; 1576 struct delayed_work sp_task;
@@ -1681,10 +1709,11 @@ struct bnx2x {
1681 * Maximum CID count that might be required by the bnx2x: 1709 * Maximum CID count that might be required by the bnx2x:
1682 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI 1710 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1683 */ 1711 */
1712
1684#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1713#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1685 + 2 * CNIC_SUPPORT(bp)) 1714 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1686#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1715#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1687 + 2 * CNIC_SUPPORT(bp)) 1716 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1688#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1717#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1689 ILT_PAGE_CIDS)) 1718 ILT_PAGE_CIDS))
1690 1719
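To make the new CID rounding in bnx2x.h concrete, here is a plain-integer sketch of the UIO_* macro chain. Only UIO_DPM is taken from the header above; the helper and its names are local to this example, and the sample inputs are arbitrary.

#include <stdio.h>

#define UIO_DPM 8

static int roundup_int(int x, int to) { return ((x + to - 1) / to) * to; }

static int cnic_start_eth_cid(int first_non_l2_cid)
{
    int rounded = roundup_int(first_non_l2_cid, UIO_DPM);  /* UIO_ROUNDUP    */
    int offset  = (rounded + UIO_DPM) % (UIO_DPM * 2);     /* UIO_CID_OFFSET */
    int aligned = rounded + offset;                        /* UIO_DPM_ALIGN  */

    /* Avoid the UIO special case for cid 8: move forward to 0x18. */
    if (aligned == UIO_DPM)                                /* UIO_DPM_CID0_OFFSET */
        aligned += UIO_DPM * 2;
    return aligned;                                        /* BNX2X_CNIC_START_ETH_CID */
}

int main(void)
{
    int cids[] = { 4, 8, 16, 34 };

    for (unsigned i = 0; i < sizeof(cids) / sizeof(cids[0]); i++)
        printf("first non-L2 cid %2d -> cnic start cid 0x%x (pad %d)\n",
               cids[i], cnic_start_eth_cid(cids[i]),
               cnic_start_eth_cid(cids[i]) - cids[i]);     /* UIO_CID_PAD */
    return 0;
}

Every result ends in nibble 8 and never lands on cid 8 itself, which is exactly the property the comment block above asks for; UIO_CID_PAD is the number of CIDs sacrificed to get there.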
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 90045c920d09..e66beff2704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2481,8 +2481,7 @@ load_error_cnic2:
2481load_error_cnic1: 2481load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp); 2482 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */ 2483 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0); 2484 if (bnx2x_set_real_num_queues(bp, 0))
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2485 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0: 2486load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n"); 2487 BNX2X_ERR("CNIC-related load failed\n");
@@ -3008,16 +3007,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3008 u16 pmcsr; 3007 u16 pmcsr;
3009 3008
3010 /* If there is no power capability, silently succeed */ 3009 /* If there is no power capability, silently succeed */
3011 if (!bp->pm_cap) { 3010 if (!bp->pdev->pm_cap) {
3012 BNX2X_DEV_INFO("No power capability. Breaking.\n"); 3011 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3013 return 0; 3012 return 0;
3014 } 3013 }
3015 3014
3016 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 3015 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3017 3016
3018 switch (state) { 3017 switch (state) {
3019 case PCI_D0: 3018 case PCI_D0:
3020 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3019 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3020 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3022 PCI_PM_CTRL_PME_STATUS)); 3021 PCI_PM_CTRL_PME_STATUS));
3023 3022
@@ -3041,7 +3040,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3041 if (bp->wol) 3040 if (bp->wol)
3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 3041 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3043 3042
3044 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3043 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3045 pmcsr); 3044 pmcsr);
3046 3045
3047 /* No more memory access after this point until 3046 /* No more memory access after this point until
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2612e3c715d4..324de5f05332 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
1387 u16 pm = 0; 1387 u16 pm = 0;
1388 struct net_device *dev = pci_get_drvdata(bp->pdev); 1388 struct net_device *dev = pci_get_drvdata(bp->pdev);
1389 1389
1390 if (bp->pm_cap) 1390 if (bp->pdev->pm_cap)
1391 rc = pci_read_config_word(bp->pdev, 1391 rc = pci_read_config_word(bp->pdev,
1392 bp->pm_cap + PCI_PM_CTRL, &pm); 1392 bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
1393 1393
1394 if ((rc && !netif_running(dev)) || 1394 if ((rc && !netif_running(dev)) ||
1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) 1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
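The bnx2x hunks here (and the tg3 hunks further down) drop the driver's private pm_cap copy in favour of the power-management capability offset the PCI core already caches in struct pci_dev. A kernel-context sketch of the resulting pattern, not a standalone program:

#include <linux/pci.h>

static int read_pmcsr(struct pci_dev *pdev, u16 *pmcsr)
{
    /* pm_cap is 0 when the device exposes no PM capability. */
    if (!pdev->pm_cap)
        return -ENODEV;

    /* Read the PM control/status register through the cached offset. */
    return pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
}

Using the core's cached offset removes one more field the driver had to keep in sync with the PCI core.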
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
175#define EDC_MODE_LINEAR 0x0022 175#define EDC_MODE_LINEAR 0x0022
176#define EDC_MODE_LIMITING 0x0044 176#define EDC_MODE_LIMITING 0x0044
177#define EDC_MODE_PASSIVE_DAC 0x0055 177#define EDC_MODE_PASSIVE_DAC 0x0055
178#define EDC_MODE_ACTIVE_DAC 0x0066
178 179
179/* ETS defines*/ 180/* ETS defines*/
180#define DCBX_INVALID_COS (0xFF) 181#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 bnx2x_update_link_attr(params, vars->link_attr_sync);
3685} 3686}
3686 3687
3688static void bnx2x_disable_kr2(struct link_params *params,
3689 struct link_vars *vars,
3690 struct bnx2x_phy *phy)
3691{
3692 struct bnx2x *bp = params->bp;
3693 int i;
3694 static struct bnx2x_reg_set reg_set[] = {
3695 /* Step 1 - Program the TX/RX alignment markers */
3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
3705 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
3710 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
3711 };
3712 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
3713
3714 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3715 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3716 reg_set[i].val);
3717 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3718 bnx2x_update_link_attr(params, vars->link_attr_sync);
3719
3720 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3721}
3722
3687static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3688 struct link_params *params) 3724 struct link_params *params)
3689{ 3725{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3715 struct link_params *params, 3751 struct link_params *params,
3716 struct link_vars *vars) { 3752 struct link_vars *vars) {
3717 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 u16 lane, i, cl72_ctrl, an_adv = 0;
3718 u16 ucode_ver;
3719 struct bnx2x *bp = params->bp; 3754 struct bnx2x *bp = params->bp;
3720 static struct bnx2x_reg_set reg_set[] = { 3755 static struct bnx2x_reg_set reg_set[] = {
3721 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3806 3841
3807 /* Advertise pause */ 3842 /* Advertise pause */
3808 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 bnx2x_ext_phy_set_pause(params, phy, vars);
3809 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3810 */
3811 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3812 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3813 if (ucode_ver < 0xd108) {
3814 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3815 ucode_ver);
3816 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3817 }
3818 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3819 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3820 3847
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3838 bnx2x_set_aer_mmd(params, phy); 3865 bnx2x_set_aer_mmd(params, phy);
3839 3866
3840 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3868 } else {
3869 bnx2x_disable_kr2(params, vars, phy);
3841 } 3870 }
3842 3871
3843 /* Enable Autoneg: only on the main lane */ 3872 /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4347 struct bnx2x *bp = params->bp; 4376 struct bnx2x *bp = params->bp;
4348 u32 serdes_net_if; 4377 u32 serdes_net_if;
4349 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
4350 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4351 4379
4352 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
4353 4381
4354 if (!vars->turn_to_run_wc_rt) 4382 if (!vars->turn_to_run_wc_rt)
4355 return; 4383 return;
4356 4384
4357 /* Return if there is no link partner */
4358 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4359 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4360 return;
4361 }
4362
4363 if (vars->rx_tx_asic_rst) { 4385 if (vars->rx_tx_asic_rst) {
4386 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4364 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 serdes_net_if = (REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region, dev_info. 4388 offsetof(struct shmem_region, dev_info.
4366 port_hw_config[params->port].default_cfg)) & 4389 port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4375 /*10G KR*/ 4398 /*10G KR*/
4376 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
4377 4400
4378 DP(NETIF_MSG_LINK,
4379 "gp_status1 0x%x\n", gp_status1);
4380
4381 if (lnkup_kr || lnkup) { 4401 if (lnkup_kr || lnkup) {
4382 vars->rx_tx_asic_rst = 0; 4402 vars->rx_tx_asic_rst = 0;
4383 DP(NETIF_MSG_LINK,
4384 "link up, rx_tx_asic_rst 0x%x\n",
4385 vars->rx_tx_asic_rst);
4386 } else { 4403 } else {
4387 /* Reset the lane to see if link comes up.*/ 4404 /* Reset the lane to see if link comes up.*/
4388 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4507 * enabled transmitter to avoid current leakage in case 4524 * enabled transmitter to avoid current leakage in case
4508 * no module is connected 4525 * no module is connected
4509 */ 4526 */
4510 if (bnx2x_is_sfp_module_plugged(phy, params)) 4527 if ((params->loopback_mode == LOOPBACK_NONE) ||
4511 bnx2x_sfp_module_detection(phy, params); 4528 (params->loopback_mode == LOOPBACK_EXT)) {
4512 else 4529 if (bnx2x_is_sfp_module_plugged(phy, params))
4513 bnx2x_sfp_e3_set_transmitter(params, phy, 1); 4530 bnx2x_sfp_module_detection(phy, params);
4531 else
4532 bnx2x_sfp_e3_set_transmitter(params,
4533 phy, 1);
4534 }
4514 4535
4515 bnx2x_warpcore_config_sfi(phy, params); 4536 bnx2x_warpcore_config_sfi(phy, params);
4516 break; 4537 break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5757 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5758 duplex); 5779 duplex);
5759 5780
 5781 /* In case of KR link down, start up the recovery procedure */
5782 if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
5783 (!(phy->flags & FLAGS_WC_DUAL_MODE)))
5784 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
5785
5760 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5786 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5761 vars->duplex, vars->flow_ctrl, vars->link_status); 5787 vars->duplex, vars->flow_ctrl, vars->link_status);
5762 return rc; 5788 return rc;
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
6507 params->phy[INT_PHY].config_init(phy, params, vars); 6533 params->phy[INT_PHY].config_init(phy, params, vars);
6508 } 6534 }
6509 6535
6536 /* Re-read this value in case it was changed inside config_init due to
6537 * limitations of optic module
6538 */
6539 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6540
6510 /* Init external phy*/ 6541 /* Init external phy*/
6511 if (non_ext_phy) { 6542 if (non_ext_phy) {
6512 if (params->phy[INT_PHY].supported & 6543 if (params->phy[INT_PHY].supported &
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8080 if (copper_module_type & 8111 if (copper_module_type &
8081 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8112 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
8082 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8113 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
8083 check_limiting_mode = 1; 8114 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8115 *edc_mode = EDC_MODE_ACTIVE_DAC;
8116 else
8117 check_limiting_mode = 1;
8084 } else if (copper_module_type & 8118 } else if (copper_module_type &
8085 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8119 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8086 DP(NETIF_MSG_LINK, 8120 DP(NETIF_MSG_LINK,
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8555 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8589 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8556 break; 8590 break;
8557 case EDC_MODE_PASSIVE_DAC: 8591 case EDC_MODE_PASSIVE_DAC:
8592 case EDC_MODE_ACTIVE_DAC:
8558 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8593 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
8559 break; 8594 break;
8560 default: 8595 default:
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9730 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9765 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9731 an_1000_val); 9766 an_1000_val);
9732 9767
9733 /* set 100 speed advertisement */ 9768 /* Set 10/100 speed advertisement */
9734 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 9769 if (phy->req_line_speed == SPEED_AUTO_NEG) {
9735 (phy->speed_cap_mask & 9770 if (phy->speed_cap_mask &
9736 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 9771 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
9737 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { 9772 /* Enable autoneg and restart autoneg for legacy speeds
9738 an_10_100_val |= (1<<7); 9773 */
9739 /* Enable autoneg and restart autoneg for legacy speeds */ 9774 autoneg_val |= (1<<9 | 1<<12);
9740 autoneg_val |= (1<<9 | 1<<12);
9741
9742 if (phy->req_duplex == DUPLEX_FULL)
9743 an_10_100_val |= (1<<8); 9775 an_10_100_val |= (1<<8);
9744 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 9776 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
9745 } 9777 }
9746 /* set 10 speed advertisement */ 9778
9747 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9779 if (phy->speed_cap_mask &
9748 (phy->speed_cap_mask & 9780 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
9749 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 9781 /* Enable autoneg and restart autoneg for legacy speeds
9750 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && 9782 */
9751 (phy->supported & 9783 autoneg_val |= (1<<9 | 1<<12);
9752 (SUPPORTED_10baseT_Half | 9784 an_10_100_val |= (1<<7);
9753 SUPPORTED_10baseT_Full)))) { 9785 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
9754 an_10_100_val |= (1<<5); 9786 }
9755 autoneg_val |= (1<<9 | 1<<12); 9787
9756 if (phy->req_duplex == DUPLEX_FULL) 9788 if ((phy->speed_cap_mask &
9789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
9790 (phy->supported & SUPPORTED_10baseT_Full)) {
9757 an_10_100_val |= (1<<6); 9791 an_10_100_val |= (1<<6);
9758 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 9792 autoneg_val |= (1<<9 | 1<<12);
9793 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
9794 }
9795
9796 if ((phy->speed_cap_mask &
9797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
9798 (phy->supported & SUPPORTED_10baseT_Half)) {
9799 an_10_100_val |= (1<<5);
9800 autoneg_val |= (1<<9 | 1<<12);
9801 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
9802 }
9759 } 9803 }
9760 9804
9761 /* Only 10/100 are allowed to work in FORCE mode */ 9805 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13432 } 13476 }
13433 } 13477 }
13434} 13478}
13435static void bnx2x_disable_kr2(struct link_params *params,
13436 struct link_vars *vars,
13437 struct bnx2x_phy *phy)
13438{
13439 struct bnx2x *bp = params->bp;
13440 int i;
13441 static struct bnx2x_reg_set reg_set[] = {
13442 /* Step 1 - Program the TX/RX alignment markers */
13443 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13444 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13445 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13446 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13447 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13448 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13449 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13450 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13451 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13452 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13453 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13454 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13455 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13456 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13457 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13458 };
13459 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13460
13461 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
13462 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13463 reg_set[i].val);
13464 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13465 bnx2x_update_link_attr(params, vars->link_attr_sync);
13466
13467 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13468 /* Restart AN on leading lane */
13469 bnx2x_warpcore_restart_AN_KR(phy, params);
13470}
13471
13472static void bnx2x_kr2_recovery(struct link_params *params, 13479static void bnx2x_kr2_recovery(struct link_params *params,
13473 struct link_vars *vars, 13480 struct link_vars *vars,
13474 struct bnx2x_phy *phy) 13481 struct bnx2x_phy *phy)
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13546 /* Disable KR2 on both lanes */ 13553 /* Disable KR2 on both lanes */
13547 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); 13554 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
13548 bnx2x_disable_kr2(params, vars, phy); 13555 bnx2x_disable_kr2(params, vars, phy);
13556 /* Restart AN on leading lane */
13557 bnx2x_warpcore_restart_AN_KR(phy, params);
13549 return; 13558 return;
13550 } 13559 }
13551} 13560}
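The 848xx hunk above splits the legacy 10/100 advertisement into one capability check per speed/duplex combination instead of a single grouped test. The bit positions it programs follow the standard Clause 22 layout (control register bit 12 = AN enable, bit 9 = restart AN; advertisement bits 5..8 = 10HD/10FD/100HD/100FD). A short sketch under that assumption, with capability flags that stand in for the driver's PORT_HW_CFG_*/SUPPORTED_* masks:

#include <stdint.h>
#include <stdio.h>

#define CAP_10_HALF   (1u << 0)
#define CAP_10_FULL   (1u << 1)
#define CAP_100_HALF  (1u << 2)
#define CAP_100_FULL  (1u << 3)

static void build_legacy_an(uint32_t caps, uint16_t *ctrl, uint16_t *adv)
{
    *ctrl = 0;
    *adv = 0;

    /* Each advertised speed also enables and restarts autoneg. */
    if (caps & CAP_100_FULL) { *adv |= 1 << 8; *ctrl |= (1 << 9) | (1 << 12); }
    if (caps & CAP_100_HALF) { *adv |= 1 << 7; *ctrl |= (1 << 9) | (1 << 12); }
    if (caps & CAP_10_FULL)  { *adv |= 1 << 6; *ctrl |= (1 << 9) | (1 << 12); }
    if (caps & CAP_10_HALF)  { *adv |= 1 << 5; *ctrl |= (1 << 9) | (1 << 12); }
}

int main(void)
{
    uint16_t ctrl, adv;

    build_legacy_an(CAP_100_FULL | CAP_10_FULL, &ctrl, &adv);
    printf("adv = 0x%04x, ctrl = 0x%04x\n", adv, ctrl);  /* 0x0140, 0x1200 */
    return 0;
}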
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f8dbbbd7a86..82b658d8c04c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4703 attn.sig[3] = REG_RD(bp, 4703 attn.sig[3] = REG_RD(bp,
4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705 port*4); 4705 port*4);
4706 /* Since MCP attentions can't be disabled inside the block, we need to
4707 * read AEU registers to see whether they're currently disabled
4708 */
4709 attn.sig[3] &= ((REG_RD(bp,
4710 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4711 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4712 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4713 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4706 4714
4707 if (!CHIP_IS_E1x(bp)) 4715 if (!CHIP_IS_E1x(bp))
4708 attn.sig[4] = REG_RD(bp, 4716 attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data)
5447 if (IS_PF(bp) && 5455 if (IS_PF(bp) &&
5448 !BP_NOMCP(bp)) { 5456 !BP_NOMCP(bp)) {
5449 int mb_idx = BP_FW_MB_IDX(bp); 5457 int mb_idx = BP_FW_MB_IDX(bp);
5450 u32 drv_pulse; 5458 u16 drv_pulse;
5451 u32 mcp_pulse; 5459 u16 mcp_pulse;
5452 5460
5453 ++bp->fw_drv_pulse_wr_seq; 5461 ++bp->fw_drv_pulse_wr_seq;
5454 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5462 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5455 /* TBD - add SYSTEM_TIME */
5456 drv_pulse = bp->fw_drv_pulse_wr_seq; 5463 drv_pulse = bp->fw_drv_pulse_wr_seq;
5457 bnx2x_drv_pulse(bp); 5464 bnx2x_drv_pulse(bp);
5458 5465
5459 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5466 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5460 MCP_PULSE_SEQ_MASK); 5467 MCP_PULSE_SEQ_MASK);
5461 /* The delta between driver pulse and mcp response 5468 /* The delta between driver pulse and mcp response
5462 * should be 1 (before mcp response) or 0 (after mcp response) 5469 * should not get too big. If the MFW is more than 5 pulses
5470 * behind, we should worry about it enough to generate an error
5471 * log.
5463 */ 5472 */
5464 if ((drv_pulse != mcp_pulse) && 5473 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5465 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5474 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5466 /* someone lost a heartbeat... */
5467 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5468 drv_pulse, mcp_pulse); 5475 drv_pulse, mcp_pulse);
5469 }
5470 } 5476 }
5471 5477
5472 if (bp->state == BNX2X_STATE_OPEN) 5478 if (bp->state == BNX2X_STATE_OPEN)
@@ -8652,6 +8658,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8652 else if (bp->wol) { 8658 else if (bp->wol) {
8653 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 8659 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8654 u8 *mac_addr = bp->dev->dev_addr; 8660 u8 *mac_addr = bp->dev->dev_addr;
8661 struct pci_dev *pdev = bp->pdev;
8655 u32 val; 8662 u32 val;
8656 u16 pmc; 8663 u16 pmc;
8657 8664
@@ -8668,9 +8675,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8668 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8675 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8669 8676
8670 /* Enable the PME and clear the status */ 8677 /* Enable the PME and clear the status */
8671 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); 8678 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
8672 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8679 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8673 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); 8680 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
8674 8681
8675 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8682 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8676 8683
@@ -10399,7 +10406,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10399 break; 10406 break;
10400 } 10407 }
10401 10408
10402 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 10409 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10403 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 10410 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10404 10411
10405 BNX2X_DEV_INFO("%sWoL capable\n", 10412 BNX2X_DEV_INFO("%sWoL capable\n",
@@ -12141,8 +12148,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12141 } 12148 }
12142 12149
12143 if (IS_PF(bp)) { 12150 if (IS_PF(bp)) {
12144 bp->pm_cap = pdev->pm_cap; 12151 if (!pdev->pm_cap) {
12145 if (bp->pm_cap == 0) {
12146 dev_err(&bp->pdev->dev, 12152 dev_err(&bp->pdev->dev,
12147 "Cannot find power management capability, aborting\n"); 12153 "Cannot find power management capability, aborting\n");
12148 rc = -EIO; 12154 rc = -EIO;
@@ -13632,6 +13638,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
13632 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 13638 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13633 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 13639 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13634 13640
13641 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
13642 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
13643 cp->iscsi_l2_cid);
13644
13635 if (NO_ISCSI_OOO(bp)) 13645 if (NO_ISCSI_OOO(bp))
13636 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 13646 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13637} 13647}
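The timer hunk above replaces the exact drv_pulse/mcp_pulse comparison with a wrap-safe distance check, only logging an error when the management firmware falls more than five pulses behind the driver. A user-space sketch of that comparison follows; the 0x7fff mask is an assumption for illustration, the real MCP_PULSE_SEQ_MASK lives in the bnx2x headers.

#include <stdint.h>
#include <stdio.h>

#define MCP_PULSE_SEQ_MASK 0x7fff  /* assumed width of the pulse sequence counter */

static int mfw_lagging(uint16_t drv_pulse, uint16_t mcp_pulse)
{
    /* Distance the firmware is behind the driver, taken modulo the
     * sequence space so the check survives counter wraparound.
     */
    return ((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5;
}

int main(void)
{
    printf("%d\n", mfw_lagging(10, 9));       /* 0: one pulse behind is fine        */
    printf("%d\n", mfw_lagging(2, 0x7ffe));   /* 0: delta 4 across the wrap         */
    printf("%d\n", mfw_lagging(16, 6));       /* 1: ten pulses behind, worth an error */
    return 0;
}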
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..9ad012bdd915 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1820 if (fid & IGU_FID_ENCODE_IS_PF) 1820 if (fid & IGU_FID_ENCODE_IS_PF)
1821 current_pf = fid & IGU_FID_PF_NUM_MASK; 1821 current_pf = fid & IGU_FID_PF_NUM_MASK;
1822 else if (current_pf == BP_ABS_FUNC(bp)) 1822 else if (current_pf == BP_FUNC(bp))
1823 bnx2x_vf_set_igu_info(bp, sb_id, 1823 bnx2x_vf_set_igu_info(bp, sb_id,
1824 (fid & IGU_FID_VF_NUM_MASK)); 1824 (fid & IGU_FID_VF_NUM_MASK));
1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3180 /* set local queue arrays */ 3180 /* set local queue arrays */
3181 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3181 vf->vfqs = &bp->vfdb->vfqs[qcount];
3182 qcount += vf_sb_count(vf); 3182 qcount += vf_sb_count(vf);
3183 bnx2x_iov_static_resc(bp, vf);
3183 } 3184 }
3184 3185
3185 /* prepare msix vectors in VF configuration space */ 3186 /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3187 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3188 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3188 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3189 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3189 num_vf_queues); 3190 num_vf_queues);
3191 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3192 vf_idx, num_vf_queues);
3190 } 3193 }
3191 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3192 3195
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..da16953eb2ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1765 switch (mbx->first_tlv.tl.type) { 1765 switch (mbx->first_tlv.tl.type) {
1766 case CHANNEL_TLV_ACQUIRE: 1766 case CHANNEL_TLV_ACQUIRE:
1767 bnx2x_vf_mbx_acquire(bp, vf, mbx); 1767 bnx2x_vf_mbx_acquire(bp, vf, mbx);
1768 break; 1768 return;
1769 case CHANNEL_TLV_INIT: 1769 case CHANNEL_TLV_INIT:
1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx); 1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
1771 break; 1771 return;
1772 case CHANNEL_TLV_SETUP_Q: 1772 case CHANNEL_TLV_SETUP_Q:
1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx); 1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
1774 break; 1774 return;
1775 case CHANNEL_TLV_SET_Q_FILTERS: 1775 case CHANNEL_TLV_SET_Q_FILTERS:
1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); 1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
1777 break; 1777 return;
1778 case CHANNEL_TLV_TEARDOWN_Q: 1778 case CHANNEL_TLV_TEARDOWN_Q:
1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); 1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
1780 break; 1780 return;
1781 case CHANNEL_TLV_CLOSE: 1781 case CHANNEL_TLV_CLOSE:
1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx); 1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
1783 break; 1783 return;
1784 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1786 break; 1786 return;
1787 case CHANNEL_TLV_UPDATE_RSS: 1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break; 1789 return;
1790 } 1790 }
1791 1791
1792 } else { 1792 } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1802 for (i = 0; i < 20; i++) 1802 for (i = 0; i < 20; i++)
1803 DP_CONT(BNX2X_MSG_IOV, "%x ", 1803 DP_CONT(BNX2X_MSG_IOV, "%x ",
1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
1805 }
1805 1806
1806 /* test whether we can respond to the VF (do we have an address 1807 /* can we respond to VF (do we have an address for it?) */
1807 * for it?) 1808 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1808 */ 1809 /* mbx_resp uses the op_rc of the VF */
1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1810 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1810 /* mbx_resp uses the op_rc of the VF */
1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1812 1811
1813 /* notify the VF that we do not support this request */ 1812 /* notify the VF that we do not support this request */
1814 bnx2x_vf_mbx_resp(bp, vf); 1813 bnx2x_vf_mbx_resp(bp, vf);
1815 } else { 1814 } else {
1816 /* can't send a response since this VF is unknown to us 1815 /* can't send a response since this VF is unknown to us
1817 * just ack the FW to release the mailbox and unlock 1816 * just ack the FW to release the mailbox and unlock
1818 * the channel. 1817 * the channel.
1819 */ 1818 */
1820 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1819 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1821 mmiowb(); 1820 /* Firmware ack should be written before unlocking channel */
1822 bnx2x_unlock_vf_pf_channel(bp, vf, 1821 mmiowb();
1823 mbx->first_tlv.tl.type); 1822 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1824 }
1825 } 1823 }
1826} 1824}
1827 1825
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 8142480d9770..99394bd49a13 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3135,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3135{ 3135{
3136 struct cnic_dev *dev = (struct cnic_dev *) data; 3136 struct cnic_dev *dev = (struct cnic_dev *) data;
3137 struct cnic_local *cp = dev->cnic_priv; 3137 struct cnic_local *cp = dev->cnic_priv;
3138 struct bnx2x *bp = netdev_priv(dev->netdev);
3138 u32 status_idx, new_status_idx; 3139 u32 status_idx, new_status_idx;
3139 3140
3140 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 3141 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -3146,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3146 CNIC_WR16(dev, cp->kcq1.io_addr, 3147 CNIC_WR16(dev, cp->kcq1.io_addr,
3147 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3148 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3148 3149
3149 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) { 3150 if (!CNIC_SUPPORTS_FCOE(bp)) {
3150 cp->arm_int(dev, status_idx); 3151 cp->arm_int(dev, status_idx);
3151 break; 3152 break;
3152 } 3153 }
@@ -5217,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
5217 "iSCSI CLIENT_SETUP did not complete\n"); 5218 "iSCSI CLIENT_SETUP did not complete\n");
5218 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5219 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5219 cnic_ring_ctl(dev, cid, cli, 1); 5220 cnic_ring_ctl(dev, cid, cli, 1);
5220 *cid_ptr = cid; 5221 *cid_ptr = cid >> 4;
5222 *(cid_ptr + 1) = cid * bp->db_size;
5221 } 5223 }
5222} 5224}
5223 5225
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5701f3d1a169..12d961c4ebca 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3034,6 +3034,7 @@ static bool tg3_phy_led_bug(struct tg3 *tp)
3034{ 3034{
3035 switch (tg3_asic_rev(tp)) { 3035 switch (tg3_asic_rev(tp)) {
3036 case ASIC_REV_5719: 3036 case ASIC_REV_5719:
3037 case ASIC_REV_5720:
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3038 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 !tp->pci_fn) 3039 !tp->pci_fn)
3039 return true; 3040 return true;
@@ -16192,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16192 * So explicitly force the chip into D0 here. 16193 * So explicitly force the chip into D0 here.
16193 */ 16194 */
16194 pci_read_config_dword(tp->pdev, 16195 pci_read_config_dword(tp->pdev,
16195 tp->pm_cap + PCI_PM_CTRL, 16196 tp->pdev->pm_cap + PCI_PM_CTRL,
16196 &pm_reg); 16197 &pm_reg);
16197 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16198 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16198 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16199 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16199 pci_write_config_dword(tp->pdev, 16200 pci_write_config_dword(tp->pdev,
16200 tp->pm_cap + PCI_PM_CTRL, 16201 tp->pdev->pm_cap + PCI_PM_CTRL,
16201 pm_reg); 16202 pm_reg);
16202 16203
16203 /* Also, force SERR#/PERR# in PCI command. */ 16204 /* Also, force SERR#/PERR# in PCI command. */
@@ -17346,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17346 tp = netdev_priv(dev); 17347 tp = netdev_priv(dev);
17347 tp->pdev = pdev; 17348 tp->pdev = pdev;
17348 tp->dev = dev; 17349 tp->dev = dev;
17349 tp->pm_cap = pdev->pm_cap;
17350 tp->rx_mode = TG3_DEF_RX_MODE; 17350 tp->rx_mode = TG3_DEF_RX_MODE;
17351 tp->tx_mode = TG3_DEF_TX_MODE; 17351 tp->tx_mode = TG3_DEF_TX_MODE;
17352 tp->irq_sync = 1; 17352 tp->irq_sync = 1;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ddb8be1298ea..70257808aa37 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3234,7 +3234,6 @@ struct tg3 {
3234 u8 pci_lat_timer; 3234 u8 pci_lat_timer;
3235 3235
3236 int pci_fn; 3236 int pci_fn;
3237 int pm_cap;
3238 int msi_cap; 3237 int msi_cap;
3239 int pcix_cap; 3238 int pcix_cap;
3240 int pcie_readrq; 3239 int pcie_readrq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d0665ca6f19..c73cabdbd4c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6149,8 +6149,10 @@ static int __init cxgb4_init_module(void)
6149 pr_warn("could not create debugfs entry, continuing\n"); 6149 pr_warn("could not create debugfs entry, continuing\n");
6150 6150
6151 ret = pci_register_driver(&cxgb4_driver); 6151 ret = pci_register_driver(&cxgb4_driver);
6152 if (ret < 0) 6152 if (ret < 0) {
6153 debugfs_remove(cxgb4_debugfs_root); 6153 debugfs_remove(cxgb4_debugfs_root);
6154 destroy_workqueue(workq);
6155 }
6154 6156
6155 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6157 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6156 6158
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 2db6c573cec7..263b92c00cbf 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, 1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1322 lp->adapter_name, dev)) { 1322 lp->adapter_name, dev)) {
1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); 1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED, 1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1325 lp->adapter_name, dev)) { 1325 lp->adapter_name, dev)) {
1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n"); 1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1327 disable_ast(dev); 1327 disable_ast(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
91#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 96u
92#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
93 94
@@ -333,6 +334,7 @@ enum vf_state {
333 334
334#define BE_FLAGS_LINK_STATUS_INIT 1 335#define BE_FLAGS_LINK_STATUS_INIT 1
335#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 336#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
337#define BE_FLAGS_VLAN_PROMISC (1 << 4)
336#define BE_FLAGS_NAPI_ENABLED (1 << 9) 338#define BE_FLAGS_NAPI_ENABLED (1 << 9)
337#define BE_UC_PMAC_COUNT 30 339#define BE_UC_PMAC_COUNT 30
338#define BE_VF_UC_PMAC_COUNT 2 340#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..bd0e0c0bbcd8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
180 dev_err(&adapter->pdev->dev, 180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n", 181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status); 182 opcode, subsystem, compl_status, extd_status);
183
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
185 return extd_status;
183 } 186 }
184 } 187 }
185done: 188done:
@@ -1812,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1812 } else if (flags & IFF_ALLMULTI) { 1815 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags = 1816 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1817 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1818 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1819 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1820
1821 if (value == ON)
1822 req->if_flags =
1823 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1815 } else { 1824 } else {
1816 struct netdev_hw_addr *ha; 1825 struct netdev_hw_addr *ha;
1817 int i = 0; 1826 int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88c..108ca8abf0af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
64
63#define CQE_STATUS_COMPL_MASK 0xFFFF 65#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF 67#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
1791 u8 acpi_params; 1793 u8 acpi_params;
1792 u8 wol_param; 1794 u8 wol_param;
1793 u16 rsvd7; 1795 u16 rsvd7;
1794 u32 rsvd8[3]; 1796 u32 rsvd8[7];
1795} __packed; 1797} __packed;
1796 1798
1797struct be_cmd_req_get_func_config { 1799struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3224d28cdad4..2c38cc402119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
855 unsigned int eth_hdr_len; 855 unsigned int eth_hdr_len;
856 struct iphdr *ip; 856 struct iphdr *ip;
857 857
 858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less 858 /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to 859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length. 860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */ 861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { 862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36)) 863 if (skb_padto(skb, 36))
864 goto tx_drop; 864 goto tx_drop;
865 skb->len = 36; 865 skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0); 1014 vids, num, 1, 0);
1015 1015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) { 1016 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); 1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); 1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1020 goto set_vlan_promisc; 1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1021 } 1035 }
1022 1036
1023 return status; 1037 return status;
1024 1038
1025set_vlan_promisc: 1039set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1027 NULL, 0, 1, 1); 1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1028 return status; 1050 return status;
1029} 1051}
1030 1052
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 struct be_adapter *adapter = netdev_priv(netdev); 1055 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0; 1056 int status = 0;
1035 1057
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040 1058
1041 /* Packets with VID 0 are always received by Lancer by default */ 1059 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0) 1060 if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1059 struct be_adapter *adapter = netdev_priv(netdev); 1077 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status = 0; 1078 int status = 0;
1061 1079
1062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063 status = -EINVAL;
1064 goto ret;
1065 }
1066
1067 /* Packets with VID 0 are always received by Lancer by default */ 1080 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0) 1081 if (lancer_chip(adapter) && vid == 0)
1069 goto ret; 1082 goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1188 1201
1189 vi->vf = vf; 1202 vi->vf = vf;
1190 vi->tx_rate = vf_cfg->tx_rate; 1203 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag; 1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1192 vi->qos = 0; 1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1194 1207
1195 return 0; 1208 return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
1199 int vf, u16 vlan, u8 qos) 1212 int vf, u16 vlan, u8 qos)
1200{ 1213{
1201 struct be_adapter *adapter = netdev_priv(netdev); 1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1202 int status = 0; 1216 int status = 0;
1203 1217
1204 if (!sriov_enabled(adapter)) 1218 if (!sriov_enabled(adapter))
1205 return -EPERM; 1219 return -EPERM;
1206 1220
1207 if (vf >= adapter->num_vfs || vlan > 4095) 1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1208 return -EINVAL; 1222 return -EINVAL;
1209 1223
1210 if (vlan) { 1224 if (vlan || qos) {
1211 if (adapter->vf_cfg[vf].vlan_tag != vlan) { 1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1212 /* If this is new value, program it. Else skip. */ 1227 /* If this is new value, program it. Else skip. */
1213 adapter->vf_cfg[vf].vlan_tag = vlan; 1228 vf_cfg->vlan_tag = vlan;
1214 1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1215 status = be_cmd_set_hsw_config(adapter, vlan, 1230 vf_cfg->if_handle, 0);
1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1217 } 1231 }
1218 } else { 1232 } else {
1219 /* Reset Transparent Vlan Tagging. */ 1233 /* Reset Transparent Vlan Tagging. */
1220 adapter->vf_cfg[vf].vlan_tag = 0; 1234 vf_cfg->vlan_tag = 0;
1221 vlan = adapter->vf_cfg[vf].def_vid; 1235 vlan = vf_cfg->def_vid;
1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1223 adapter->vf_cfg[vf].if_handle, 0); 1237 vf_cfg->if_handle, 0);
1224 } 1238 }
1225 1239
1226 1240
@@ -2802,7 +2816,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
2802 struct be_resources res = {0}; 2816 struct be_resources res = {0};
2803 struct be_vf_cfg *vf_cfg; 2817 struct be_vf_cfg *vf_cfg;
2804 u32 cap_flags, en_flags, vf; 2818 u32 cap_flags, en_flags, vf;
2805 int status; 2819 int status = 0;
2806 2820
2807 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2821 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2808 BE_IF_FLAGS_MULTICAST; 2822 BE_IF_FLAGS_MULTICAST;
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
2963 2977
2964 if (adapter->function_mode & FLEX10_MODE) 2978 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2966 else 2982 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC; 2984 res->max_mcast_mac = BE_MAX_MC;
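The be2net SR-IOV hunks above start packing the VF's 802.1p priority into the same tag as the VLAN ID (vlan |= qos << VLAN_PRIO_SHIFT) and unpack both fields again in be_get_vf_config. A standalone sketch of that packing, using the standard <linux/if_vlan.h> values for the shift and mask:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff

static uint16_t pack_vlan_tag(uint16_t vid, uint8_t qos)
{
    /* Priority occupies the top three bits, the VLAN ID the low twelve. */
    return (uint16_t)(vid | (qos << VLAN_PRIO_SHIFT));
}

int main(void)
{
    uint16_t tag = pack_vlan_tag(100, 5);

    printf("tag     = 0x%04x\n", tag);                /* 0xa064 */
    printf("vlan id = %u\n", tag & VLAN_VID_MASK);    /* 100    */
    printf("qos     = %u\n", tag >> VLAN_PRIO_SHIFT); /* 5      */
    return 0;
}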
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f9aacf5d8523..b2793b91cc55 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,7 +2199,7 @@ fec_probe(struct platform_device *pdev)
2199 goto failed_irq; 2199 goto failed_irq;
2200 } 2200 }
2201 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 2201 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
2202 IRQF_DISABLED, pdev->name, ndev); 2202 0, pdev->name, ndev);
2203 if (ret) 2203 if (ret)
2204 goto failed_irq; 2204 goto failed_irq;
2205 } 2205 }
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908ae..e006a09ba899 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
452 err = -ENODEV; 452 err = -ENODEV;
453 453
454 etsects->caps = ptp_gianfar_caps; 454 etsects->caps = ptp_gianfar_caps;
455 etsects->cksel = DEFAULT_CKSEL; 455
456 if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
457 etsects->cksel = DEFAULT_CKSEL;
456 458
457 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || 459 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
458 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || 460 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
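The gianfar_ptp hunk makes the "fsl,cksel" property optional, falling back to DEFAULT_CKSEL when the device tree does not provide it. The driver uses its own get_of_u32() helper; the sketch below expresses the same pattern with the generic of_property_read_u32(), and the DEFAULT_CKSEL value shown is a placeholder rather than the driver's real constant.

#include <linux/of.h>

#define DEFAULT_CKSEL 1  /* placeholder; the real default is defined by the driver */

static u32 ptp_read_cksel(struct device_node *node)
{
    u32 cksel;

    /* of_property_read_u32() returns non-zero when the property is missing. */
    if (of_property_read_u32(node, "fsl,cksel", &cksel))
        cksel = DEFAULT_CKSEL;
    return cksel;
}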
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c697fc45..91227d03274e 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
1097 /* New: if bus is PCI or EISA, interrupts might be shared interrupts */ 1097 /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
1098 if (request_irq(dev->irq, hp100_interrupt, 1098 if (request_irq(dev->irq, hp100_interrupt,
1099 lp->bus == HP100_BUS_PCI || lp->bus == 1099 lp->bus == HP100_BUS_PCI || lp->bus ==
1100 HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED, 1100 HP100_BUS_EISA ? IRQF_SHARED : 0,
1101 "hp100", dev)) { 1101 "hp100", dev)) {
1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); 1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
1103 return -EAGAIN; 1103 return -EAGAIN;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b43d66e..2d1c6bdd3618 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
102 102
103static int ehea_remove(struct platform_device *dev); 103static int ehea_remove(struct platform_device *dev);
104 104
105static struct of_device_id ehea_module_device_table[] = {
106 {
107 .name = "lhea",
108 .compatible = "IBM,lhea",
109 },
110 {
111 .type = "network",
112 .compatible = "IBM,lhea-ethernet",
113 },
114 {},
115};
116MODULE_DEVICE_TABLE(of, ehea_module_device_table);
117
105static struct of_device_id ehea_device_table[] = { 118static struct of_device_id ehea_device_table[] = {
106 { 119 {
107 .name = "lhea", 120 .name = "lhea",
@@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
109 }, 122 },
110 {}, 123 {},
111}; 124};
112MODULE_DEVICE_TABLE(of, ehea_device_table);
113 125
114static struct platform_driver ehea_driver = { 126static struct platform_driver ehea_driver = {
115 .driver = { 127 .driver = {
@@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
1285 1297
1286 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, 1298 ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1287 ehea_qp_aff_irq_handler, 1299 ehea_qp_aff_irq_handler,
1288 IRQF_DISABLED, port->int_aff_name, port); 1300 0, port->int_aff_name, port);
1289 if (ret) { 1301 if (ret) {
1290 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", 1302 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1291 port->qp_eq->attr.ist1); 1303 port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
1303 "%s-queue%d", dev->name, i); 1315 "%s-queue%d", dev->name, i);
1304 ret = ibmebus_request_irq(pr->eq->attr.ist1, 1316 ret = ibmebus_request_irq(pr->eq->attr.ist1,
1305 ehea_recv_irq_handler, 1317 ehea_recv_irq_handler,
1306 IRQF_DISABLED, pr->int_send_name, 1318 0, pr->int_send_name, pr);
1307 pr);
1308 if (ret) { 1319 if (ret) {
1309 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", 1320 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1310 i, pr->eq->attr.ist1); 1321 i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
3320 } 3331 }
3321 3332
3322 ret = ibmebus_request_irq(adapter->neq->attr.ist1, 3333 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3323 ehea_interrupt_neq, IRQF_DISABLED, 3334 ehea_interrupt_neq, 0,
3324 "ehea_neq", adapter); 3335 "ehea_neq", adapter);
3325 if (ret) { 3336 if (ret) {
3326 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); 3337 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
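The ehea change above appears to split the table used for module autoloading from the table the driver binds against: MODULE_DEVICE_TABLE() now points at a broader ehea_module_device_table so modprobe aliases are generated for the "IBM,lhea" node, while probing keeps its own list. A minimal sketch of the autoload-table pattern, with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id my_autoload_table[] = {
	{ .compatible = "vendor,example-device" },	/* hypothetical */
	{ /* sentinel */ },
};
/* emits the alias table consumed by udev/modprobe for autoloading */
MODULE_DEVICE_TABLE(of, my_autoload_table);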
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8633b8f0ac5..d14c8f53384c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -922,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
922 else 922 else
923 mask &= ~(1 << 30); 923 mask &= ~(1 << 30);
924 } 924 }
925 if (mac->type == e1000_pch2lan) {
926 /* SHRAH[0,1,2] different than previous */
927 if (i == 7)
928 mask &= 0xFFF4FFFF;
929 /* SHRAH[3] different than SHRAH[0,1,2] */
930 if (i == 10)
931 mask |= (1 << 30);
932 }
925 933
926 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 934 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
927 0xFFFFFFFF); 935 0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index af08188d7e62..42f0f6717511 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1371,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1371 return; 1371 return;
1372 } 1372 }
1373 1373
1374 if (index < hw->mac.rar_entry_count) { 1374 /* RAR[1-6] are owned by manageability. Skip those and program the
1375 * next address into the SHRA register array.
1376 */
1377 if (index < (u32)(hw->mac.rar_entry_count - 6)) {
1375 s32 ret_val; 1378 s32 ret_val;
1376 1379
1377 ret_val = e1000_acquire_swflag_ich8lan(hw); 1380 ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1962,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1962 if (ret_val) 1965 if (ret_val)
1963 goto release; 1966 goto release;
1964 1967
1965 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ 1968 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
1966 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 1969 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
1967 mac_reg = er32(RAL(i)); 1970 mac_reg = er32(RAL(i));
1968 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), 1971 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1969 (u16)(mac_reg & 0xFFFF)); 1972 (u16)(mac_reg & 0xFFFF));
@@ -2007,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2007 return ret_val; 2010 return ret_val;
2008 2011
2009 if (enable) { 2012 if (enable) {
2010 /* Write Rx addresses (rar_entry_count for RAL/H, +4 for 2013 /* Write Rx addresses (rar_entry_count for RAL/H, and
2011 * SHRAL/H) and initial CRC values to the MAC 2014 * SHRAL/H) and initial CRC values to the MAC
2012 */ 2015 */
2013 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 2016 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2014 u8 mac_addr[ETH_ALEN] = { 0 }; 2017 u8 mac_addr[ETH_ALEN] = { 0 };
2015 u32 addr_high, addr_low; 2018 u32 addr_high, addr_low;
2016 2019
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 59865695b282..217090df33e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -98,7 +98,7 @@
98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
99 99
100#define E1000_ICH_RAR_ENTRIES 7 100#define E1000_ICH_RAR_ENTRIES 7
101#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ 101#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
102#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ 102#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
103 103
104#define PHY_PAGE_SHIFT 5 104#define PHY_PAGE_SHIFT 5
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e87e9b01f404..4ef786775acb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4868,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4868 */ 4868 */
4869 if ((hw->phy.type == e1000_phy_igp_3 || 4869 if ((hw->phy.type == e1000_phy_igp_3 ||
4870 hw->phy.type == e1000_phy_bm) && 4870 hw->phy.type == e1000_phy_bm) &&
4871 (hw->mac.autoneg == true) && 4871 hw->mac.autoneg &&
4872 (adapter->link_speed == SPEED_10 || 4872 (adapter->link_speed == SPEED_10 ||
4873 adapter->link_speed == SPEED_100) && 4873 adapter->link_speed == SPEED_100) &&
4874 (adapter->link_duplex == HALF_DUPLEX)) { 4874 (adapter->link_duplex == HALF_DUPLEX)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f811..cfef7fc32cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
701 701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) { 703 if (cmd_details) {
704 memcpy(details, cmd_details, 704 *details = *cmd_details;
705 sizeof(struct i40e_asq_cmd_details));
706 705
707 /* If the cmd_details are defined copy the cookie. The 706 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored 707 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 759 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761 760
762 /* if the desc is available copy the temp desc to the right place */ 761 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); 762 *desc_on_ring = *desc;
764 763
765 /* if buff is not NULL assume indirect command */ 764 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) { 765 if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
807 806
808 /* if ready, copy the desc back to temp */ 807 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) { 808 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); 809 *desc = *desc_on_ring;
811 if (buff != NULL) 810 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size); 811 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval); 812 retval = le16_to_cpu(desc->retval);
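The i40e admin-queue hunks above replace memcpy() of fixed-size structures with plain struct assignment, which the compiler type-checks and which cannot get the size argument wrong. A small standalone illustration of the pattern; the struct is hypothetical:

struct desc {
	unsigned short opcode;
	unsigned short retval;
	unsigned int cookie;
};

static void copy_desc(struct desc *dst, const struct desc *src)
{
	/* before: memcpy(dst, src, sizeof(struct desc)); */
	*dst = *src;	/* same effect, but only compiles if the types match */
}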
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1d..1e4ea134975a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
507 507
508 /* save link status information */ 508 /* save link status information */
509 if (link) 509 if (link)
510 memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); 510 *link = *hw_link_info;
511 511
512 /* flag cleared so helper functions don't call AQ again */ 512 /* flag cleared so helper functions don't call AQ again */
513 hw->phy.get_link_info = false; 513 hw->phy.get_link_info = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694ea..221aa4795017 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 mem->size = ALIGN(size, alignment); 101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL); 103 &mem->pa, GFP_KERNEL);
104 if (mem->va) 104 if (!mem->va)
105 return 0; 105 return -ENOMEM;
106 106
107 return -ENOMEM; 107 return 0;
108} 108}
109 109
110/** 110/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
136 mem->size = size; 136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL); 137 mem->va = kzalloc(size, GFP_KERNEL);
138 138
139 if (mem->va) 139 if (!mem->va)
140 return 0; 140 return -ENOMEM;
141 141
142 return -ENOMEM; 142 return 0;
143} 143}
144 144
145/** 145/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id) 174 u16 needed, u16 id)
175{ 175{
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 int i = 0; 177 int i, j;
178 int j = 0;
179 178
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { 179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev, 180 dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 185
187 /* start the linear search with an imperfect hint */ 186 /* start the linear search with an imperfect hint */
188 i = pile->search_hint; 187 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) { 188 while (i < pile->num_entries) {
190 /* skip already allocated entries */ 189 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) { 190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++; 191 i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT; 204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i; 205 ret = i;
207 pile->search_hint = i + j; 206 pile->search_hint = i + j;
207 break;
208 } else { 208 } else {
209 /* not enough, so skip over it and continue looking */ 209 /* not enough, so skip over it and continue looking */
210 i += j; 210 i += j;
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1388 bool add_happened = false; 1388 bool add_happened = false;
1389 int filter_list_len = 0; 1389 int filter_list_len = 0;
1390 u32 changed_flags = 0; 1390 u32 changed_flags = 0;
1391 i40e_status ret = 0; 1391 i40e_status aq_ret = 0;
1392 struct i40e_pf *pf; 1392 struct i40e_pf *pf;
1393 int num_add = 0; 1393 int num_add = 0;
1394 int num_del = 0; 1394 int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1449 1449
1450 /* flush a full buffer */ 1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) { 1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw, 1452 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del, 1453 vsi->seid, del_list, num_del,
1454 NULL); 1454 NULL);
1455 num_del = 0; 1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list)); 1456 memset(del_list, 0, sizeof(*del_list));
1457 1457
1458 if (ret) 1458 if (aq_ret)
1459 dev_info(&pf->pdev->dev, 1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret, 1461 aq_ret,
1462 pf->hw.aq.asq_last_status); 1462 pf->hw.aq.asq_last_status);
1463 } 1463 }
1464 } 1464 }
1465 if (num_del) { 1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1466 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL); 1467 del_list, num_del, NULL);
1468 num_del = 0; 1468 num_del = 0;
1469 1469
1470 if (ret) 1470 if (aq_ret)
1471 dev_info(&pf->pdev->dev, 1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n", 1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status); 1473 aq_ret, pf->hw.aq.asq_last_status);
1474 } 1474 }
1475 1475
1476 kfree(del_list); 1476 kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1515 1515
1516 /* flush a full buffer */ 1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) { 1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw, 1518 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1519 vsi->seid, 1519 add_list, num_add,
1520 add_list, 1520 NULL);
1521 num_add,
1522 NULL);
1523 num_add = 0; 1521 num_add = 0;
1524 1522
1525 if (ret) 1523 if (aq_ret)
1526 break; 1524 break;
1527 memset(add_list, 0, sizeof(*add_list)); 1525 memset(add_list, 0, sizeof(*add_list));
1528 } 1526 }
1529 } 1527 }
1530 if (num_add) { 1528 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1529 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL); 1530 add_list, num_add, NULL);
1533 num_add = 0; 1531 num_add = 0;
1534 } 1532 }
1535 kfree(add_list); 1533 kfree(add_list);
1536 add_list = NULL; 1534 add_list = NULL;
1537 1535
1538 if (add_happened && (!ret)) { 1536 if (add_happened && (!aq_ret)) {
1539 /* do nothing */; 1537 /* do nothing */;
1540 } else if (add_happened && (ret)) { 1538 } else if (add_happened && (aq_ret)) {
1541 dev_info(&pf->pdev->dev, 1539 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n", 1540 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status); 1541 aq_ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1542 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1543 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) { 1544 &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1556 if (changed_flags & IFF_ALLMULTI) { 1554 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc; 1555 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1556 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1557 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid, 1558 vsi->seid,
1561 cur_multipromisc, 1559 cur_multipromisc,
1562 NULL); 1560 NULL);
1563 if (ret) 1561 if (aq_ret)
1564 dev_info(&pf->pdev->dev, 1562 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n", 1563 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status); 1564 aq_ret, pf->hw.aq.asq_last_status);
1567 } 1565 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1566 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc; 1567 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1568 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1569 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state)); 1570 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, 1571 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid, 1572 vsi->seid,
1575 cur_promisc, 1573 cur_promisc, NULL);
1576 NULL); 1574 if (aq_ret)
1577 if (ret)
1578 dev_info(&pf->pdev->dev, 1575 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n", 1576 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status); 1577 aq_ret, pf->hw.aq.asq_last_status);
1581 } 1578 }
1582 1579
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1580 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured 1788 * @vsi: the vsi being configured
1792 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1789 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1793 **/ 1792 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{ 1794{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted 1863 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added 1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1866 **/ 1867 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid) 1869 __always_unused __be16 proto, u16 vid)
1869{ 1870{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev); 1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi; 1872 struct i40e_vsi *vsi = np->vsi;
1872 int ret; 1873 int ret = 0;
1873 1874
1874 if (vid > 4095) 1875 if (vid > 4095)
1875 return 0; 1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1876 1879
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should 1880 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive 1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged) 1882 * any traffic (i.e. with any vlan tag, or untagged)
1882 */ 1883 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884 1885
1885 if (!ret) { 1886 if (!ret && (vid < VLAN_N_VID))
1886 if (vid < VLAN_N_VID) 1887 set_bit(vid, vsi->active_vlans);
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889 1888
1890 return 0; 1889 return ret;
1891} 1890}
1892 1891
1893/** 1892/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted 1894 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed 1895 * @vid: vlan id to be removed
1896 *
1897 * net_device_ops implementation for adding vlan ids
1897 **/ 1898 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid) 1900 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1901 struct i40e_netdev_priv *np = netdev_priv(netdev); 1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi; 1903 struct i40e_vsi *vsi = np->vsi;
1903 1904
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n", 1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1905 netdev->dev_addr, vid); 1906
1906 /* return code is ignored as there is nothing a user 1907 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was 1908 * can do about failure to remove and a log message was
1908 * already printed from another function 1909 * already printed from the other function
1909 */ 1910 */
1910 i40e_vsi_kill_vlan(vsi, vid); 1911 i40e_vsi_kill_vlan(vsi, vid);
1911 1912
1912 clear_bit(vid, vsi->active_vlans); 1913 clear_bit(vid, vsi->active_vlans);
1914
1913 return 0; 1915 return 0;
1914} 1916}
1915 1917
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
1936 * @vsi: the vsi being adjusted 1938 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID 1939 * @vid: the vlan id to set as a PVID
1938 **/ 1940 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 1941int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{ 1942{
1941 struct i40e_vsi_context ctxt; 1943 struct i40e_vsi_context ctxt;
1942 i40e_status ret; 1944 i40e_status aq_ret;
1943 1945
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid); 1947 vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1948 1950
1949 ctxt.seid = vsi->seid; 1951 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) { 1954 if (aq_ret) {
1953 dev_info(&vsi->back->pdev->dev, 1955 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n", 1956 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status); 1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1956 } 1959 }
1957 1960
1958 return ret; 1961 return 0;
1959} 1962}
1960 1963
1961/** 1964/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3326 **/ 3329 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{ 3331{
3329 int num_tc = 0, i; 3332 u8 num_tc = 0;
3333 int i;
3330 3334
3331 /* Scan the ETS Config Priority Table to find 3335 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority 3336 * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3341 /* Traffic class index starts from zero so 3345 /* Traffic class index starts from zero so
3342 * increment to return the actual count 3346 * increment to return the actual count
3343 */ 3347 */
3344 num_tc++; 3348 return num_tc + 1;
3345
3346 return num_tc;
3347} 3349}
3348 3350
3349/** 3351/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back; 3454 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw; 3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3454 u32 tc_bw_max; 3457 u32 tc_bw_max;
3455 int ret;
3456 int i; 3458 int i;
3457 3459
3458 /* Get the VSI level BW configuration */ 3460 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) { 3462 if (aq_ret) {
3461 dev_info(&pf->pdev->dev, 3463 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status); 3465 aq_ret, pf->hw.aq.asq_last_status);
3464 return ret; 3466 return -EINVAL;
3465 } 3467 }
3466 3468
3467 /* Get the VSI level BW configuration per TC */ 3469 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, 3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3469 &bw_ets_config, 3471 NULL);
3470 NULL); 3472 if (aq_ret) {
3471 if (ret) {
3472 dev_info(&pf->pdev->dev, 3473 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status); 3475 aq_ret, pf->hw.aq.asq_last_status);
3475 return ret; 3476 return -EINVAL;
3476 } 3477 }
3477 3478
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3494 /* 3 bits out of 4 for each TC */ 3495 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 } 3497 }
3497 return ret; 3498
3499 return 0;
3498} 3500}
3499 3501
3500/** 3502/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3505 * 3507 *
3506 * Returns 0 on success, negative value on failure 3508 * Returns 0 on success, negative value on failure
3507 **/ 3509 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, 3510static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3509 u8 enabled_tc,
3510 u8 *bw_share) 3511 u8 *bw_share)
3511{ 3512{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0; 3514 i40e_status aq_ret;
3515 int i;
3514 3516
3515 bw_data.tc_valid_bits = enabled_tc; 3517 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i]; 3519 bw_data.tc_bw_credits[i] = bw_share[i];
3518 3520
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, 3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3520 &bw_data, NULL); 3522 NULL);
3521 if (ret) { 3523 if (aq_ret) {
3522 dev_info(&vsi->back->pdev->dev, 3524 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status); 3526 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret; 3527 return -EINVAL;
3526 } 3528 }
3527 3529
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530 3532
3531 return ret; 3533 return 0;
3532} 3534}
3533 3535
3534/** 3536/**
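A recurring change in the i40e_main.c hunks above is to keep the firmware admin-queue status in a separate aq_ret variable and have the driver function return a plain negative errno (-EINVAL, -ENOENT) instead of leaking the i40e_status type to callers. A minimal standalone sketch of that boundary; the firmware call is a hypothetical stand-in:

#include <errno.h>

typedef int fw_status;			/* stand-in for i40e_status */

static fw_status fw_query_config(void)
{
	return 0;			/* pretend the firmware call succeeded */
}

static int driver_get_config(void)
{
	fw_status aq_ret = fw_query_config();

	if (aq_ret) {
		/* log aq_ret for diagnostics, but hand callers an errno */
		return -EINVAL;
	}
	return 0;
}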
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 79b58353d849..47c2d10df826 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -719,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
719 u32 ctrl_ext; 719 u32 ctrl_ext;
720 u32 mdic; 720 u32 mdic;
721 721
722 /* Extra read required for some PHY's on i354 */
723 if (hw->mac.type == e1000_i354)
724 igb_get_phy_id(hw);
725
722 /* For SGMII PHYs, we try the list of possible addresses until 726 /* For SGMII PHYs, we try the list of possible addresses until
723 * we find one that works. For non-SGMII PHYs 727 * we find one that works. For non-SGMII PHYs
724 * (e.g. integrated copper PHYs), an address of 1 should 728 * (e.g. integrated copper PHYs), an address of 1 should
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f0dfd41dd4bd..298f0ed50670 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
712static s32 igb_set_default_fc(struct e1000_hw *hw) 712static s32 igb_set_default_fc(struct e1000_hw *hw)
713{ 713{
714 s32 ret_val = 0; 714 s32 ret_val = 0;
715 u16 lan_offset;
715 u16 nvm_data; 716 u16 nvm_data;
716 717
717 /* Read and store word 0x0F of the EEPROM. This word contains bits 718 /* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
722 * control setting, then the variable hw->fc will 723 * control setting, then the variable hw->fc will
723 * be initialized based on a value in the EEPROM. 724 * be initialized based on a value in the EEPROM.
724 */ 725 */
725 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); 726 if (hw->mac.type == e1000_i350) {
727 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
728 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
729 + lan_offset, 1, &nvm_data);
730 } else {
731 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
732 1, &nvm_data);
733 }
726 734
727 if (ret_val) { 735 if (ret_val) {
728 hw_dbg("NVM Read Error\n"); 736 hw_dbg("NVM Read Error\n");
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b051..86d51429a189 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); 1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1609 } 1609 }
1610 } else if (hw->phy.type == e1000_phy_82580) {
1611 /* enable MII loopback */
1612 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1610 } 1613 }
1611 1614
1612 /* add small delay to avoid loopback test failure */ 1615 /* add small delay to avoid loopback test failure */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0e1b973659b0..e8649abf97c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
160 bool autoneg = false; 160 bool autoneg = false;
161 bool link_up; 161 bool link_up;
162 162
163 /* SFP type is needed for get_link_capabilities */
164 if (hw->phy.media_type & (ixgbe_media_type_fiber |
165 ixgbe_media_type_fiber_qsfp)) {
166 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
167 hw->phy.ops.identify_sfp(hw);
168 }
169
163 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 170 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
164 171
165 /* set the supported link speeds */ 172 /* set the supported link speeds */
@@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
186 ecmd->advertising |= ADVERTISED_1000baseT_Full; 193 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 194 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
188 ecmd->advertising |= ADVERTISED_100baseT_Full; 195 ecmd->advertising |= ADVERTISED_100baseT_Full;
196
197 if (hw->phy.multispeed_fiber && !autoneg) {
198 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
199 ecmd->advertising = ADVERTISED_10000baseT_Full;
200 }
189 } 201 }
190 202
191 if (autoneg) { 203 if (autoneg) {
@@ -314,6 +326,14 @@ static int ixgbe_set_settings(struct net_device *netdev,
314 if (ecmd->advertising & ~ecmd->supported) 326 if (ecmd->advertising & ~ecmd->supported)
315 return -EINVAL; 327 return -EINVAL;
316 328
329 /* only allow one speed at a time if no autoneg */
330 if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
331 if (ecmd->advertising ==
332 (ADVERTISED_10000baseT_Full |
333 ADVERTISED_1000baseT_Full))
334 return -EINVAL;
335 }
336
317 old = hw->phy.autoneg_advertised; 337 old = hw->phy.autoneg_advertised;
318 advertised = 0; 338 advertised = 0;
319 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 339 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -1805,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1805 unsigned int size = 1024; 1825 unsigned int size = 1024;
1806 netdev_tx_t tx_ret_val; 1826 netdev_tx_t tx_ret_val;
1807 struct sk_buff *skb; 1827 struct sk_buff *skb;
1828 u32 flags_orig = adapter->flags;
1829
1830 /* DCB can modify the frames on Tx */
1831 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1808 1832
1809 /* allocate test skb */ 1833 /* allocate test skb */
1810 skb = alloc_skb(size, GFP_KERNEL); 1834 skb = alloc_skb(size, GFP_KERNEL);
@@ -1857,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1857 1881
1858 /* free the original skb */ 1882 /* free the original skb */
1859 kfree_skb(skb); 1883 kfree_skb(skb);
1884 adapter->flags = flags_orig;
1860 1885
1861 return ret_val; 1886 return ret_val;
1862} 1887}
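The ixgbe loopback-test hunk above snapshots adapter->flags, clears the DCB bit for the duration of the test (DCB can rewrite the frames on Tx), and restores the original flags before returning. The general save/clear/restore shape, with a hypothetical flag bit:

#include <stdint.h>

#define FLAG_DCB_ENABLED (1u << 3)	/* hypothetical bit */

static int run_selftest(uint32_t *flags)
{
	uint32_t flags_orig = *flags;	/* remember current configuration */
	int ret = 0;

	*flags &= ~FLAG_DCB_ENABLED;	/* feature would disturb the test frames */
	/* ... perform the loopback test, setting ret on failure ... */
	*flags = flags_orig;		/* restore on every exit path */
	return ret;
}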
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7aba452833e5..0ade0cd5ef53 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3571,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3571{ 3571{
3572 struct ixgbe_hw *hw = &adapter->hw; 3572 struct ixgbe_hw *hw = &adapter->hw;
3573 int i; 3573 int i;
3574 u32 rxctrl; 3574 u32 rxctrl, rfctl;
3575 3575
3576 /* disable receives while setting up the descriptors */ 3576 /* disable receives while setting up the descriptors */
3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3580,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3580 ixgbe_setup_psrtype(adapter); 3580 ixgbe_setup_psrtype(adapter);
3581 ixgbe_setup_rdrxctl(adapter); 3581 ixgbe_setup_rdrxctl(adapter);
3582 3582
3583 /* RSC Setup */
3584 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3585 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3586 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3587 rfctl |= IXGBE_RFCTL_RSC_DIS;
3588 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3589
3583 /* Program registers for the distribution of queues */ 3590 /* Program registers for the distribution of queues */
3584 ixgbe_setup_mrqc(adapter); 3591 ixgbe_setup_mrqc(adapter);
3585 3592
@@ -5993,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5993 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 6000 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5994 6001
5995 speed = hw->phy.autoneg_advertised; 6002 speed = hw->phy.autoneg_advertised;
5996 if ((!speed) && (hw->mac.ops.get_link_capabilities)) 6003 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
5997 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); 6004 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6005
6006 /* setup the highest link when no autoneg */
6007 if (!autoneg) {
6008 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6009 speed = IXGBE_LINK_SPEED_10GB_FULL;
6010 }
6011 }
6012
5998 if (hw->mac.ops.setup_link) 6013 if (hw->mac.ops.setup_link)
5999 hw->mac.ops.setup_link(hw, speed, true); 6014 hw->mac.ops.setup_link(hw, speed, true);
6000 6015
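The ixgbe_configure_rx() hunk above centralizes RSC control in one read-modify-write of RFCTL: clear the RSC-disable bit, set it back only when the feature flag is off, then write the register once. A generic sketch of that read-modify-write pattern against a hypothetical register accessor (the bit position mirrors the IXGBE_RFCTL_RSC_DIS define added in ixgbe_type.h):

#include <stdbool.h>
#include <stdint.h>

#define RFCTL_RSC_DIS (1u << 5)		/* mirrors IXGBE_RFCTL_RSC_DIS (0x20) */

static uint32_t regs[1];		/* stand-in for MMIO register space */
static uint32_t reg_read(int r)           { return regs[r]; }
static void reg_write(int r, uint32_t v)  { regs[r] = v; }

static void configure_rsc(bool rsc_enabled)
{
	uint32_t rfctl = reg_read(0);

	rfctl &= ~RFCTL_RSC_DIS;	/* start from "RSC allowed" */
	if (!rsc_enabled)
		rfctl |= RFCTL_RSC_DIS;	/* feature off: keep it disabled */
	reg_write(0, rfctl);
}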
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6442cf8f9dce..10775cb9b6d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1861,6 +1861,7 @@ enum {
1861#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 1861#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
1862#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E 1862#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
1863#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 1863#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
1864#define IXGBE_RFCTL_RSC_DIS 0x00000020
1864#define IXGBE_RFCTL_NFSW_DIS 0x00000040 1865#define IXGBE_RFCTL_NFSW_DIS 0x00000040
1865#define IXGBE_RFCTL_NFSR_DIS 0x00000080 1866#define IXGBE_RFCTL_NFSR_DIS 0x00000080
1866#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 1867#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb06860397..6a6c1f76d8e0 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
282 282
283 if (IS_TX(i)) { 283 if (IS_TX(i)) {
284 ltq_dma_alloc_tx(&ch->dma); 284 ltq_dma_alloc_tx(&ch->dma);
285 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 285 request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
286 "etop_tx", priv);
287 } else if (IS_RX(i)) { 286 } else if (IS_RX(i)) {
288 ltq_dma_alloc_rx(&ch->dma); 287 ltq_dma_alloc_rx(&ch->dma);
289 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; 288 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
291 if (ltq_etop_alloc_skb(ch)) 290 if (ltq_etop_alloc_skb(ch))
292 return -ENOMEM; 291 return -ENOMEM;
293 ch->dma.desc = 0; 292 ch->dma.desc = 0;
294 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 293 request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
295 "etop_rx", priv);
296 } 294 }
297 ch->dma.irq = irq; 295 ch->dma.irq = irq;
298 } 296 }
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 4ae0c7426010..fff62460185c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1123,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
1123 struct pxa168_eth_private *pep = netdev_priv(dev); 1123 struct pxa168_eth_private *pep = netdev_priv(dev);
1124 int err; 1124 int err;
1125 1125
1126 err = request_irq(dev->irq, pxa168_eth_int_handler, 1126 err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
1127 IRQF_DISABLED, dev->name, dev);
1128 if (err) { 1127 if (err) {
1129 dev_err(&dev->dev, "can't assign irq\n"); 1128 dev_err(&dev->dev, "can't assign irq\n");
1130 return -EAGAIN; 1129 return -EAGAIN;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ef94a591f9e5..ecc7f7b696b8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,23 +3086,27 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 PCI_DMA_FROMDEVICE); 3086 PCI_DMA_FROMDEVICE);
3087 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3088 } else { 3088 } else {
3089 struct skge_element ee;
3089 struct sk_buff *nskb; 3090 struct sk_buff *nskb;
3090 3091
3091 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3092 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3092 if (!nskb) 3093 if (!nskb)
3093 goto resubmit; 3094 goto resubmit;
3094 3095
3096 ee = *e;
3097
3098 skb = ee.skb;
3099 prefetch(skb->data);
3100
3095 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3101 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
3096 dev_kfree_skb(nskb); 3102 dev_kfree_skb(nskb);
3097 goto resubmit; 3103 goto resubmit;
3098 } 3104 }
3099 3105
3100 pci_unmap_single(skge->hw->pdev, 3106 pci_unmap_single(skge->hw->pdev,
3101 dma_unmap_addr(e, mapaddr), 3107 dma_unmap_addr(&ee, mapaddr),
3102 dma_unmap_len(e, maplen), 3108 dma_unmap_len(&ee, maplen),
3103 PCI_DMA_FROMDEVICE); 3109 PCI_DMA_FROMDEVICE);
3104 skb = e->skb;
3105 prefetch(skb->data);
3106 } 3110 }
3107 3111
3108 skb_put(skb, len); 3112 skb_put(skb, len);
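The skge receive hunk above copies the ring element into a local struct before skge_rx_setup() recycles the slot, so the later pci_unmap_single() takes the DMA address and length from the snapshot rather than from an element that may already describe the newly mapped buffer. The shape of the fix, with hypothetical types and a stub refill:

struct ring_elem {
	void *buf;
	unsigned long long dma_addr;
	unsigned int dma_len;
};

/* hypothetical: the slot is repopulated with a fresh buffer here */
static void refill_slot(struct ring_elem *e) { (void)e; }

static void *take_buffer(struct ring_elem *e)
{
	struct ring_elem snap = *e;	/* snapshot before the slot is reused */

	refill_slot(e);			/* e now describes the new buffer */
	/* unmap using snap.dma_addr / snap.dma_len, never e->... */
	return snap.buf;
}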
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd801a236..0c750985f47e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
53 for (i = 0; i < priv->tx_ring_num; i++) { 53 for (i = 0; i < priv->tx_ring_num; i++) {
54 priv->tx_cq[i].moder_cnt = priv->tx_frames; 54 priv->tx_cq[i].moder_cnt = priv->tx_frames;
55 priv->tx_cq[i].moder_time = priv->tx_usecs; 55 priv->tx_cq[i].moder_time = priv->tx_usecs;
56 err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]); 56 if (priv->port_up) {
57 if (err) 57 err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
58 return err; 58 if (err)
59 return err;
60 }
59 } 61 }
60 62
61 if (priv->adaptive_rx_coal) 63 if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
65 priv->rx_cq[i].moder_cnt = priv->rx_frames; 67 priv->rx_cq[i].moder_cnt = priv->rx_frames;
66 priv->rx_cq[i].moder_time = priv->rx_usecs; 68 priv->rx_cq[i].moder_time = priv->rx_usecs;
67 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 69 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
68 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); 70 if (priv->port_up) {
69 if (err) 71 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
70 return err; 72 if (err)
73 return err;
74 }
71 } 75 }
72 76
73 return err; 77 return err;
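The mlx4_en change above only pushes new CQ moderation values to hardware while the port is up; when it is down the values are just cached and take effect on the next activation. A minimal sketch of that guard, with a hypothetical hardware call:

#include <stdbool.h>

struct cq { int moder_cnt, moder_time; };

/* hypothetical firmware/hardware call: only valid while the port is active */
static int hw_set_cq_moder(struct cq *cq) { (void)cq; return 0; }

static int update_moderation(struct cq *cq, bool port_up,
			     int frames, int usecs)
{
	cq->moder_cnt = frames;		/* always cache the new settings */
	cq->moder_time = usecs;
	if (!port_up)
		return 0;		/* applied later, when the port comes up */
	return hw_set_cq_moder(cq);
}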
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0fba1532d326..075f4e21d33d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
915 struct ks_net *ks = netdev_priv(netdev); 915 struct ks_net *ks = netdev_priv(netdev);
916 int err; 916 int err;
917 917
918#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW) 918#define KS_INT_FLAGS IRQF_TRIGGER_LOW
919 /* lock the card, even if we may not actually do anything 919 /* lock the card, even if we may not actually do anything
920 * else at the moment. 920 * else at the moment.
921 */ 921 */
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c23..bd1a2d2bc2ae 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = {
543 { } 543 { }
544}; 544};
545 545
546struct __initdata platform_driver moxart_mac_driver = { 546static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe, 547 .probe = moxart_mac_probe,
548 .remove = moxart_remove, 548 .remove = moxart_remove,
549 .driver = { 549 .driver = {
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c2f65b..79257f71c5d9 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
83{ 83{
84 int retval; 84 int retval;
85 85
86 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 86 retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
87 "sonic", dev);
88 if (retval) { 87 if (retval) {
89 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 88 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
90 dev->name, dev->irq); 89 dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256fe3df..4da172ac5599 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
95{ 95{
96 int retval; 96 int retval;
97 97
98 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 98 retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
99 "sonic", dev);
100 if (retval) { 99 if (retval) {
101 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 100 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
102 dev->name, dev->irq); 101 dev->name, dev->irq);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c498181a9aa8..5b65356e7568 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
1219 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", 1219 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
1220 dev->name); 1220 dev->name);
1221 1221
1222 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, 1222 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
1223 mac->tx_irq_name, mac->tx); 1223 mac->tx_irq_name, mac->tx);
1224 if (ret) { 1224 if (ret) {
1225 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1225 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
1230 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", 1230 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
1231 dev->name); 1231 dev->name);
1232 1232
1233 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, 1233 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
1234 mac->rx_irq_name, mac->rx); 1234 mac->rx_irq_name, mac->rx);
1235 if (ret) { 1235 if (ret) {
1236 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1236 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1c..ebe4c86e5230 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1794 .set_msglevel = qlcnic_set_msglevel, 1794 .set_msglevel = qlcnic_set_msglevel,
1795 .get_msglevel = qlcnic_get_msglevel, 1795 .get_msglevel = qlcnic_get_msglevel,
1796}; 1796};
1797
1798const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1799 .get_settings = qlcnic_get_settings,
1800 .get_drvinfo = qlcnic_get_drvinfo,
1801 .set_msglevel = qlcnic_set_msglevel,
1802 .get_msglevel = qlcnic_get_msglevel,
1803 .set_dump = qlcnic_set_dump,
1804};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fdf..21d00a0449a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
432 usleep_range(10000, 11000); 432 usleep_range(10000, 11000);
433 433
434 if (!adapter->fw_work.work.func)
435 return;
436
434 cancel_delayed_work_sync(&adapter->fw_work); 437 cancel_delayed_work_sync(&adapter->fw_work);
435} 438}
436 439
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 adapter->portnum = adapter->ahw->pci_func; 2278 adapter->portnum = adapter->ahw->pci_func;
2276 err = qlcnic_start_firmware(adapter); 2279 err = qlcnic_start_firmware(adapter);
2277 if (err) { 2280 if (err) {
2278 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 2281 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
2279 goto err_out_free_hw; 2282 "\t\tIf reboot doesn't help, try flashing the card\n");
2283 goto err_out_maintenance_mode;
2280 } 2284 }
2281 2285
2282 qlcnic_get_multiq_capability(adapter); 2286 qlcnic_get_multiq_capability(adapter);
@@ -2408,6 +2412,22 @@ err_out_disable_pdev:
2408 pci_set_drvdata(pdev, NULL); 2412 pci_set_drvdata(pdev, NULL);
2409 pci_disable_device(pdev); 2413 pci_disable_device(pdev);
2410 return err; 2414 return err;
2415
2416err_out_maintenance_mode:
2417 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2418 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
2419 err = register_netdev(netdev);
2420
2421 if (err) {
2422 dev_err(&pdev->dev, "Failed to register net device\n");
2423 qlcnic_clr_all_drv_state(adapter, 0);
2424 goto err_out_free_hw;
2425 }
2426
2427 pci_set_drvdata(pdev, adapter);
2428 qlcnic_add_sysfs(adapter);
2429
2430 return 0;
2411} 2431}
2412 2432
2413static void qlcnic_remove(struct pci_dev *pdev) 2433static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
2518static int qlcnic_open(struct net_device *netdev) 2538static int qlcnic_open(struct net_device *netdev)
2519{ 2539{
2520 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2540 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2541 u32 state;
2521 int err; 2542 int err;
2522 2543
2544 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
2545 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
2546 netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
2547
2548 return -EIO;
2549 }
2550
2523 netif_carrier_off(netdev); 2551 netif_carrier_off(netdev);
2524 2552
2525 err = qlcnic_attach(adapter); 2553 err = qlcnic_attach(adapter);
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
3228 return; 3256 return;
3229 3257
3230 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 3258 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
3259 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
3260 netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
3261 __func__);
3262 qlcnic_api_unlock(adapter);
3263
3264 return;
3265 }
3231 3266
3232 if (state == QLCNIC_DEV_READY) { 3267 if (state == QLCNIC_DEV_READY) {
3233 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, 3268 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 652cc13c5023..392b9bd12b4f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1561,6 +1561,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1561{ 1561{
1562 int err; 1562 int err;
1563 1563
1564 adapter->need_fw_reset = 0;
1564 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); 1565 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1565 qlcnic_83xx_enable_mbx_interrupt(adapter); 1566 qlcnic_83xx_enable_mbx_interrupt(adapter);
1566 1567
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774ad..686f460b1502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
397{ 397{
398 struct net_device *netdev = adapter->netdev; 398 struct net_device *netdev = adapter->netdev;
399 399
400 rtnl_lock();
400 if (netif_running(netdev)) 401 if (netif_running(netdev))
401 __qlcnic_down(adapter, netdev); 402 __qlcnic_down(adapter, netdev);
402 403
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
407 /* After disabling SRIOV re-init the driver in default mode 408 /* After disabling SRIOV re-init the driver in default mode
408 configure opmode based on op_mode of function 409 configure opmode based on op_mode of function
409 */ 410 */
410 if (qlcnic_83xx_configure_opmode(adapter)) 411 if (qlcnic_83xx_configure_opmode(adapter)) {
412 rtnl_unlock();
411 return -EIO; 413 return -EIO;
414 }
412 415
413 if (netif_running(netdev)) 416 if (netif_running(netdev))
414 __qlcnic_up(adapter, netdev); 417 __qlcnic_up(adapter, netdev);
415 418
419 rtnl_unlock();
416 return 0; 420 return 0;
417} 421}
418 422
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
533 return -EIO; 537 return -EIO;
534 } 538 }
535 539
540 rtnl_lock();
536 if (netif_running(netdev)) 541 if (netif_running(netdev))
537 __qlcnic_down(adapter, netdev); 542 __qlcnic_down(adapter, netdev);
538 543
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
555 __qlcnic_up(adapter, netdev); 560 __qlcnic_up(adapter, netdev);
556 561
557error: 562error:
563 rtnl_unlock();
558 return err; 564 return err;
559} 565}
560 566
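The qlcnic SR-IOV enable/disable paths above now take rtnl_lock() around the netif_running()/down/up sequence so the interface state cannot change underneath them, and every error path releases the lock before returning. A minimal kernel-context sketch of that locking shape; my_down()/my_up() are hypothetical placeholders for the driver's own teardown and bring-up:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_down(struct net_device *netdev) { }	/* hypothetical */
static void my_up(struct net_device *netdev) { }	/* hypothetical */

static int reconfigure(struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();			/* freeze interface state transitions */
	if (netif_running(netdev))
		my_down(netdev);

	/* ... reconfigure the function here; set err on failure ... */

	if (!err && netif_running(netdev))
		my_up(netdev);
	rtnl_unlock();			/* released on every path */
	return err;
}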
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc13..019f4377307f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1273{ 1273{
1274 struct device *dev = &adapter->pdev->dev; 1274 struct device *dev = &adapter->pdev->dev;
1275 u32 state;
1275 1276
1276 if (device_create_bin_file(dev, &bin_attr_port_stats)) 1277 if (device_create_bin_file(dev, &bin_attr_port_stats))
1277 dev_info(dev, "failed to create port stats sysfs entry"); 1278 dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1285 if (device_create_bin_file(dev, &bin_attr_mem)) 1286 if (device_create_bin_file(dev, &bin_attr_mem))
1286 dev_info(dev, "failed to create mem sysfs entry\n"); 1287 dev_info(dev, "failed to create mem sysfs entry\n");
1287 1288
1289 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1290 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1291 return;
1292
1288 if (device_create_bin_file(dev, &bin_attr_pci_config)) 1293 if (device_create_bin_file(dev, &bin_attr_pci_config))
1289 dev_info(dev, "failed to create pci config sysfs entry"); 1294 dev_info(dev, "failed to create pci config sysfs entry");
1295
1290 if (device_create_file(dev, &dev_attr_beacon)) 1296 if (device_create_file(dev, &dev_attr_beacon))
1291 dev_info(dev, "failed to create beacon sysfs entry"); 1297 dev_info(dev, "failed to create beacon sysfs entry");
1292 1298
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1307void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 1313void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1308{ 1314{
1309 struct device *dev = &adapter->pdev->dev; 1315 struct device *dev = &adapter->pdev->dev;
1316 u32 state;
1310 1317
1311 device_remove_bin_file(dev, &bin_attr_port_stats); 1318 device_remove_bin_file(dev, &bin_attr_port_stats);
1312 1319
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1315 device_remove_file(dev, &dev_attr_diag_mode); 1322 device_remove_file(dev, &dev_attr_diag_mode);
1316 device_remove_bin_file(dev, &bin_attr_crb); 1323 device_remove_bin_file(dev, &bin_attr_crb);
1317 device_remove_bin_file(dev, &bin_attr_mem); 1324 device_remove_bin_file(dev, &bin_attr_mem);
1325
1326 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1327 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1328 return;
1329
1318 device_remove_bin_file(dev, &bin_attr_pci_config); 1330 device_remove_bin_file(dev, &bin_attr_pci_config);
1319 device_remove_file(dev, &dev_attr_beacon); 1331 device_remove_file(dev, &dev_attr_beacon);
1320 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1332 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0f..6bc5db703920 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
740 int i; 740 int i;
741 741
742 if (!mpi_coredump) { 742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
744 return -ENOMEM; 744 return -EINVAL;
745 } 745 }
746 746
747 /* Try to get the spinlock, but dont worry if 747 /* Try to get the spinlock, but dont worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e247..7ad146080c36 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
1274 return; 1274 return;
1275 } 1275 }
1276 1276
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) { 1277 if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1; 1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue, 1280 queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f87f2cde647..3397cee89777 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4231,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4231 case RTL_GIGA_MAC_VER_23: 4231 case RTL_GIGA_MAC_VER_23:
4232 case RTL_GIGA_MAC_VER_24: 4232 case RTL_GIGA_MAC_VER_24:
4233 case RTL_GIGA_MAC_VER_34: 4233 case RTL_GIGA_MAC_VER_34:
4234 case RTL_GIGA_MAC_VER_35:
4234 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4235 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4235 break; 4236 break;
4236 case RTL_GIGA_MAC_VER_40: 4237 case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8b7152565c5e..088921294448 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -7,7 +7,7 @@ config SFC
7 select I2C_ALGOBIT 7 select I2C_ALGOBIT
8 select PTP_1588_CLOCK 8 select PTP_1588_CLOCK
9 ---help--- 9 ---help---
10 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10/40-gigabit Ethernet cards based on
11 the Solarflare SFC4000, SFC9000-family and SFC9100-family 11 the Solarflare SFC4000, SFC9000-family and SFC9100-family
12 controllers. 12 controllers.
13 13
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5f42313b4965..9f18ae984f9e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -94,7 +94,7 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); 94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
95} 95}
96 96
97static int efx_ef10_init_capabilities(struct efx_nic *efx) 97static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
98{ 98{
99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); 99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
100 struct efx_ef10_nic_data *nic_data = efx->nic_data; 100 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -107,16 +107,27 @@ static int efx_ef10_init_capabilities(struct efx_nic *efx)
107 outbuf, sizeof(outbuf), &outlen); 107 outbuf, sizeof(outbuf), &outlen);
108 if (rc) 108 if (rc)
109 return rc; 109 return rc;
110 if (outlen < sizeof(outbuf)) {
111 netif_err(efx, drv, efx->net_dev,
112 "unable to read datapath firmware capabilities\n");
113 return -EIO;
114 }
110 115
111 if (outlen >= sizeof(outbuf)) { 116 nic_data->datapath_caps =
112 nic_data->datapath_caps = 117 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
113 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 118
114 if (!(nic_data->datapath_caps & 119 if (!(nic_data->datapath_caps &
115 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { 120 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
116 netif_err(efx, drv, efx->net_dev, 121 netif_err(efx, drv, efx->net_dev,
117 "Capabilities don't indicate TSO support.\n"); 122 "current firmware does not support TSO\n");
118 return -ENODEV; 123 return -ENODEV;
119 } 124 }
125
126 if (!(nic_data->datapath_caps &
127 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
128 netif_err(efx, probe, efx->net_dev,
129 "current firmware does not support an RX prefix\n");
130 return -ENODEV;
120 } 131 }
121 132
122 return 0; 133 return 0;
@@ -217,21 +228,13 @@ static int efx_ef10_probe(struct efx_nic *efx)
217 if (rc) 228 if (rc)
218 goto fail3; 229 goto fail3;
219 230
220 rc = efx_ef10_init_capabilities(efx); 231 rc = efx_ef10_init_datapath_caps(efx);
221 if (rc < 0) 232 if (rc < 0)
222 goto fail3; 233 goto fail3;
223 234
224 efx->rx_packet_len_offset = 235 efx->rx_packet_len_offset =
225 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 236 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
226 237
227 if (!(nic_data->datapath_caps &
228 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
229 netif_err(efx, probe, efx->net_dev,
230 "current firmware does not support an RX prefix\n");
231 rc = -ENODEV;
232 goto fail3;
233 }
234
235 rc = efx_mcdi_port_get_number(efx); 238 rc = efx_mcdi_port_get_number(efx);
236 if (rc < 0) 239 if (rc < 0)
237 goto fail3; 240 goto fail3;
@@ -260,8 +263,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
260 if (rc) 263 if (rc)
261 goto fail3; 264 goto fail3;
262 265
263 efx_ptp_probe(efx);
264
265 return 0; 266 return 0;
266 267
267fail3: 268fail3:
@@ -342,6 +343,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
342 struct efx_ef10_nic_data *nic_data = efx->nic_data; 343 struct efx_ef10_nic_data *nic_data = efx->nic_data;
343 int rc; 344 int rc;
344 345
346 if (nic_data->must_check_datapath_caps) {
347 rc = efx_ef10_init_datapath_caps(efx);
348 if (rc)
349 return rc;
350 nic_data->must_check_datapath_caps = false;
351 }
352
345 if (nic_data->must_realloc_vis) { 353 if (nic_data->must_realloc_vis) {
346 /* We cannot let the number of VIs change now */ 354 /* We cannot let the number of VIs change now */
347 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 355 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
@@ -710,6 +718,14 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
710 nic_data->must_restore_filters = true; 718 nic_data->must_restore_filters = true;
711 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 719 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
712 720
721 /* The datapath firmware might have been changed */
722 nic_data->must_check_datapath_caps = true;
723
724 /* MAC statistics have been cleared on the NIC; clear the local
725 * statistic that we update with efx_update_diff_stat().
726 */
727 nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
728
713 return -EIO; 729 return -EIO;
714} 730}
715 731
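
The ef10.c hunks above rename the capability query to
efx_ef10_init_datapath_caps() and re-run it after an MC reboot by setting the
new must_check_datapath_caps flag in the reboot-poll path. A rough sketch of
that revalidate-after-reboot pattern, with assumed example_* names rather than
the sfc API:

#include <linux/types.h>

struct example_nic {
	bool must_check_caps;	/* set when the firmware may have changed */
	u32 caps;
};

/* assumed stand-in for the MCDI GET_CAPABILITIES query */
static int example_read_caps(struct example_nic *nic)
{
	nic->caps = 0;
	return 0;
}

static void example_on_mc_reboot(struct example_nic *nic)
{
	nic->must_check_caps = true;
}

static int example_init_nic(struct example_nic *nic)
{
	int rc;

	if (nic->must_check_caps) {
		rc = example_read_caps(nic);
		if (rc)
			return rc;
		nic->must_check_caps = false;
	}
	return 0;
}
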
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb2..c082562dbf4e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
27 27
28/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
29 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
30 * via these mechanisms then wait 20ms for the status word to be set. 30 * via these mechanisms then wait 250ms for the status word to be set.
31 */ 31 */
32#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
33#define MCDI_STATUS_DELAY_COUNT 200 33#define MCDI_STATUS_DELAY_COUNT 2500
34#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
36 36
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
800 } else { 800 } else {
801 int count; 801 int count;
802 802
803 /* Nobody was waiting for an MCDI request, so trigger a reset */
804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
805
806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */
807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
808 if (efx_mcdi_poll_reboot(efx)) 805 if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
810 udelay(MCDI_STATUS_DELAY_US); 807 udelay(MCDI_STATUS_DELAY_US);
811 } 808 }
812 mcdi->new_epoch = true; 809 mcdi->new_epoch = true;
810
811 /* Nobody was waiting for an MCDI request, so trigger a reset */
812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
813 } 813 }
814 814
815 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
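
For reference, the new mcdi.c constants work out to the 250ms quoted in the
updated comment: MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT = 100us * 2500
= 250,000us = 250ms (the old values gave 100us * 200 = 20ms). The other hunk
only reorders the reset: efx_schedule_reset() is now called after the status
word has been consumed.
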
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 8d33da6697fb..7b6be61d549f 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -556,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
556 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 556 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
557 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 557 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
558 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; 558 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
559 case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
559 default: return -EINVAL; 560 default: return -EINVAL;
560 } 561 }
561 } else { 562 } else {
@@ -841,6 +842,7 @@ static unsigned int efx_mcdi_event_link_speed[] = {
841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 842 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 843 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
843 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, 844 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
845 [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
844}; 846};
845 847
846void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) 848void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4b1e188f7a2f..fda29d39032f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -400,6 +400,8 @@ enum {
400 * @rx_rss_context: Firmware handle for our RSS context 400 * @rx_rss_context: Firmware handle for our RSS context
401 * @stats: Hardware statistics 401 * @stats: Hardware statistics
402 * @workaround_35388: Flag: firmware supports workaround for bug 35388 402 * @workaround_35388: Flag: firmware supports workaround for bug 35388
403 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
404 * after MC reboot
403 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of 405 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
404 * %MC_CMD_GET_CAPABILITIES response) 406 * %MC_CMD_GET_CAPABILITIES response)
405 */ 407 */
@@ -413,6 +415,7 @@ struct efx_ef10_nic_data {
413 u32 rx_rss_context; 415 u32 rx_rss_context;
414 u64 stats[EF10_STAT_COUNT]; 416 u64 stats[EF10_STAT_COUNT];
415 bool workaround_35388; 417 bool workaround_35388;
418 bool must_check_datapath_caps;
416 u32 datapath_caps; 419 u32 datapath_caps;
417}; 420};
418 421
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13dde115..5730fe2445a6 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
271#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) 271#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
272#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) 272#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
273 273
274#define SMC_IRQ_FLAGS (IRQF_DISABLED) 274#define SMC_IRQ_FLAGS 0
275 275
276#else 276#else
277 277
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4ad1210..5f9e79f7f2df 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
1356 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1356 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
1357 smsc9420_pci_flush_write(pd); 1357 smsc9420_pci_flush_write(pd);
1358 1358
1359 result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1359 result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
1360 DRV_NAME, pd);
1361 if (result) { 1360 if (result) {
1362 smsc_warn(IFUP, "Unable to use IRQ = %d", irq); 1361 smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
1363 result = -ENODEV; 1362 result = -ENODEV;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0c0cae..f7f2ef49c0c1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1726 goto fail_alloc_irq; 1726 goto fail_alloc_irq;
1727 } 1727 }
1728 result = request_irq(card->irq, gelic_card_interrupt, 1728 result = request_irq(card->irq, gelic_card_interrupt,
1729 IRQF_DISABLED, netdev->name, card); 1729 0, netdev->name, card);
1730 1730
1731 if (result) { 1731 if (result) {
1732 dev_info(ctodev(card), "%s:request_irq failed (%d)\n", 1732 dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
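
The smc91x, smsc9420 and ps3_gelic hunks above are part of the tree-wide
removal of IRQF_DISABLED, which the interrupt core has treated as a no-op for
some time; handlers now pass 0, or only the flags that still matter, such as
IRQF_SHARED. A minimal sketch, with example_* as placeholder names rather than
any of these drivers:

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* ... service the device ... */
	return IRQ_HANDLED;
}

static int example_request(int irq, void *priv)
{
	/* 0 where IRQF_DISABLED used to be passed */
	return request_irq(irq, example_isr, 0, "example", priv);
}
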
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fdf..bdf697b184ae 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 33
34#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
35#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.1"
36#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
37 37
38#include <linux/types.h> 38#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 1705
1706 if (unlikely(vlan_tx_tag_present(skb))) { 1706 if (unlikely(vlan_tx_tag_present(skb))) {
1707 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1707 u16 vid_pcp = vlan_tx_tag_get(skb);
1708
1709 /* drop CFI/DEI bit, register needs VID and PCP */
1710 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1711 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1712 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1708 /* request tagging */ 1713 /* request tagging */
1709 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1714 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 } 1715 }
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240ca..0029148077a9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
299 299
300 /* Init descriptor indexes */
301 lp->tx_bd_ci = 0;
302 lp->tx_bd_next = 0;
303 lp->tx_bd_tail = 0;
304 lp->rx_bd_ci = 0;
305
300 return 0; 306 return 0;
301 307
302out: 308out:
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f07c340990da..3f138ca88670 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -191,8 +191,8 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
191 goto error; 191 goto error;
192 192
193 ret = 0; 193 ret = 0;
194 error: 194error:
195 return ret; 195 return ret;
196} 196}
197 197
198/* Setup a communication between mcs7780 and agilent chip. */ 198/* Setup a communication between mcs7780 and agilent chip. */
@@ -501,8 +501,11 @@ static inline int mcs_setup_urbs(struct mcs_cb *mcs)
501 return 0; 501 return 0;
502 502
503 mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 503 mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
504 if (!mcs->rx_urb) 504 if (!mcs->rx_urb) {
505 usb_free_urb(mcs->tx_urb);
506 mcs->tx_urb = NULL;
505 return 0; 507 return 0;
508 }
506 509
507 return 1; 510 return 1;
508} 511}
@@ -643,9 +646,9 @@ static int mcs_speed_change(struct mcs_cb *mcs)
643 ret = mcs_set_reg(mcs, MCS_MODE_REG, rval); 646 ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
644 647
645 mcs->speed = mcs->new_speed; 648 mcs->speed = mcs->new_speed;
646 error: 649error:
647 mcs->new_speed = 0; 650 mcs->new_speed = 0;
648 return ret; 651 return ret;
649} 652}
650 653
651/* Ioctl calls not supported at this time. Can be an area of future work. */ 654/* Ioctl calls not supported at this time. Can be an area of future work. */
@@ -738,17 +741,20 @@ static int mcs_net_open(struct net_device *netdev)
738 741
739 ret = mcs_receive_start(mcs); 742 ret = mcs_receive_start(mcs);
740 if (ret) 743 if (ret)
741 goto error3; 744 goto error4;
742 745
743 netif_start_queue(netdev); 746 netif_start_queue(netdev);
744 return 0; 747 return 0;
745 748
746 error3: 749error4:
747 irlap_close(mcs->irlap); 750 usb_free_urb(mcs->rx_urb);
748 error2: 751 usb_free_urb(mcs->tx_urb);
749 kfree_skb(mcs->rx_buff.skb); 752error3:
750 error1: 753 irlap_close(mcs->irlap);
751 return ret; 754error2:
755 kfree_skb(mcs->rx_buff.skb);
756error1:
757 return ret;
752} 758}
753 759
754/* Receive callback function. */ 760/* Receive callback function. */
@@ -946,11 +952,11 @@ static int mcs_probe(struct usb_interface *intf,
946 usb_set_intfdata(intf, mcs); 952 usb_set_intfdata(intf, mcs);
947 return 0; 953 return 0;
948 954
949 error2: 955error2:
950 free_netdev(ndev); 956 free_netdev(ndev);
951 957
952 error1: 958error1:
953 return ret; 959 return ret;
954} 960}
955 961
956/* The current device is removed, the USB layer tells us to shut down. */ 962/* The current device is removed, the USB layer tells us to shut down. */
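
The mcs7780 hunks above tighten the error unwinding: mcs_setup_urbs() now
frees the tx URB when the rx URB allocation fails, and mcs_net_open() gains an
error4 label that releases both URBs before falling through to the existing
cleanup. A generic sketch of that reverse-order goto unwind, with hypothetical
example_* names:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_ctx {
	void *a;
	void *b;
};

static int example_open(struct example_ctx *ctx)
{
	ctx->a = kzalloc(16, GFP_KERNEL);
	if (!ctx->a)
		goto err_out;

	ctx->b = kzalloc(16, GFP_KERNEL);
	if (!ctx->b)
		goto err_free_a;

	return 0;

err_free_a:		/* undo only what was set up before the failure */
	kfree(ctx->a);
	ctx->a = NULL;
err_out:
	return -ENOMEM;
}
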
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index fcbf680c3e62..a17d85a331f1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -146,6 +146,7 @@ static int loopback_dev_init(struct net_device *dev)
146 146
147static void loopback_dev_free(struct net_device *dev) 147static void loopback_dev_free(struct net_device *dev)
148{ 148{
149 dev_net(dev)->loopback_dev = NULL;
149 free_percpu(dev->lstats); 150 free_percpu(dev->lstats);
150 free_netdev(dev); 151 free_netdev(dev);
151} 152}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dcb21347c670..adeee615dd19 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -684,15 +684,12 @@ restart:
684 case NETDEV_RELEASE: 684 case NETDEV_RELEASE:
685 case NETDEV_JOIN: 685 case NETDEV_JOIN:
686 case NETDEV_UNREGISTER: 686 case NETDEV_UNREGISTER:
687 /* 687 /* rtnl_lock already held
688 * rtnl_lock already held
689 * we might sleep in __netpoll_cleanup() 688 * we might sleep in __netpoll_cleanup()
690 */ 689 */
691 spin_unlock_irqrestore(&target_list_lock, flags); 690 spin_unlock_irqrestore(&target_list_lock, flags);
692 691
693 mutex_lock(&nt->mutex);
694 __netpoll_cleanup(&nt->np); 692 __netpoll_cleanup(&nt->np);
695 mutex_unlock(&nt->mutex);
696 693
697 spin_lock_irqsave(&target_list_lock, flags); 694 spin_lock_irqsave(&target_list_lock, flags);
698 dev_put(nt->np.dev); 695 dev_put(nt->np.dev);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index db472ffb6e89..313a0377f68f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -30,9 +30,9 @@
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32 32
33#include <asm/io.h> 33#include <linux/io.h>
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/uaccess.h> 35#include <linux/uaccess.h>
36 36
37/* Cicada Extended Control Register 1 */ 37/* Cicada Extended Control Register 1 */
38#define MII_CIS8201_EXT_CON1 0x17 38#define MII_CIS8201_EXT_CON1 0x17
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6fa5ae00039f..01805319e1e0 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
281 nf_reset(skb); 281 nf_reset(skb);
282 282
283 skb->ip_summed = CHECKSUM_NONE; 283 skb->ip_summed = CHECKSUM_NONE;
284 ip_select_ident(iph, &rt->dst, NULL); 284 ip_select_ident(skb, &rt->dst, NULL);
285 ip_send_check(iph); 285 ip_send_check(iph);
286 286
287 ip_local_out(skb); 287 ip_local_out(skb);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a34d6bf5e43b..cc70ecfc7062 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock);
432 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
433 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
434 * transmission of another packet */ 435 * transmission of another packet */
435 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
436 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock);
437 sl_unlock(sl); 439 sl_unlock(sl);
438 return; 440 return;
439 } 441 }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
441 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442 sl->xleft -= actual; 444 sl->xleft -= actual;
443 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock);
444} 447}
445 448
446static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a639de8401f8..807815fc9968 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,11 +1641,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1641 INIT_LIST_HEAD(&tun->disabled); 1641 INIT_LIST_HEAD(&tun->disabled);
1642 err = tun_attach(tun, file, false); 1642 err = tun_attach(tun, file, false);
1643 if (err < 0) 1643 if (err < 0)
1644 goto err_free_dev; 1644 goto err_free_flow;
1645 1645
1646 err = register_netdevice(tun->dev); 1646 err = register_netdevice(tun->dev);
1647 if (err < 0) 1647 if (err < 0)
1648 goto err_free_dev; 1648 goto err_detach;
1649 1649
1650 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1650 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1651 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1651 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1689 strcpy(ifr->ifr_name, tun->dev->name); 1689 strcpy(ifr->ifr_name, tun->dev->name);
1690 return 0; 1690 return 0;
1691 1691
1692 err_free_dev: 1692err_detach:
1693 tun_detach_all(dev);
1694err_free_flow:
1695 tun_flow_uninit(tun);
1696 security_tun_dev_free_security(tun->security);
1697err_free_dev:
1693 free_netdev(dev); 1698 free_netdev(dev);
1694 return err; 1699 return err;
1695} 1700}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 03ad4dc293aa..2023f3ea891e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -33,7 +33,7 @@
33#include <linux/usb/usbnet.h> 33#include <linux/usb/usbnet.h>
34 34
35 35
36#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) 36#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
37 37
38static int is_rndis(struct usb_interface_descriptor *desc) 38static int is_rndis(struct usb_interface_descriptor *desc)
39{ 39{
@@ -69,8 +69,7 @@ static const u8 mbm_guid[16] = {
69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, 69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
70}; 70};
71 71
72/* 72/* probes control interface, claims data interface, collects the bulk
73 * probes control interface, claims data interface, collects the bulk
74 * endpoints, activates data interface (if needed), maybe sets MTU. 73 * endpoints, activates data interface (if needed), maybe sets MTU.
75 * all pure cdc, except for certain firmware workarounds, and knowing 74 * all pure cdc, except for certain firmware workarounds, and knowing
76 * that rndis uses one different rule. 75 * that rndis uses one different rule.
@@ -88,7 +87,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
88 struct usb_cdc_mdlm_desc *desc = NULL; 87 struct usb_cdc_mdlm_desc *desc = NULL;
89 struct usb_cdc_mdlm_detail_desc *detail = NULL; 88 struct usb_cdc_mdlm_detail_desc *detail = NULL;
90 89
91 if (sizeof dev->data < sizeof *info) 90 if (sizeof(dev->data) < sizeof(*info))
92 return -EDOM; 91 return -EDOM;
93 92
94 /* expect strict spec conformance for the descriptors, but 93 /* expect strict spec conformance for the descriptors, but
@@ -126,10 +125,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
126 is_activesync(&intf->cur_altsetting->desc) || 125 is_activesync(&intf->cur_altsetting->desc) ||
127 is_wireless_rndis(&intf->cur_altsetting->desc)); 126 is_wireless_rndis(&intf->cur_altsetting->desc));
128 127
129 memset(info, 0, sizeof *info); 128 memset(info, 0, sizeof(*info));
130 info->control = intf; 129 info->control = intf;
131 while (len > 3) { 130 while (len > 3) {
132 if (buf [1] != USB_DT_CS_INTERFACE) 131 if (buf[1] != USB_DT_CS_INTERFACE)
133 goto next_desc; 132 goto next_desc;
134 133
135 /* use bDescriptorSubType to identify the CDC descriptors. 134 /* use bDescriptorSubType to identify the CDC descriptors.
@@ -139,14 +138,14 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
139 * in favor of a complicated OID-based RPC scheme doing what 138 * in favor of a complicated OID-based RPC scheme doing what
140 * CDC Ethernet achieves with a simple descriptor. 139 * CDC Ethernet achieves with a simple descriptor.
141 */ 140 */
142 switch (buf [2]) { 141 switch (buf[2]) {
143 case USB_CDC_HEADER_TYPE: 142 case USB_CDC_HEADER_TYPE:
144 if (info->header) { 143 if (info->header) {
145 dev_dbg(&intf->dev, "extra CDC header\n"); 144 dev_dbg(&intf->dev, "extra CDC header\n");
146 goto bad_desc; 145 goto bad_desc;
147 } 146 }
148 info->header = (void *) buf; 147 info->header = (void *) buf;
149 if (info->header->bLength != sizeof *info->header) { 148 if (info->header->bLength != sizeof(*info->header)) {
150 dev_dbg(&intf->dev, "CDC header len %u\n", 149 dev_dbg(&intf->dev, "CDC header len %u\n",
151 info->header->bLength); 150 info->header->bLength);
152 goto bad_desc; 151 goto bad_desc;
@@ -175,7 +174,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
175 goto bad_desc; 174 goto bad_desc;
176 } 175 }
177 info->u = (void *) buf; 176 info->u = (void *) buf;
178 if (info->u->bLength != sizeof *info->u) { 177 if (info->u->bLength != sizeof(*info->u)) {
179 dev_dbg(&intf->dev, "CDC union len %u\n", 178 dev_dbg(&intf->dev, "CDC union len %u\n",
180 info->u->bLength); 179 info->u->bLength);
181 goto bad_desc; 180 goto bad_desc;
@@ -233,7 +232,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
233 goto bad_desc; 232 goto bad_desc;
234 } 233 }
235 info->ether = (void *) buf; 234 info->ether = (void *) buf;
236 if (info->ether->bLength != sizeof *info->ether) { 235 if (info->ether->bLength != sizeof(*info->ether)) {
237 dev_dbg(&intf->dev, "CDC ether len %u\n", 236 dev_dbg(&intf->dev, "CDC ether len %u\n",
238 info->ether->bLength); 237 info->ether->bLength);
239 goto bad_desc; 238 goto bad_desc;
@@ -274,8 +273,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
274 break; 273 break;
275 } 274 }
276next_desc: 275next_desc:
277 len -= buf [0]; /* bLength */ 276 len -= buf[0]; /* bLength */
278 buf += buf [0]; 277 buf += buf[0];
279 } 278 }
280 279
281 /* Microsoft ActiveSync based and some regular RNDIS devices lack the 280 /* Microsoft ActiveSync based and some regular RNDIS devices lack the
@@ -379,9 +378,7 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
379} 378}
380EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); 379EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
381 380
382/*------------------------------------------------------------------------- 381/* Communications Device Class, Ethernet Control model
383 *
384 * Communications Device Class, Ethernet Control model
385 * 382 *
386 * Takes two interfaces. The DATA interface is inactive till an altsetting 383 * Takes two interfaces. The DATA interface is inactive till an altsetting
387 * is selected. Configuration data includes class descriptors. There's 384 * is selected. Configuration data includes class descriptors. There's
@@ -389,8 +386,7 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
389 * 386 *
390 * This should interop with whatever the 2.4 "CDCEther.c" driver 387 * This should interop with whatever the 2.4 "CDCEther.c" driver
391 * (by Brad Hards) talked with, with more functionality. 388 * (by Brad Hards) talked with, with more functionality.
392 * 389 */
393 *-------------------------------------------------------------------------*/
394 390
395static void dumpspeed(struct usbnet *dev, __le32 *speeds) 391static void dumpspeed(struct usbnet *dev, __le32 *speeds)
396{ 392{
@@ -404,7 +400,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
404{ 400{
405 struct usb_cdc_notification *event; 401 struct usb_cdc_notification *event;
406 402
407 if (urb->actual_length < sizeof *event) 403 if (urb->actual_length < sizeof(*event))
408 return; 404 return;
409 405
410 /* SPEED_CHANGE can get split into two 8-byte packets */ 406 /* SPEED_CHANGE can get split into two 8-byte packets */
@@ -423,7 +419,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
423 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 419 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
424 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", 420 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
425 urb->actual_length); 421 urb->actual_length);
426 if (urb->actual_length != (sizeof *event + 8)) 422 if (urb->actual_length != (sizeof(*event) + 8))
427 set_bit(EVENT_STS_SPLIT, &dev->flags); 423 set_bit(EVENT_STS_SPLIT, &dev->flags);
428 else 424 else
429 dumpspeed(dev, (__le32 *) &event[1]); 425 dumpspeed(dev, (__le32 *) &event[1]);
@@ -469,7 +465,6 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
469static const struct driver_info cdc_info = { 465static const struct driver_info cdc_info = {
470 .description = "CDC Ethernet Device", 466 .description = "CDC Ethernet Device",
471 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 467 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
472 // .check_connect = cdc_check_connect,
473 .bind = usbnet_cdc_bind, 468 .bind = usbnet_cdc_bind,
474 .unbind = usbnet_cdc_unbind, 469 .unbind = usbnet_cdc_unbind,
475 .status = usbnet_cdc_status, 470 .status = usbnet_cdc_status,
@@ -493,9 +488,8 @@ static const struct driver_info wwan_info = {
493#define DELL_VENDOR_ID 0x413C 488#define DELL_VENDOR_ID 0x413C
494#define REALTEK_VENDOR_ID 0x0bda 489#define REALTEK_VENDOR_ID 0x0bda
495 490
496static const struct usb_device_id products [] = { 491static const struct usb_device_id products[] = {
497/* 492/* BLACKLIST !!
498 * BLACKLIST !!
499 * 493 *
500 * First blacklist any products that are egregiously nonconformant 494 * First blacklist any products that are egregiously nonconformant
501 * with the CDC Ethernet specs. Minor braindamage we cope with; when 495 * with the CDC Ethernet specs. Minor braindamage we cope with; when
@@ -542,7 +536,7 @@ static const struct usb_device_id products [] = {
542 .driver_info = 0, 536 .driver_info = 0,
543}, { 537}, {
544 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 538 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
545 | USB_DEVICE_ID_MATCH_DEVICE, 539 | USB_DEVICE_ID_MATCH_DEVICE,
546 .idVendor = 0x04DD, 540 .idVendor = 0x04DD,
547 .idProduct = 0x8007, /* C-700 */ 541 .idProduct = 0x8007, /* C-700 */
548 ZAURUS_MASTER_INTERFACE, 542 ZAURUS_MASTER_INTERFACE,
@@ -659,8 +653,7 @@ static const struct usb_device_id products [] = {
659 .driver_info = 0, 653 .driver_info = 0,
660}, 654},
661 655
662/* 656/* WHITELIST!!!
663 * WHITELIST!!!
664 * 657 *
665 * CDC Ether uses two interfaces, not necessarily consecutive. 658 * CDC Ether uses two interfaces, not necessarily consecutive.
666 * We match the main interface, ignoring the optional device 659 * We match the main interface, ignoring the optional device
@@ -672,60 +665,40 @@ static const struct usb_device_id products [] = {
672 */ 665 */
673{ 666{
674 /* ZTE (Vodafone) K3805-Z */ 667 /* ZTE (Vodafone) K3805-Z */
675 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 668 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
676 | USB_DEVICE_ID_MATCH_PRODUCT 669 USB_CDC_SUBCLASS_ETHERNET,
677 | USB_DEVICE_ID_MATCH_INT_INFO, 670 USB_CDC_PROTO_NONE),
678 .idVendor = ZTE_VENDOR_ID,
679 .idProduct = 0x1003,
680 .bInterfaceClass = USB_CLASS_COMM,
681 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
682 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
683 .driver_info = (unsigned long)&wwan_info, 671 .driver_info = (unsigned long)&wwan_info,
684}, { 672}, {
685 /* ZTE (Vodafone) K3806-Z */ 673 /* ZTE (Vodafone) K3806-Z */
686 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 674 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
687 | USB_DEVICE_ID_MATCH_PRODUCT 675 USB_CDC_SUBCLASS_ETHERNET,
688 | USB_DEVICE_ID_MATCH_INT_INFO, 676 USB_CDC_PROTO_NONE),
689 .idVendor = ZTE_VENDOR_ID,
690 .idProduct = 0x1015,
691 .bInterfaceClass = USB_CLASS_COMM,
692 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
693 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
694 .driver_info = (unsigned long)&wwan_info, 677 .driver_info = (unsigned long)&wwan_info,
695}, { 678}, {
696 /* ZTE (Vodafone) K4510-Z */ 679 /* ZTE (Vodafone) K4510-Z */
697 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 680 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
698 | USB_DEVICE_ID_MATCH_PRODUCT 681 USB_CDC_SUBCLASS_ETHERNET,
699 | USB_DEVICE_ID_MATCH_INT_INFO, 682 USB_CDC_PROTO_NONE),
700 .idVendor = ZTE_VENDOR_ID,
701 .idProduct = 0x1173,
702 .bInterfaceClass = USB_CLASS_COMM,
703 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
704 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
705 .driver_info = (unsigned long)&wwan_info, 683 .driver_info = (unsigned long)&wwan_info,
706}, { 684}, {
707 /* ZTE (Vodafone) K3770-Z */ 685 /* ZTE (Vodafone) K3770-Z */
708 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 686 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
709 | USB_DEVICE_ID_MATCH_PRODUCT 687 USB_CDC_SUBCLASS_ETHERNET,
710 | USB_DEVICE_ID_MATCH_INT_INFO, 688 USB_CDC_PROTO_NONE),
711 .idVendor = ZTE_VENDOR_ID,
712 .idProduct = 0x1177,
713 .bInterfaceClass = USB_CLASS_COMM,
714 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
715 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
716 .driver_info = (unsigned long)&wwan_info, 689 .driver_info = (unsigned long)&wwan_info,
717}, { 690}, {
718 /* ZTE (Vodafone) K3772-Z */ 691 /* ZTE (Vodafone) K3772-Z */
719 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 692 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
720 | USB_DEVICE_ID_MATCH_PRODUCT 693 USB_CDC_SUBCLASS_ETHERNET,
721 | USB_DEVICE_ID_MATCH_INT_INFO, 694 USB_CDC_PROTO_NONE),
722 .idVendor = ZTE_VENDOR_ID,
723 .idProduct = 0x1181,
724 .bInterfaceClass = USB_CLASS_COMM,
725 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
726 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
727 .driver_info = (unsigned long)&wwan_info, 695 .driver_info = (unsigned long)&wwan_info,
728}, { 696}, {
697 /* Telit modules */
698 USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
699 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
700 .driver_info = (kernel_ulong_t) &wwan_info,
701}, {
729 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 702 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
730 USB_CDC_PROTO_NONE), 703 USB_CDC_PROTO_NONE),
731 .driver_info = (unsigned long) &cdc_info, 704 .driver_info = (unsigned long) &cdc_info,
@@ -736,15 +709,11 @@ static const struct usb_device_id products [] = {
736 709
737}, { 710}, {
738 /* Various Huawei modems with a network port like the UMG1831 */ 711 /* Various Huawei modems with a network port like the UMG1831 */
739 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 712 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
740 | USB_DEVICE_ID_MATCH_INT_INFO, 713 USB_CDC_SUBCLASS_ETHERNET, 255),
741 .idVendor = HUAWEI_VENDOR_ID,
742 .bInterfaceClass = USB_CLASS_COMM,
743 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
744 .bInterfaceProtocol = 255,
745 .driver_info = (unsigned long)&wwan_info, 714 .driver_info = (unsigned long)&wwan_info,
746}, 715},
747 { }, // END 716 { }, /* END */
748}; 717};
749MODULE_DEVICE_TABLE(usb, products); 718MODULE_DEVICE_TABLE(usb, products);
750 719
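
Besides the IS_ENABLED() conversion and the sizeof()/array-index style fixes,
the cdc_ether hunks above replace the open-coded match_flags/idVendor/
bInterface* initialisers with USB_DEVICE_AND_INTERFACE_INFO() and
USB_VENDOR_AND_INTERFACE_INFO(). A sketch of a table entry written with the
macro; the 0x1234/0x5678 IDs are placeholders, not real devices:

#include <linux/usb.h>
#include <linux/usb/cdc.h>

static const struct usb_device_id example_ids[] = {
	{
		/* expands to the same match_flags, idVendor, idProduct and
		 * bInterfaceClass/SubClass/Protocol fields that the removed
		 * entries spelled out by hand
		 */
		USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, USB_CLASS_COMM,
					      USB_CDC_SUBCLASS_ETHERNET,
					      USB_CDC_PROTO_NONE),
		.driver_info = 0,
	},
	{ }	/* terminating entry */
};
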
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb9460349d..c6867f926cff 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
303 rx_ctl |= 0x02; 303 rx_ctl |= 0x02;
304 } else if (net->flags & IFF_ALLMULTI || 304 } else if (net->flags & IFF_ALLMULTI ||
305 netdev_mc_count(net) > DM_MAX_MCAST) { 305 netdev_mc_count(net) > DM_MAX_MCAST) {
306 rx_ctl |= 0x04; 306 rx_ctl |= 0x08;
307 } else if (!netdev_mc_empty(net)) { 307 } else if (!netdev_mc_empty(net)) {
308 struct netdev_hw_addr *ha; 308 struct netdev_hw_addr *ha;
309 309
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6312332afeba..3d6aaf79d8b2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,7 +714,7 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 717 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 718
719 /* 4. Gobi 1000 devices */ 719 /* 4. Gobi 1000 devices */
720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7b331e613e02..bf94e10a37c8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1241 if (num_sgs == 1) 1241 if (num_sgs == 1)
1242 return 0; 1242 return 0;
1243 1243
1244 urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); 1244 /* reserve one for zero packet */
1245 urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1246 GFP_ATOMIC);
1245 if (!urb->sg) 1247 if (!urb->sg)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1305 if (build_dma_sg(skb, urb) < 0) 1307 if (build_dma_sg(skb, urb) < 0)
1306 goto drop; 1308 goto drop;
1307 } 1309 }
1308 entry->length = length = urb->transfer_buffer_length; 1310 length = urb->transfer_buffer_length;
1309 1311
1310 /* don't assume the hardware handles USB_ZERO_PACKET 1312 /* don't assume the hardware handles USB_ZERO_PACKET
1311 * NOTE: strictly conforming cdc-ether devices should expect 1313 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1317 if (length % dev->maxpacket == 0) { 1319 if (length % dev->maxpacket == 0) {
1318 if (!(info->flags & FLAG_SEND_ZLP)) { 1320 if (!(info->flags & FLAG_SEND_ZLP)) {
1319 if (!(info->flags & FLAG_MULTI_PACKET)) { 1321 if (!(info->flags & FLAG_MULTI_PACKET)) {
1320 urb->transfer_buffer_length++; 1322 length++;
1321 if (skb_tailroom(skb)) { 1323 if (skb_tailroom(skb) && !urb->num_sgs) {
1322 skb->data[skb->len] = 0; 1324 skb->data[skb->len] = 0;
1323 __skb_put(skb, 1); 1325 __skb_put(skb, 1);
1324 } 1326 } else if (urb->num_sgs)
1327 sg_set_buf(&urb->sg[urb->num_sgs++],
1328 dev->padding_pkt, 1);
1325 } 1329 }
1326 } else 1330 } else
1327 urb->transfer_flags |= URB_ZERO_PACKET; 1331 urb->transfer_flags |= URB_ZERO_PACKET;
1328 } 1332 }
1333 entry->length = urb->transfer_buffer_length = length;
1329 1334
1330 spin_lock_irqsave(&dev->txq.lock, flags); 1335 spin_lock_irqsave(&dev->txq.lock, flags);
1331 retval = usb_autopm_get_interface_async(dev->intf); 1336 retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1509 1514
1510 usb_kill_urb(dev->interrupt); 1515 usb_kill_urb(dev->interrupt);
1511 usb_free_urb(dev->interrupt); 1516 usb_free_urb(dev->interrupt);
1517 kfree(dev->padding_pkt);
1512 1518
1513 free_netdev(net); 1519 free_netdev(net);
1514} 1520}
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1679 /* initialize max rx_qlen and tx_qlen */ 1685 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev); 1686 usbnet_update_max_qlen(dev);
1681 1687
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt)
1692 goto out4;
1693 }
1694
1682 status = register_netdev (net); 1695 status = register_netdev (net);
1683 if (status) 1696 if (status)
1684 goto out4; 1697 goto out5;
1685 netif_info(dev, probe, dev->net, 1698 netif_info(dev, probe, dev->net,
1686 "register '%s' at usb-%s-%s, %s, %pM\n", 1699 "register '%s' at usb-%s-%s, %s, %pM\n",
1687 udev->dev.driver->name, 1700 udev->dev.driver->name,
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1699 1712
1700 return 0; 1713 return 0;
1701 1714
1715out5:
1716 kfree(dev->padding_pkt);
1702out4: 1717out4:
1703 usb_free_urb(dev->interrupt); 1718 usb_free_urb(dev->interrupt);
1704out3: 1719out3:
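
The usbnet hunks above allocate the sg array with one spare slot and, when a
scatter/gather transfer is an exact multiple of maxpacket on hardware that
cannot send a ZLP, append a preallocated one-byte padding buffer instead of
extending the skb. A stripped-down sketch of just that append step; the
example_* helper is hypothetical and assumes the caller reserved the extra sg
entry and allocated padding_pkt, as the probe hunk does:

#include <linux/scatterlist.h>
#include <linux/usb.h>

static void example_pad_sg_urb(struct urb *urb, u8 *padding_pkt)
{
	/* one extra byte so the transfer is no longer a maxpacket multiple */
	sg_set_buf(&urb->sg[urb->num_sgs++], padding_pkt, 1);
	urb->transfer_buffer_length++;
}
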
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bf64b4191dcc..2ef5b6219f3f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -564,7 +564,7 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
564 struct net_device *dev; 564 struct net_device *dev;
565 struct net *net = sock_net(sk); 565 struct net *net = sock_net(sk);
566 sa_family_t sa_family = sk->sk_family; 566 sa_family_t sa_family = sk->sk_family;
567 u16 port = htons(inet_sk(sk)->inet_sport); 567 __be16 port = inet_sk(sk)->inet_sport;
568 568
569 rcu_read_lock(); 569 rcu_read_lock();
570 for_each_netdev_rcu(net, dev) { 570 for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
581 struct net_device *dev; 581 struct net_device *dev;
582 struct net *net = sock_net(sk); 582 struct net *net = sock_net(sk);
583 sa_family_t sa_family = sk->sk_family; 583 sa_family_t sa_family = sk->sk_family;
584 u16 port = htons(inet_sk(sk)->inet_sport); 584 __be16 port = inet_sk(sk)->inet_sport;
585 585
586 rcu_read_lock(); 586 rcu_read_lock();
587 for_each_netdev_rcu(net, dev) { 587 for_each_netdev_rcu(net, dev) {
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
952 952
953 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
954 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb(); 955 rcu_assign_sk_user_data(vs->sock->sk, NULL);
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk); 956 vxlan_notify_del_rx_port(sk);
958 spin_unlock(&vn->sock_lock); 957 spin_unlock(&vn->sock_lock);
959 958
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1048 1047
1049 port = inet_sk(sk)->inet_sport; 1048 port = inet_sk(sk)->inet_sport;
1050 1049
1051 smp_read_barrier_depends(); 1050 vs = rcu_dereference_sk_user_data(sk);
1052 vs = (struct vxlan_sock *)sk->sk_user_data;
1053 if (!vs) 1051 if (!vs)
1054 goto drop; 1052 goto drop;
1055 1053
@@ -2021,7 +2019,8 @@ static struct device_type vxlan_type = {
2021}; 2019};
2022 2020
2023/* Calls the ndo_add_vxlan_port of the caller in order to 2021/* Calls the ndo_add_vxlan_port of the caller in order to
2024 * supply the listening VXLAN udp ports. 2022 * supply the listening VXLAN udp ports. Callers are expected
2023 * to implement the ndo_add_vxlan_port.
2025 */ 2024 */
2026void vxlan_get_rx_port(struct net_device *dev) 2025void vxlan_get_rx_port(struct net_device *dev)
2027{ 2026{
@@ -2029,16 +2028,13 @@ void vxlan_get_rx_port(struct net_device *dev)
2029 struct net *net = dev_net(dev); 2028 struct net *net = dev_net(dev);
2030 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2029 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2031 sa_family_t sa_family; 2030 sa_family_t sa_family;
2032 u16 port; 2031 __be16 port;
2033 int i; 2032 unsigned int i;
2034
2035 if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
2036 return;
2037 2033
2038 spin_lock(&vn->sock_lock); 2034 spin_lock(&vn->sock_lock);
2039 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2035 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2040 hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) { 2036 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2041 port = htons(inet_sk(vs->sock->sk)->inet_sport); 2037 port = inet_sk(vs->sock->sk)->inet_sport;
2042 sa_family = vs->sock->sk->sk_family; 2038 sa_family = vs->sock->sk->sk_family;
2043 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 2039 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2044 port); 2040 port);
@@ -2304,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2304 atomic_set(&vs->refcnt, 1); 2300 atomic_set(&vs->refcnt, 1);
2305 vs->rcv = rcv; 2301 vs->rcv = rcv;
2306 vs->data = data; 2302 vs->data = data;
2307 smp_wmb(); 2303 rcu_assign_sk_user_data(vs->sock->sk, vs);
2308 vs->sock->sk->sk_user_data = vs;
2309 2304
2310 spin_lock(&vn->sock_lock); 2305 spin_lock(&vn->sock_lock);
2311 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2306 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
@@ -2492,15 +2487,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2492 2487
2493 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2488 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
2494 2489
2495 /* create an fdb entry for default destination */ 2490 /* create an fdb entry for a valid default destination */
2496 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2491 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
2497 &vxlan->default_dst.remote_ip, 2492 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2498 NUD_REACHABLE|NUD_PERMANENT, 2493 &vxlan->default_dst.remote_ip,
2499 NLM_F_EXCL|NLM_F_CREATE, 2494 NUD_REACHABLE|NUD_PERMANENT,
2500 vxlan->dst_port, vxlan->default_dst.remote_vni, 2495 NLM_F_EXCL|NLM_F_CREATE,
2501 vxlan->default_dst.remote_ifindex, NTF_SELF); 2496 vxlan->dst_port,
2502 if (err) 2497 vxlan->default_dst.remote_vni,
2503 return err; 2498 vxlan->default_dst.remote_ifindex,
2499 NTF_SELF);
2500 if (err)
2501 return err;
2502 }
2504 2503
2505 err = register_netdevice(dev); 2504 err = register_netdevice(dev);
2506 if (err) { 2505 if (err) {
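
The vxlan hunks above replace the open-coded smp_wmb() and
smp_read_barrier_depends() around sk_user_data with the
rcu_assign_sk_user_data()/rcu_dereference_sk_user_data() helpers, and switch
the port variables to __be16 since inet_sport is already stored in network
byte order. A small sketch of the helper pair, with assumed example_* names:

#include <net/sock.h>

/* publisher side: pairs with the RCU read below */
static void example_publish(struct sock *sk, void *priv)
{
	rcu_assign_sk_user_data(sk, priv);
}

/* reader side: e.g. from an encap receive path under rcu_read_lock() */
static void *example_lookup(struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}
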
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a5a4e4..ab9e3a8410bc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
1270 return; 1270 return;
1271 1271
1272 /* 1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity 1273 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row. 1274 * chooses the other antenna 3 times in a row.
1282 */ 1275 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515fe3ffa..5ac713d2ff5d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
399 tbf->bf_buf_addr = bf->bf_buf_addr; 399 tbf->bf_buf_addr = bf->bf_buf_addr;
400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
401 tbf->bf_state = bf->bf_state; 401 tbf->bf_state = bf->bf_state;
402 tbf->bf_state.stale = false;
402 403
403 return tbf; 404 return tbf;
404} 405}
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1389 u16 tid, u16 *ssn) 1390 u16 tid, u16 *ssn)
1390{ 1391{
1391 struct ath_atx_tid *txtid; 1392 struct ath_atx_tid *txtid;
1393 struct ath_txq *txq;
1392 struct ath_node *an; 1394 struct ath_node *an;
1393 u8 density; 1395 u8 density;
1394 1396
1395 an = (struct ath_node *)sta->drv_priv; 1397 an = (struct ath_node *)sta->drv_priv;
1396 txtid = ATH_AN_2_TID(an, tid); 1398 txtid = ATH_AN_2_TID(an, tid);
1399 txq = txtid->ac->txq;
1400
1401 ath_txq_lock(sc, txq);
1397 1402
1398 /* update ampdu factor/density, they may have changed. This may happen 1403 /* update ampdu factor/density, they may have changed. This may happen
1399 * in HT IBSS when a beacon with HT-info is received after the station 1404 * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1417 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1422 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1418 txtid->baw_head = txtid->baw_tail = 0; 1423 txtid->baw_head = txtid->baw_tail = 0;
1419 1424
1425 ath_txq_unlock_complete(sc, txq);
1426
1420 return 0; 1427 return 0;
1421} 1428}
1422 1429
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1555 __skb_unlink(bf->bf_mpdu, tid_q); 1562 __skb_unlink(bf->bf_mpdu, tid_q);
1556 list_add_tail(&bf->list, &bf_q); 1563 list_add_tail(&bf->list, &bf_q);
1557 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1564 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1558 ath_tx_addto_baw(sc, tid, bf); 1565 if (bf_isampdu(bf)) {
1559 bf->bf_state.bf_type &= ~BUF_AGGR; 1566 ath_tx_addto_baw(sc, tid, bf);
1567 bf->bf_state.bf_type &= ~BUF_AGGR;
1568 }
1560 if (bf_tail) 1569 if (bf_tail)
1561 bf_tail->bf_next = bf; 1570 bf_tail->bf_next = bf;
1562 1571
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1950 if (bf_is_ampdu_not_probing(bf)) 1959 if (bf_is_ampdu_not_probing(bf))
1951 txq->axq_ampdu_depth++; 1960 txq->axq_ampdu_depth++;
1952 1961
1953 bf = bf->bf_lastbf->bf_next; 1962 bf_last = bf->bf_lastbf;
1963 bf = bf_last->bf_next;
1964 bf_last->bf_next = NULL;
1954 } 1965 }
1955 } 1966 }
1956} 1967}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fc8a0fa6d3b2..b00a7e92225f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -28,7 +28,7 @@ config BRCMFMAC
28 28
29config BRCMFMAC_SDIO 29config BRCMFMAC_SDIO
30 bool "SDIO bus interface support for FullMAC driver" 30 bool "SDIO bus interface support for FullMAC driver"
31 depends on MMC 31 depends on (MMC = y || MMC = BRCMFMAC)
32 depends on BRCMFMAC 32 depends on BRCMFMAC
33 select FW_LOADER 33 select FW_LOADER
34 default y 34 default y
@@ -39,7 +39,7 @@ config BRCMFMAC_SDIO
39 39
40config BRCMFMAC_USB 40config BRCMFMAC_USB
41 bool "USB bus interface support for FullMAC driver" 41 bool "USB bus interface support for FullMAC driver"
42 depends on USB 42 depends on (USB = y || USB = BRCMFMAC)
43 depends on BRCMFMAC 43 depends on BRCMFMAC
44 select FW_LOADER 44 select FW_LOADER
45 ---help--- 45 ---help---
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2bc8dde..c3462b75bd08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
464 464
465static int brcmf_sdio_pd_probe(struct platform_device *pdev) 465static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 466{
467 int ret;
468
469 brcmf_dbg(SDIO, "Enter\n"); 467 brcmf_dbg(SDIO, "Enter\n");
470 468
471 brcmfmac_sdio_pdata = pdev->dev.platform_data; 469 brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
473 if (brcmfmac_sdio_pdata->power_on) 471 if (brcmfmac_sdio_pdata->power_on)
474 brcmfmac_sdio_pdata->power_on(); 472 brcmfmac_sdio_pdata->power_on();
475 473
476 ret = sdio_register_driver(&brcmf_sdmmc_driver); 474 return 0;
477 if (ret)
478 brcmf_err("sdio_register_driver failed: %d\n", ret);
479
480 return ret;
481} 475}
482 476
483static int brcmf_sdio_pd_remove(struct platform_device *pdev) 477static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
500 } 494 }
501}; 495};
502 496
497void brcmf_sdio_register(void)
498{
499 int ret;
500
501 ret = sdio_register_driver(&brcmf_sdmmc_driver);
502 if (ret)
503 brcmf_err("sdio_register_driver failed: %d\n", ret);
504}
505
503void brcmf_sdio_exit(void) 506void brcmf_sdio_exit(void)
504{ 507{
505 brcmf_dbg(SDIO, "Enter\n"); 508 brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
510 sdio_unregister_driver(&brcmf_sdmmc_driver); 513 sdio_unregister_driver(&brcmf_sdmmc_driver);
511} 514}
512 515
513void brcmf_sdio_init(void) 516void __init brcmf_sdio_init(void)
514{ 517{
515 int ret; 518 int ret;
516 519
517 brcmf_dbg(SDIO, "Enter\n"); 520 brcmf_dbg(SDIO, "Enter\n");
518 521
519 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); 522 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
520 if (ret == -ENODEV) { 523 if (ret == -ENODEV)
521 brcmf_dbg(SDIO, "No platform data available, registering without.\n"); 524 brcmf_dbg(SDIO, "No platform data available.\n");
522 ret = sdio_register_driver(&brcmf_sdmmc_driver);
523 }
524
525 if (ret)
526 brcmf_err("driver registration failed: %d\n", ret);
527} 525}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985844e4..74156f84180c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev);
156#ifdef CONFIG_BRCMFMAC_SDIO 156#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 157extern void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 158extern void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void);
159#endif 160#endif
160#ifdef CONFIG_BRCMFMAC_USB 161#ifdef CONFIG_BRCMFMAC_USB
161extern void brcmf_usb_exit(void); 162extern void brcmf_usb_exit(void);
162extern void brcmf_usb_init(void); 163extern void brcmf_usb_register(void);
163#endif 164#endif
164 165
165#endif /* _BRCMF_BUS_H_ */ 166#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec1fbf1..40e7f854e10f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1231 return bus->chip << 4 | bus->chiprev; 1231 return bus->chip << 4 | bus->chiprev;
1232} 1232}
1233 1233
1234static void brcmf_driver_init(struct work_struct *work) 1234static void brcmf_driver_register(struct work_struct *work)
1235{ 1235{
1236 brcmf_debugfs_init();
1237
1238#ifdef CONFIG_BRCMFMAC_SDIO 1236#ifdef CONFIG_BRCMFMAC_SDIO
1239 brcmf_sdio_init(); 1237 brcmf_sdio_register();
1240#endif 1238#endif
1241#ifdef CONFIG_BRCMFMAC_USB 1239#ifdef CONFIG_BRCMFMAC_USB
1242 brcmf_usb_init(); 1240 brcmf_usb_register();
1243#endif 1241#endif
1244} 1242}
1245static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); 1243static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1246 1244
1247static int __init brcmfmac_module_init(void) 1245static int __init brcmfmac_module_init(void)
1248{ 1246{
1247 brcmf_debugfs_init();
1248#ifdef CONFIG_BRCMFMAC_SDIO
1249 brcmf_sdio_init();
1250#endif
1249 if (!schedule_work(&brcmf_driver_work)) 1251 if (!schedule_work(&brcmf_driver_work))
1250 return -EBUSY; 1252 return -EBUSY;
1251 1253
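
The brcmfmac hunks above split driver registration: the SDIO platform driver is now probed synchronously from module init (so platform data is picked up first), while the actual SDIO/USB bus registration is pushed to a workqueue. The following is a minimal sketch of that deferred-registration pattern with illustrative names; it is not part of the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Runs later in process context; may sleep while registering bus drivers. */
static void example_driver_register(struct work_struct *work)
{
	/* e.g. sdio_register_driver() / usb_register() would go here */
}
static DECLARE_WORK(example_register_work, example_driver_register);

static int __init example_module_init(void)
{
	/* do only the cheap, ordering-sensitive setup here ... */
	if (!schedule_work(&example_register_work))
		return -EBUSY;
	return 0;
}

static void __exit example_module_exit(void)
{
	cancel_work_sync(&example_register_work);
	/* ... and unregister the bus drivers here */
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");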
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7c8556..f4aea47e0730 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
1539 brcmf_release_fw(&fw_image_list); 1539 brcmf_release_fw(&fw_image_list);
1540} 1540}
1541 1541
1542void brcmf_usb_init(void) 1542void brcmf_usb_register(void)
1543{ 1543{
1544 brcmf_dbg(USB, "Enter\n"); 1544 brcmf_dbg(USB, "Enter\n");
1545 INIT_LIST_HEAD(&fw_image_list); 1545 INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 3a6544710c8a..edc5d105ff98 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
457 if (err != 0) 457 if (err != 0)
458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", 458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
459 __func__, err); 459 __func__, err);
460
461 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
460 return err; 462 return err;
461} 463}
462 464
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
479 return; 481 return;
480 } 482 }
481 483
484 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
485
482 /* put driver in down state */ 486 /* put driver in down state */
483 spin_lock_bh(&wl->lock); 487 spin_lock_bh(&wl->lock);
484 brcms_down(wl); 488 brcms_down(wl);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index d06376014bcd..899cad34ccd3 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -40,6 +40,7 @@ struct hwbus_priv {
40 struct cw1200_common *core; 40 struct cw1200_common *core;
41 const struct cw1200_platform_data_spi *pdata; 41 const struct cw1200_platform_data_spi *pdata;
42 spinlock_t lock; /* Serialize all bus operations */ 42 spinlock_t lock; /* Serialize all bus operations */
43 wait_queue_head_t wq;
43 int claimed; 44 int claimed;
44}; 45};
45 46
@@ -197,8 +198,11 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
197{ 198{
198 unsigned long flags; 199 unsigned long flags;
199 200
201 DECLARE_WAITQUEUE(wait, current);
202
200 might_sleep(); 203 might_sleep();
201 204
205 add_wait_queue(&self->wq, &wait);
202 spin_lock_irqsave(&self->lock, flags); 206 spin_lock_irqsave(&self->lock, flags);
203 while (1) { 207 while (1) {
204 set_current_state(TASK_UNINTERRUPTIBLE); 208 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -211,6 +215,7 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
211 set_current_state(TASK_RUNNING); 215 set_current_state(TASK_RUNNING);
212 self->claimed = 1; 216 self->claimed = 1;
213 spin_unlock_irqrestore(&self->lock, flags); 217 spin_unlock_irqrestore(&self->lock, flags);
218 remove_wait_queue(&self->wq, &wait);
214 219
215 return; 220 return;
216} 221}
@@ -222,6 +227,8 @@ static void cw1200_spi_unlock(struct hwbus_priv *self)
222 spin_lock_irqsave(&self->lock, flags); 227 spin_lock_irqsave(&self->lock, flags);
223 self->claimed = 0; 228 self->claimed = 0;
224 spin_unlock_irqrestore(&self->lock, flags); 229 spin_unlock_irqrestore(&self->lock, flags);
230 wake_up(&self->wq);
231
225 return; 232 return;
226} 233}
227 234
@@ -243,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
243 250
244 pr_debug("SW IRQ subscribe\n"); 251 pr_debug("SW IRQ subscribe\n");
245 252
246 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, 253 ret = request_threaded_irq(self->func->irq, NULL,
247 IRQF_TRIGGER_HIGH, 254 cw1200_spi_irq_handler,
248 "cw1200_wlan_irq", self); 255 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
256 "cw1200_wlan_irq", self);
249 if (WARN_ON(ret < 0)) 257 if (WARN_ON(ret < 0))
250 goto exit; 258 goto exit;
251 259
@@ -400,6 +408,8 @@ static int cw1200_spi_probe(struct spi_device *func)
400 408
401 spi_set_drvdata(func, self); 409 spi_set_drvdata(func, self);
402 410
411 init_waitqueue_head(&self->wq);
412
403 status = cw1200_spi_irq_subscribe(self); 413 status = cw1200_spi_irq_subscribe(self);
404 414
405 status = cw1200_core_probe(&cw1200_spi_hwbus_ops, 415 status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
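
The cw1200 change above replaces a bare spin on the claimed flag with a waitqueue, so a blocked claimer sleeps until cw1200_spi_unlock() wakes it. Below is a minimal sketch of that claim/release pattern with illustrative names (bus_claim, bus_claim_lock, ...); it assumes the same single-flag semantics as the driver and is not part of the patch.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct bus_claim {
	spinlock_t lock;
	wait_queue_head_t wq;
	int claimed;
};

static void bus_claim_init(struct bus_claim *b)
{
	spin_lock_init(&b->lock);
	init_waitqueue_head(&b->wq);
	b->claimed = 0;
}

static void bus_claim_lock(struct bus_claim *b)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&b->wq, &wait);
	spin_lock_irqsave(&b->lock, flags);
	while (b->claimed) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(&b->lock, flags);
		schedule();		/* woken by bus_claim_unlock() */
		spin_lock_irqsave(&b->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	b->claimed = 1;
	spin_unlock_irqrestore(&b->lock, flags);
	remove_wait_queue(&b->wq, &wait);
}

static void bus_claim_unlock(struct bus_claim *b)
{
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	b->claimed = 0;
	spin_unlock_irqrestore(&b->lock, flags);
	wake_up(&b->wq);
}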
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 21c688264708..1214c587fd08 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
150 */ 150 */
151int 151int
152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
153 struct mwifiex_ra_list_tbl *pra_list, int headroom, 153 struct mwifiex_ra_list_tbl *pra_list,
154 int ptrindex, unsigned long ra_list_flags) 154 int ptrindex, unsigned long ra_list_flags)
155 __releases(&priv->wmm.ra_list_spinlock) 155 __releases(&priv->wmm.ra_list_spinlock)
156{ 156{
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 160 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 161 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 162 struct txpd *ptx_pd = NULL;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
163 164
164 skb_src = skb_peek(&pra_list->skb_head); 165 skb_src = skb_peek(&pra_list->skb_head);
165 if (!skb_src) { 166 if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c62a0cc..892098d6a696 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, 26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
27 struct sk_buff *skb); 27 struct sk_buff *skb);
28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
29 struct mwifiex_ra_list_tbl *ptr, int headroom, 29 struct mwifiex_ra_list_tbl *ptr,
30 int ptr_index, unsigned long flags) 30 int ptr_index, unsigned long flags)
31 __releases(&priv->wmm.ra_list_spinlock); 31 __releases(&priv->wmm.ra_list_spinlock);
32 32
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d761477d15e..a6c46f3b6e3a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1156 1156
1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && 1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1158 adapter->iface_type == MWIFIEX_SDIO) { 1158 adapter->iface_type != MWIFIEX_USB) {
1159 mwifiex_hs_activated_event(priv, true); 1159 mwifiex_hs_activated_event(priv, true);
1160 return 0; 1160 return 0;
1161 } else { 1161 } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1167 } 1167 }
1168 if (conditions != HS_CFG_CANCEL) { 1168 if (conditions != HS_CFG_CANCEL) {
1169 adapter->is_hs_configured = true; 1169 adapter->is_hs_configured = true;
1170 if (adapter->iface_type == MWIFIEX_USB || 1170 if (adapter->iface_type == MWIFIEX_USB)
1171 adapter->iface_type == MWIFIEX_PCIE)
1172 mwifiex_hs_activated_event(priv, true); 1171 mwifiex_hs_activated_event(priv, true);
1173 } else { 1172 } else {
1174 adapter->is_hs_configured = false; 1173 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 2472d4b7f00e..1c70b8d09227 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
447 */ 447 */
448 adapter->is_suspended = true; 448 adapter->is_suspended = true;
449 449
450 for (i = 0; i < adapter->priv_num; i++)
451 netif_carrier_off(adapter->priv[i]->netdev);
452
453 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 450 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
454 usb_kill_urb(card->rx_cmd.urb); 451 usb_kill_urb(card->rx_cmd.urb);
455 452
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
509 MWIFIEX_RX_CMD_BUF_SIZE); 506 MWIFIEX_RX_CMD_BUF_SIZE);
510 } 507 }
511 508
512 for (i = 0; i < adapter->priv_num; i++)
513 if (adapter->priv[i]->media_connected)
514 netif_carrier_on(adapter->priv[i]->netdev);
515
516 /* Disable Host Sleep */ 509 /* Disable Host Sleep */
517 if (adapter->hs_activated) 510 if (adapter->hs_activated)
518 mwifiex_cancel_hs(mwifiex_get_priv(adapter, 511 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cdea54d..95fa3599b407 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1240 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1241 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1243 ptr_index, flags);
1244 /* ra_list_spinlock has been freed in 1243 /* ra_list_spinlock has been freed in
1245 mwifiex_11n_aggregate_pkt() */ 1244 mwifiex_11n_aggregate_pkt() */
1246 else 1245 else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef66cf4b..e328d3058c41 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ 83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
86 {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
86 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ 87 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
87 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 88 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
88 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 89 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
979 if (err) { 980 if (err) {
980 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " 981 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
981 "(%d)!\n", p54u_fwlist[i].fw, err); 982 "(%d)!\n", p54u_fwlist[i].fw, err);
983 usb_put_dev(udev);
982 } 984 }
983 985
984 return err; 986 return err;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 95e6e61c3de0..88ce656f96cd 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -6659,19 +6659,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
6659 rt2800_init_registers(rt2x00dev))) 6659 rt2800_init_registers(rt2x00dev)))
6660 return -EIO; 6660 return -EIO;
6661 6661
6662 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
6663 return -EIO;
6664
6662 /* 6665 /*
6663 * Send signal to firmware during boot time. 6666 * Send signal to firmware during boot time.
6664 */ 6667 */
6665 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 6668 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
6666 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 6669 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
6667 if (rt2x00_is_usb(rt2x00dev)) { 6670 if (rt2x00_is_usb(rt2x00dev))
6668 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); 6671 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
6669 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 6672 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
6670 }
6671 msleep(1); 6673 msleep(1);
6672 6674
6673 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || 6675 if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
6674 rt2800_wait_bbp_ready(rt2x00dev)))
6675 return -EIO; 6676 return -EIO;
6676 6677
6677 rt2800_init_bbp(rt2x00dev); 6678 rt2800_init_bbp(rt2x00dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 841fb9dfc9da..9a6edb0c014e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -438,17 +438,16 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
438 skb_queue_tail(&priv->rx_queue, skb); 438 skb_queue_tail(&priv->rx_queue, skb);
439 usb_anchor_urb(entry, &priv->anchored); 439 usb_anchor_urb(entry, &priv->anchored);
440 ret = usb_submit_urb(entry, GFP_KERNEL); 440 ret = usb_submit_urb(entry, GFP_KERNEL);
441 usb_put_urb(entry);
441 if (ret) { 442 if (ret) {
442 skb_unlink(skb, &priv->rx_queue); 443 skb_unlink(skb, &priv->rx_queue);
443 usb_unanchor_urb(entry); 444 usb_unanchor_urb(entry);
444 goto err; 445 goto err;
445 } 446 }
446 usb_free_urb(entry);
447 } 447 }
448 return ret; 448 return ret;
449 449
450err: 450err:
451 usb_free_urb(entry);
452 kfree_skb(skb); 451 kfree_skb(skb);
453 usb_kill_anchored_urbs(&priv->anchored); 452 usb_kill_anchored_urbs(&priv->anchored);
454 return ret; 453 return ret;
@@ -956,8 +955,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
956 (RETRY_COUNT << 8 /* short retry limit */) | 955 (RETRY_COUNT << 8 /* short retry limit */) |
957 (RETRY_COUNT << 0 /* long retry limit */) | 956 (RETRY_COUNT << 0 /* long retry limit */) |
958 (7 << 21 /* MAX TX DMA */)); 957 (7 << 21 /* MAX TX DMA */));
959 rtl8187_init_urbs(dev); 958 ret = rtl8187_init_urbs(dev);
960 rtl8187b_init_status_urb(dev); 959 if (ret)
960 goto rtl8187_start_exit;
961 ret = rtl8187b_init_status_urb(dev);
962 if (ret)
963 usb_kill_anchored_urbs(&priv->anchored);
961 goto rtl8187_start_exit; 964 goto rtl8187_start_exit;
962 } 965 }
963 966
@@ -966,7 +969,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
966 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); 969 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
967 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); 970 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
968 971
969 rtl8187_init_urbs(dev); 972 ret = rtl8187_init_urbs(dev);
973 if (ret)
974 goto rtl8187_start_exit;
970 975
971 reg = RTL818X_RX_CONF_ONLYERLPKT | 976 reg = RTL818X_RX_CONF_ONLYERLPKT |
972 RTL818X_RX_CONF_RX_AUTORESETPHY | 977 RTL818X_RX_CONF_RX_AUTORESETPHY |
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c87cbe..703258742d28 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@ struct rtl_priv {
2057 that it points to the data allocated 2057 that it points to the data allocated
2058 beyond this structure like: 2058 beyond this structure like:
2059 rtl_pci_priv or rtl_usb_priv */ 2059 rtl_pci_priv or rtl_usb_priv */
2060 u8 priv[0]; 2060 u8 priv[0] __aligned(sizeof(void *));
2061}; 2061};
2062 2062
2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) 2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a1977430ddfb..5715318d6bab 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -184,6 +184,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
184 unsigned long rx_ring_ref, unsigned int tx_evtchn, 184 unsigned long rx_ring_ref, unsigned int tx_evtchn,
185 unsigned int rx_evtchn); 185 unsigned int rx_evtchn);
186void xenvif_disconnect(struct xenvif *vif); 186void xenvif_disconnect(struct xenvif *vif);
187void xenvif_free(struct xenvif *vif);
187 188
188int xenvif_xenbus_init(void); 189int xenvif_xenbus_init(void);
189void xenvif_xenbus_fini(void); 190void xenvif_xenbus_fini(void);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 625c6f49cfba..01bb854c7f62 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -353,6 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
353 } 353 }
354 354
355 netdev_dbg(dev, "Successfully created xenvif\n"); 355 netdev_dbg(dev, "Successfully created xenvif\n");
356
357 __module_get(THIS_MODULE);
358
356 return vif; 359 return vif;
357} 360}
358 361
@@ -366,8 +369,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
366 if (vif->tx_irq) 369 if (vif->tx_irq)
367 return 0; 370 return 0;
368 371
369 __module_get(THIS_MODULE);
370
371 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 372 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
372 if (err < 0) 373 if (err < 0)
373 goto err; 374 goto err;
@@ -406,7 +407,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
406 407
407 init_waitqueue_head(&vif->wq); 408 init_waitqueue_head(&vif->wq);
408 vif->task = kthread_create(xenvif_kthread, 409 vif->task = kthread_create(xenvif_kthread,
409 (void *)vif, vif->dev->name); 410 (void *)vif, "%s", vif->dev->name);
410 if (IS_ERR(vif->task)) { 411 if (IS_ERR(vif->task)) {
411 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 412 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
412 err = PTR_ERR(vif->task); 413 err = PTR_ERR(vif->task);
@@ -452,12 +453,6 @@ void xenvif_carrier_off(struct xenvif *vif)
452 453
453void xenvif_disconnect(struct xenvif *vif) 454void xenvif_disconnect(struct xenvif *vif)
454{ 455{
455 /* Disconnect funtion might get called by generic framework
456 * even before vif connects, so we need to check if we really
457 * need to do a module_put.
458 */
459 int need_module_put = 0;
460
461 if (netif_carrier_ok(vif->dev)) 456 if (netif_carrier_ok(vif->dev))
462 xenvif_carrier_off(vif); 457 xenvif_carrier_off(vif);
463 458
@@ -468,23 +463,22 @@ void xenvif_disconnect(struct xenvif *vif)
468 unbind_from_irqhandler(vif->tx_irq, vif); 463 unbind_from_irqhandler(vif->tx_irq, vif);
469 unbind_from_irqhandler(vif->rx_irq, vif); 464 unbind_from_irqhandler(vif->rx_irq, vif);
470 } 465 }
471 /* vif->irq is valid, we had a module_get in 466 vif->tx_irq = 0;
472 * xenvif_connect.
473 */
474 need_module_put = 1;
475 } 467 }
476 468
477 if (vif->task) 469 if (vif->task)
478 kthread_stop(vif->task); 470 kthread_stop(vif->task);
479 471
472 xenvif_unmap_frontend_rings(vif);
473}
474
475void xenvif_free(struct xenvif *vif)
476{
480 netif_napi_del(&vif->napi); 477 netif_napi_del(&vif->napi);
481 478
482 unregister_netdev(vif->dev); 479 unregister_netdev(vif->dev);
483 480
484 xenvif_unmap_frontend_rings(vif);
485
486 free_netdev(vif->dev); 481 free_netdev(vif->dev);
487 482
488 if (need_module_put) 483 module_put(THIS_MODULE);
489 module_put(THIS_MODULE);
490} 484}
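
The interface.c hunks move the module reference from connect/disconnect to the object's allocate/free path, the usual way to pin a module for the lifetime of a long-lived object. A sketch of that pattern follows (the my_obj names are illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj {
	int dummy;	/* placeholder state */
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Pin the module until my_obj_free(); connect/disconnect may then
	 * happen any number of times without touching the refcount. */
	__module_get(THIS_MODULE);
	return obj;
}

static void my_obj_free(struct my_obj *obj)
{
	kfree(obj);
	module_put(THIS_MODULE);
}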
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c70036..f3e591c611de 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -212,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
212 return false; 212 return false;
213} 213}
214 214
215struct xenvif_count_slot_state {
216 unsigned long copy_off;
217 bool head;
218};
219
220unsigned int xenvif_count_frag_slots(struct xenvif *vif,
221 unsigned long offset, unsigned long size,
222 struct xenvif_count_slot_state *state)
223{
224 unsigned count = 0;
225
226 offset &= ~PAGE_MASK;
227
228 while (size > 0) {
229 unsigned long bytes;
230
231 bytes = PAGE_SIZE - offset;
232
233 if (bytes > size)
234 bytes = size;
235
236 if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
237 count++;
238 state->copy_off = 0;
239 }
240
241 if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
242 bytes = MAX_BUFFER_OFFSET - state->copy_off;
243
244 state->copy_off += bytes;
245
246 offset += bytes;
247 size -= bytes;
248
249 if (offset == PAGE_SIZE)
250 offset = 0;
251
252 state->head = false;
253 }
254
255 return count;
256}
257
215/* 258/*
216 * Figure out how many ring slots we're going to need to send @skb to 259 * Figure out how many ring slots we're going to need to send @skb to
217 * the guest. This function is essentially a dry run of 260 * the guest. This function is essentially a dry run of
@@ -219,48 +262,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
219 */ 262 */
220unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) 263unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
221{ 264{
265 struct xenvif_count_slot_state state;
222 unsigned int count; 266 unsigned int count;
223 int i, copy_off; 267 unsigned char *data;
268 unsigned i;
224 269
225 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); 270 state.head = true;
271 state.copy_off = 0;
226 272
227 copy_off = skb_headlen(skb) % PAGE_SIZE; 273 /* Slot for the first (partial) page of data. */
274 count = 1;
228 275
276 /* Need a slot for the GSO prefix for GSO extra data? */
229 if (skb_shinfo(skb)->gso_size) 277 if (skb_shinfo(skb)->gso_size)
230 count++; 278 count++;
231 279
232 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 280 data = skb->data;
233 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 281 while (data < skb_tail_pointer(skb)) {
234 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; 282 unsigned long offset = offset_in_page(data);
235 unsigned long bytes; 283 unsigned long size = PAGE_SIZE - offset;
236
237 offset &= ~PAGE_MASK;
238
239 while (size > 0) {
240 BUG_ON(offset >= PAGE_SIZE);
241 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
242
243 bytes = PAGE_SIZE - offset;
244
245 if (bytes > size)
246 bytes = size;
247 284
248 if (start_new_rx_buffer(copy_off, bytes, 0)) { 285 if (data + size > skb_tail_pointer(skb))
249 count++; 286 size = skb_tail_pointer(skb) - data;
250 copy_off = 0;
251 }
252 287
253 if (copy_off + bytes > MAX_BUFFER_OFFSET) 288 count += xenvif_count_frag_slots(vif, offset, size, &state);
254 bytes = MAX_BUFFER_OFFSET - copy_off;
255 289
256 copy_off += bytes; 290 data += size;
291 }
257 292
258 offset += bytes; 293 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
259 size -= bytes; 294 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
295 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
260 296
261 if (offset == PAGE_SIZE) 297 count += xenvif_count_frag_slots(vif, offset, size, &state);
262 offset = 0;
263 }
264 } 298 }
265 return count; 299 return count;
266} 300}
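
In the netback.c hunk above, slot counting for the skb linear area and for each frag now goes through one helper, xenvif_count_frag_slots(), and the head is walked one page-sized chunk at a time. The loop below is a sketch of just that page walk (count_linear_pages is an illustrative name and it only counts chunks, not ring slots); it is not part of the patch.

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Count how many page-sized chunks the skb's linear data spans. */
static unsigned int count_linear_pages(const struct sk_buff *skb)
{
	unsigned char *data = skb->data;
	unsigned int count = 0;

	while (data < skb_tail_pointer(skb)) {
		unsigned long offset = offset_in_page(data);
		unsigned long size = PAGE_SIZE - offset;

		if (data + size > skb_tail_pointer(skb))
			size = skb_tail_pointer(skb) - data;

		count++;	/* the real code feeds each chunk to the slot helper */
		data += size;
	}
	return count;
}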
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1fe48fe364ed..b45bce20ad76 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
24struct backend_info { 24struct backend_info {
25 struct xenbus_device *dev; 25 struct xenbus_device *dev;
26 struct xenvif *vif; 26 struct xenvif *vif;
27
28 /* This is the state that will be reflected in xenstore when any
29 * active hotplug script completes.
30 */
31 enum xenbus_state state;
32
27 enum xenbus_state frontend_state; 33 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 34 struct xenbus_watch hotplug_status_watch;
29 u8 have_hotplug_status_watch:1; 35 u8 have_hotplug_status_watch:1;
@@ -42,7 +48,7 @@ static int netback_remove(struct xenbus_device *dev)
42 if (be->vif) { 48 if (be->vif) {
43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 49 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
44 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 50 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
45 xenvif_disconnect(be->vif); 51 xenvif_free(be->vif);
46 be->vif = NULL; 52 be->vif = NULL;
47 } 53 }
48 kfree(be); 54 kfree(be);
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev,
136 if (err) 142 if (err)
137 goto fail; 143 goto fail;
138 144
145 be->state = XenbusStateInitWait;
146
139 /* This kicks hotplug scripts, so do it immediately. */ 147 /* This kicks hotplug scripts, so do it immediately. */
140 backend_create_xenvif(be); 148 backend_create_xenvif(be);
141 149
@@ -208,15 +216,113 @@ static void backend_create_xenvif(struct backend_info *be)
208 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 216 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
209} 217}
210 218
219static void backend_disconnect(struct backend_info *be)
220{
221 if (be->vif)
222 xenvif_disconnect(be->vif);
223}
211 224
212static void disconnect_backend(struct xenbus_device *dev) 225static void backend_connect(struct backend_info *be)
213{ 226{
214 struct backend_info *be = dev_get_drvdata(&dev->dev); 227 if (be->vif)
228 connect(be);
229}
215 230
216 if (be->vif) { 231static inline void backend_switch_state(struct backend_info *be,
217 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 232 enum xenbus_state state)
218 xenvif_disconnect(be->vif); 233{
219 be->vif = NULL; 234 struct xenbus_device *dev = be->dev;
235
236 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
237 be->state = state;
238
239 /* If we are waiting for a hotplug script then defer the
240 * actual xenbus state change.
241 */
242 if (!be->have_hotplug_status_watch)
243 xenbus_switch_state(dev, state);
244}
245
246/* Handle backend state transitions:
247 *
248 * The backend state starts in InitWait and the following transitions are
249 * allowed.
250 *
251 * InitWait -> Connected
252 *
253 *    ^    \         |
254 *    |     \        |
255 *    |      \       |
256 *    |       \      |
257 *    |        \     |
258 *    |         \    |
259 *    |          V   V
260 *
261 *  Closed  <-> Closing
262 *
263 * The state argument specifies the eventual state of the backend and the
264 * function transitions to that state via the shortest path.
265 */
266static void set_backend_state(struct backend_info *be,
267 enum xenbus_state state)
268{
269 while (be->state != state) {
270 switch (be->state) {
271 case XenbusStateClosed:
272 switch (state) {
273 case XenbusStateInitWait:
274 case XenbusStateConnected:
275 pr_info("%s: prepare for reconnect\n",
276 be->dev->nodename);
277 backend_switch_state(be, XenbusStateInitWait);
278 break;
279 case XenbusStateClosing:
280 backend_switch_state(be, XenbusStateClosing);
281 break;
282 default:
283 BUG();
284 }
285 break;
286 case XenbusStateInitWait:
287 switch (state) {
288 case XenbusStateConnected:
289 backend_connect(be);
290 backend_switch_state(be, XenbusStateConnected);
291 break;
292 case XenbusStateClosing:
293 case XenbusStateClosed:
294 backend_switch_state(be, XenbusStateClosing);
295 break;
296 default:
297 BUG();
298 }
299 break;
300 case XenbusStateConnected:
301 switch (state) {
302 case XenbusStateInitWait:
303 case XenbusStateClosing:
304 case XenbusStateClosed:
305 backend_disconnect(be);
306 backend_switch_state(be, XenbusStateClosing);
307 break;
308 default:
309 BUG();
310 }
311 break;
312 case XenbusStateClosing:
313 switch (state) {
314 case XenbusStateInitWait:
315 case XenbusStateConnected:
316 case XenbusStateClosed:
317 backend_switch_state(be, XenbusStateClosed);
318 break;
319 default:
320 BUG();
321 }
322 break;
323 default:
324 BUG();
325 }
220 } 326 }
221} 327}
222 328
@@ -228,42 +334,33 @@ static void frontend_changed(struct xenbus_device *dev,
228{ 334{
229 struct backend_info *be = dev_get_drvdata(&dev->dev); 335 struct backend_info *be = dev_get_drvdata(&dev->dev);
230 336
231 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); 337 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
232 338
233 be->frontend_state = frontend_state; 339 be->frontend_state = frontend_state;
234 340
235 switch (frontend_state) { 341 switch (frontend_state) {
236 case XenbusStateInitialising: 342 case XenbusStateInitialising:
237 if (dev->state == XenbusStateClosed) { 343 set_backend_state(be, XenbusStateInitWait);
238 pr_info("%s: prepare for reconnect\n", dev->nodename);
239 xenbus_switch_state(dev, XenbusStateInitWait);
240 }
241 break; 344 break;
242 345
243 case XenbusStateInitialised: 346 case XenbusStateInitialised:
244 break; 347 break;
245 348
246 case XenbusStateConnected: 349 case XenbusStateConnected:
247 if (dev->state == XenbusStateConnected) 350 set_backend_state(be, XenbusStateConnected);
248 break;
249 backend_create_xenvif(be);
250 if (be->vif)
251 connect(be);
252 break; 351 break;
253 352
254 case XenbusStateClosing: 353 case XenbusStateClosing:
255 if (be->vif) 354 set_backend_state(be, XenbusStateClosing);
256 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
257 disconnect_backend(dev);
258 xenbus_switch_state(dev, XenbusStateClosing);
259 break; 355 break;
260 356
261 case XenbusStateClosed: 357 case XenbusStateClosed:
262 xenbus_switch_state(dev, XenbusStateClosed); 358 set_backend_state(be, XenbusStateClosed);
263 if (xenbus_dev_is_online(dev)) 359 if (xenbus_dev_is_online(dev))
264 break; 360 break;
265 /* fall through if not online */ 361 /* fall through if not online */
266 case XenbusStateUnknown: 362 case XenbusStateUnknown:
363 set_backend_state(be, XenbusStateClosed);
267 device_unregister(&dev->dev); 364 device_unregister(&dev->dev);
268 break; 365 break;
269 366
@@ -356,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
356 if (IS_ERR(str)) 453 if (IS_ERR(str))
357 return; 454 return;
358 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 455 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
359 xenbus_switch_state(be->dev, XenbusStateConnected); 456 /* Complete any pending state change */
457 xenbus_switch_state(be->dev, be->state);
458
360 /* Not interested in this watch anymore. */ 459 /* Not interested in this watch anymore. */
361 unregister_hotplug_status_watch(be); 460 unregister_hotplug_status_watch(be);
362 } 461 }
@@ -386,12 +485,8 @@ static void connect(struct backend_info *be)
386 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 485 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
387 hotplug_status_changed, 486 hotplug_status_changed,
388 "%s/%s", dev->nodename, "hotplug-status"); 487 "%s/%s", dev->nodename, "hotplug-status");
389 if (err) { 488 if (!err)
390 /* Switch now, since we can't do a watch. */
391 xenbus_switch_state(dev, XenbusStateConnected);
392 } else {
393 be->have_hotplug_status_watch = 1; 489 be->have_hotplug_status_watch = 1;
394 }
395 490
396 netif_wake_queue(be->vif->dev); 491 netif_wake_queue(be->vif->dev);
397} 492}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c29ee4ed0ae..b0299e6d9a3f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
47 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) 47 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
48 return; 48 return;
49 49
50 if (pci_dev->pme_poll)
51 pci_dev->pme_poll = false;
52
50 if (pci_dev->current_state == PCI_D3cold) { 53 if (pci_dev->current_state == PCI_D3cold) {
51 pci_wakeup_event(pci_dev); 54 pci_wakeup_event(pci_dev);
52 pm_runtime_resume(&pci_dev->dev); 55 pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
57 if (pci_dev->pme_support) 60 if (pci_dev->pme_support)
58 pci_check_pme_status(pci_dev); 61 pci_check_pme_status(pci_dev);
59 62
60 if (pci_dev->pme_poll)
61 pci_dev->pme_poll = false;
62
63 pci_wakeup_event(pci_dev); 63 pci_wakeup_event(pci_dev);
64 pm_runtime_resume(&pci_dev->dev); 64 pm_runtime_resume(&pci_dev->dev);
65 65
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
1155 1155
1156 pci_enable_bridge(dev->bus->self); 1156 pci_enable_bridge(dev->bus->self);
1157 1157
1158 if (pci_is_enabled(dev)) 1158 if (pci_is_enabled(dev)) {
1159 if (!dev->is_busmaster) {
1160 dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
1161 pci_set_master(dev);
1162 }
1159 return; 1163 return;
1164 }
1165
1160 retval = pci_enable_device(dev); 1166 retval = pci_enable_device(dev);
1161 if (retval) 1167 if (retval)
1162 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1168 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a138965c01cb..b8fcc38c0d11 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -490,7 +490,7 @@ exit:
490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps 490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps
491 * <newvalue> reflects the new config and is driver dependant 491 * <newvalue> reflects the new config and is driver dependant
492 */ 492 */
493static int pinconf_dbg_config_write(struct file *file, 493static ssize_t pinconf_dbg_config_write(struct file *file,
494 const char __user *user_buf, size_t count, loff_t *ppos) 494 const char __user *user_buf, size_t count, loff_t *ppos)
495{ 495{
496 struct pinctrl_maps *maps_node; 496 struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
508 int i; 508 int i;
509 509
510 /* Get userspace string and assure termination */ 510 /* Get userspace string and assure termination */
511 buf_size = min(count, (size_t)(sizeof(buf)-1)); 511 buf_size = min(count, sizeof(buf) - 1);
512 if (copy_from_user(buf, user_buf, buf_size)) 512 if (copy_from_user(buf, user_buf, buf_size))
513 return -EFAULT; 513 return -EFAULT;
514 buf[buf_size] = 0; 514 buf[buf_size] = 0;
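
The pinconf.c hunk fixes the debugfs write handler to return ssize_t and to bound the copy by the destination buffer. A minimal sketch of a safe debugfs write handler follows (example_dbg_write and the 128-byte buffer are illustrative, not the driver's actual values):

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_dbg_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	char buf[128];
	size_t len = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';	/* always NUL-terminate before parsing */

	/* parse the command held in buf here */

	return count;
}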
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 2689f8d01a1e..155b1b3a0e7a 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
663/* pin banks of s5pv210 pin-controller */ 663/* pin banks of s5pv210 pin-controller */
664static struct samsung_pin_bank s5pv210_pin_bank[] = { 664static struct samsung_pin_bank s5pv210_pin_bank[] = {
665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
666 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 666 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), 668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), 669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), 670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
671 EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), 671 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
672 EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), 672 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
673 EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), 673 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
674 EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), 674 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), 675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), 676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
677 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), 677 EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), 678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), 679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), 680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 82638fac3cfa..30c4d356cb33 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
891 param = pinconf_to_config_param(configs[i]); 891 param = pinconf_to_config_param(configs[i]);
892 param_val = pinconf_to_config_argument(configs[i]); 892 param_val = pinconf_to_config_argument(configs[i]);
893 893
894 if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
895 continue;
896
894 switch (param) { 897 switch (param) {
895 case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
896 return 0;
897 case PIN_CONFIG_BIAS_DISABLE: 898 case PIN_CONFIG_BIAS_DISABLE:
898 case PIN_CONFIG_BIAS_PULL_UP: 899 case PIN_CONFIG_BIAS_PULL_UP:
899 case PIN_CONFIG_BIAS_PULL_DOWN: 900 case PIN_CONFIG_BIAS_PULL_DOWN:
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 622c4854977e..93c9e3899d5e 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Arthur: Pritesh Raithatha <praithatha@nvidia.com> 6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
2763}; 2763};
2764module_platform_driver(tegra114_pinctrl_driver); 2764module_platform_driver(tegra114_pinctrl_driver);
2765 2765
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); 2766MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); 2767MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
2769MODULE_LICENSE("GPL v2"); 2768MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 1a7816390773..b9f2653e4ef9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
709 struct of_regulator_match **da9063_reg_matches) 709 struct of_regulator_match **da9063_reg_matches)
710{ 710{
711 da9063_reg_matches = NULL; 711 da9063_reg_matches = NULL;
712 return PTR_ERR(-ENODEV); 712 return ERR_PTR(-ENODEV);
713} 713}
714#endif 714#endif
715 715
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 488dfe7ce9a6..7e2b165972e6 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
201#define SMPS_CTRL_MODE_ECO 0x02 201#define SMPS_CTRL_MODE_ECO 0x02
202#define SMPS_CTRL_MODE_PWM 0x03 202#define SMPS_CTRL_MODE_PWM 0x03
203 203
204/* These values are derived from the data sheet. And are the number of steps 204#define PALMAS_SMPS_NUM_VOLTAGES 122
205 * where there is a voltage change, the ranges at beginning and end of register
206 * max/min values where there are no change are ommitted.
207 *
208 * So they are basically (maxV-minV)/stepV
209 */
210#define PALMAS_SMPS_NUM_VOLTAGES 117
211#define PALMAS_SMPS10_NUM_VOLTAGES 2 205#define PALMAS_SMPS10_NUM_VOLTAGES 2
212#define PALMAS_LDO_NUM_VOLTAGES 50 206#define PALMAS_LDO_NUM_VOLTAGES 50
213 207
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
979 pmic->desc[id].min_uV = 900000; 973 pmic->desc[id].min_uV = 900000;
980 pmic->desc[id].uV_step = 50000; 974 pmic->desc[id].uV_step = 50000;
981 pmic->desc[id].linear_min_sel = 1; 975 pmic->desc[id].linear_min_sel = 1;
976 pmic->desc[id].enable_time = 500;
982 pmic->desc[id].vsel_reg = 977 pmic->desc[id].vsel_reg =
983 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, 978 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
984 palmas_regs_info[id].vsel_addr); 979 palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
997 pmic->desc[id].min_uV = 450000; 992 pmic->desc[id].min_uV = 450000;
998 pmic->desc[id].uV_step = 25000; 993 pmic->desc[id].uV_step = 25000;
999 } 994 }
995
996 /* LOD6 in vibrator mode will have enable time 2000us */
997 if (pdata && pdata->ldo6_vibrator &&
998 (id == PALMAS_REG_LDO6))
999 pmic->desc[id].enable_time = 2000;
1000 } else { 1000 } else {
1001 pmic->desc[id].n_voltages = 1; 1001 pmic->desc[id].n_voltages = 1;
1002 pmic->desc[id].ops = &palmas_ops_extreg; 1002 pmic->desc[id].ops = &palmas_ops_extreg;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d8e3e1262bc2..20c271d49dcb 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, 279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
280 abb->base); 280 abb->base);
281 281
282 /* program LDO VBB vset override if needed */ 282 /*
283 if (abb->ldo_base) 283 * program LDO VBB vset override if needed for !bypass mode
284 * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
285 * be performed *before* switch to bias mode else VBB glitches.
286 */
287 if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
284 ti_abb_program_ldovbb(dev, abb, info); 288 ti_abb_program_ldovbb(dev, abb, info);
285 289
286 /* Initiate ABB ldo change */ 290 /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
295 if (ret) 299 if (ret)
296 goto out; 300 goto out;
297 301
302 /*
303 * Reset LDO VBB vset override bypass mode
304 * XXX: Do not switch sequence - for bypass, LDO override reset *must*
305 * be performed *after* switch to bypass else VBB glitches.
306 */
307 if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
308 ti_abb_program_ldovbb(dev, abb, info);
309
298out: 310out:
299 return ret; 311 return ret;
300} 312}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 1432b26ef2e9..2205fbc2c37b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
63 */ 63 */
64 64
65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { 65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
66 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, 66 { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
67 .uV_step = 50000 }, 67 .uV_step = 50000 },
68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, 68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
69 .uV_step = 100000 }, 69 .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
332 */ 332 */
333 333
334static const struct regulator_linear_range wm831x_aldo_ranges[] = { 334static const struct regulator_linear_range wm831x_aldo_ranges[] = {
335 { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, 335 { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
336 .uV_step = 50000 }, 336 .uV_step = 50000 },
337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, 337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
338 .uV_step = 100000 }, 338 .uV_step = 100000 },
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 835b5f0f344e..61ca9292a429 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
543} 543}
544 544
545static const struct regulator_linear_range wm8350_ldo_ranges[] = { 545static const struct regulator_linear_range wm8350_ldo_ranges[] = {
546 { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, 546 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
547 .uV_step = 50000 }, 547 .uV_step = 50000 },
548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, 548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
549 .uV_step = 100000 }, 549 .uV_step = 100000 },
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 08b22a901c25..d7ca9305ff45 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -105,7 +105,7 @@
105#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) 105#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
106#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) 106#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
107#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) 107#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
108#define BNX2FC_5771X_DB_PAGE_SIZE 128 108#define BNX2X_DB_SHIFT 3
109 109
110#define BNX2FC_TASK_SIZE 128 110#define BNX2FC_TASK_SIZE 128
111#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) 111#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a8f8f9..46a37657307f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1421 1421
1422 reg_base = pci_resource_start(hba->pcidev, 1422 reg_base = pci_resource_start(hba->pcidev,
1423 BNX2X_DOORBELL_PCI_BAR); 1423 BNX2X_DOORBELL_PCI_BAR);
1424 reg_off = BNX2FC_5771X_DB_PAGE_SIZE * 1424 reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1425 (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1426 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); 1425 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1427 if (!tgt->ctx_base) 1426 if (!tgt->ctx_base)
1428 return -ENOMEM; 1427 return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6940f0930a84..c73bbcb63c02 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -64,7 +64,7 @@
64#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 64#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
65#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 65#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
66 66
67#define BNX2I_5771X_DBELL_PAGE_SIZE 128 67#define BNX2X_DB_SHIFT 3
68 68
69/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ 69/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
70#define MAX_BD_LENGTH 65535 70#define MAX_BD_LENGTH 65535
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index af3e675d4d48..5be718c241c4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2738 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 2738 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2739 reg_base = pci_resource_start(ep->hba->pcidev, 2739 reg_base = pci_resource_start(ep->hba->pcidev,
2740 BNX2X_DOORBELL_PCI_BAR); 2740 BNX2X_DOORBELL_PCI_BAR);
2741 reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) + 2741 reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
2742 DPM_TRIGER_TYPE;
2743 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); 2742 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2744 goto arm_cq; 2743 goto arm_cq;
2745 } 2744 }
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index a84aab47a113..f73287eab373 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -96,6 +96,15 @@ config COMEDI_SKEL
96 To compile this driver as a module, choose M here: the module will be 96 To compile this driver as a module, choose M here: the module will be
97 called skel. 97 called skel.
98 98
99config COMEDI_SSV_DNP
100 tristate "SSV Embedded Systems DIL/Net-PC support"
101 depends on X86_32 || COMPILE_TEST
102 ---help---
103 Enable support for SSV Embedded Systems DIL/Net-PC
104
105 To compile this driver as a module, choose M here: the module will be
106 called ssv_dnp.
107
99endif # COMEDI_MISC_DRIVERS 108endif # COMEDI_MISC_DRIVERS
100 109
101menuconfig COMEDI_ISA_DRIVERS 110menuconfig COMEDI_ISA_DRIVERS
@@ -386,6 +395,14 @@ config COMEDI_DMM32AT
386 To compile this driver as a module, choose M here: the module will be 395 To compile this driver as a module, choose M here: the module will be
387 called dmm32at. 396 called dmm32at.
388 397
398config COMEDI_UNIOXX5
399 tristate "Fastwel UNIOxx-5 analog and digital io board support"
400 ---help---
401 Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
402
403 To compile this driver as a module, choose M here: the module will be
404 called unioxx5.
405
389config COMEDI_FL512 406config COMEDI_FL512
390 tristate "FL512 ISA card support" 407 tristate "FL512 ISA card support"
391 ---help--- 408 ---help---
@@ -855,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
855 To compile this driver as a module, choose M here: the module will be 872 To compile this driver as a module, choose M here: the module will be
856 called dyna_pci10xx. 873 called dyna_pci10xx.
857 874
858config COMEDI_UNIOXX5
859 tristate "Fastwel UNIOxx-5 analog and digital io board support"
860 ---help---
861 Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
862
863 To compile this driver as a module, choose M here: the module will be
864 called unioxx5.
865
866config COMEDI_GSC_HPDI 875config COMEDI_GSC_HPDI
867 tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support" 876 tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
868 select COMEDI_FC 877 select COMEDI_FC
@@ -1085,14 +1094,6 @@ config COMEDI_S626
1085 To compile this driver as a module, choose M here: the module will be 1094 To compile this driver as a module, choose M here: the module will be
1086 called s626. 1095 called s626.
1087 1096
1088config COMEDI_SSV_DNP
1089 tristate "SSV Embedded Systems DIL/Net-PC support"
1090 ---help---
1091 Enable support for SSV Embedded Systems DIL/Net-PC
1092
1093 To compile this driver as a module, choose M here: the module will be
1094 called ssv_dnp.
1095
1096config COMEDI_MITE 1097config COMEDI_MITE
1097 depends on HAS_DMA 1098 depends on HAS_DMA
1098 tristate 1099 tristate
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3ba4c5712dff..853f62b2b1a9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
369{ 369{
370 const struct ni_65xx_board *board = comedi_board(dev); 370 const struct ni_65xx_board *board = comedi_board(dev);
371 struct ni_65xx_private *devpriv = dev->private; 371 struct ni_65xx_private *devpriv = dev->private;
372 unsigned base_bitfield_channel; 372 int base_bitfield_channel;
373 const unsigned max_ports_per_bitfield = 5;
374 unsigned read_bits = 0; 373 unsigned read_bits = 0;
375 unsigned j; 374 int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
375 int port_offset;
376 376
377 base_bitfield_channel = CR_CHAN(insn->chanspec); 377 base_bitfield_channel = CR_CHAN(insn->chanspec);
378 for (j = 0; j < max_ports_per_bitfield; ++j) { 378 for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
379 const unsigned port_offset = 379 port_offset <= last_port_offset; port_offset++) {
380 ni_65xx_port_by_channel(base_bitfield_channel) + j; 380 unsigned port = sprivate(s)->base_port + port_offset;
381 const unsigned port = 381 int base_port_channel = port_offset * ni_65xx_channels_per_port;
382 sprivate(s)->base_port + port_offset;
383 unsigned base_port_channel;
384 unsigned port_mask, port_data, port_read_bits; 382 unsigned port_mask, port_data, port_read_bits;
385 int bitshift; 383 int bitshift = base_port_channel - base_bitfield_channel;
386 if (port >= ni_65xx_total_num_ports(board)) 384
385 if (bitshift >= 32)
387 break; 386 break;
388 base_port_channel = port_offset * ni_65xx_channels_per_port;
389 port_mask = data[0]; 387 port_mask = data[0];
390 port_data = data[1]; 388 port_data = data[1];
391 bitshift = base_port_channel - base_bitfield_channel;
392 if (bitshift >= 32 || bitshift <= -32)
393 break;
394 if (bitshift > 0) { 389 if (bitshift > 0) {
395 port_mask >>= bitshift; 390 port_mask >>= bitshift;
396 port_data >>= bitshift; 391 port_data >>= bitshift;
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 724a685753dd..40ef785a0428 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -474,7 +474,7 @@ static void dgap_cleanup_board(struct board_t *brd)
474 474
475 DGAP_LOCK(dgap_global_lock, flags); 475 DGAP_LOCK(dgap_global_lock, flags);
476 brd->msgbuf = NULL; 476 brd->msgbuf = NULL;
477 printk(brd->msgbuf_head); 477 printk("%s", brd->msgbuf_head);
478 kfree(brd->msgbuf_head); 478 kfree(brd->msgbuf_head);
479 brd->msgbuf_head = NULL; 479 brd->msgbuf_head = NULL;
480 DGAP_UNLOCK(dgap_global_lock, flags); 480 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -628,7 +628,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
628 DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i)); 628 DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
629 DGAP_LOCK(dgap_global_lock, flags); 629 DGAP_LOCK(dgap_global_lock, flags);
630 brd->msgbuf = NULL; 630 brd->msgbuf = NULL;
631 printk(brd->msgbuf_head); 631 printk("%s", brd->msgbuf_head);
632 kfree(brd->msgbuf_head); 632 kfree(brd->msgbuf_head);
633 brd->msgbuf_head = NULL; 633 brd->msgbuf_head = NULL;
634 DGAP_UNLOCK(dgap_global_lock, flags); 634 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -955,25 +955,28 @@ static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
955 char buf[1024]; 955 char buf[1024];
956 int i; 956 int i;
957 unsigned long flags; 957 unsigned long flags;
958 size_t length;
958 959
959 DGAP_LOCK(dgap_global_lock, flags); 960 DGAP_LOCK(dgap_global_lock, flags);
960 961
961 /* Format buf using fmt and arguments contained in ap. */ 962 /* Format buf using fmt and arguments contained in ap. */
962 va_start(ap, fmt); 963 va_start(ap, fmt);
963 i = vsprintf(buf, fmt, ap); 964 i = vsnprintf(buf, sizeof(buf), fmt, ap);
964 va_end(ap); 965 va_end(ap);
965 966
966 DPR((buf)); 967 DPR((buf));
967 968
968 if (!brd || !brd->msgbuf) { 969 if (!brd || !brd->msgbuf) {
969 printk(buf); 970 printk("%s", buf);
970 DGAP_UNLOCK(dgap_global_lock, flags); 971 DGAP_UNLOCK(dgap_global_lock, flags);
971 return; 972 return;
972 } 973 }
973 974
974 memcpy(brd->msgbuf, buf, strlen(buf)); 975 length = strlen(buf) + 1;
975 brd->msgbuf += strlen(buf); 976 if (brd->msgbuf - brd->msgbuf_head < length)
976 *brd->msgbuf = 0; 977 length = brd->msgbuf - brd->msgbuf_head;
978 memcpy(brd->msgbuf, buf, length);
979 brd->msgbuf += length;
977 980
978 DGAP_UNLOCK(dgap_global_lock, flags); 981 DGAP_UNLOCK(dgap_global_lock, flags);
979} 982}
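
The dgap hunks above combine two related hardening patterns: text that may contain a '%' must never be passed as the printk()/printf() format argument, and the formatting step itself should be bounded (vsprintf() replaced by vsnprintf()). Below is a minimal userspace sketch of both points; the buffer and helper names are invented for illustration and are not part of the patch.

    #include <stdarg.h>
    #include <stdio.h>

    static char logbuf[64];

    /* Bounded formatting: vsnprintf() can never overrun logbuf. */
    static void log_msg(const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(logbuf, sizeof(logbuf), fmt, ap);
        va_end(ap);

        /*
         * Print the result through a literal format string.  Calling
         * printf(logbuf) here would reinterpret any '%' in the message
         * as a conversion specifier, which is exactly the problem with
         * printk(brd->msgbuf_head) in the original driver.
         */
        printf("%s\n", logbuf);
    }

    int main(void)
    {
        /* A message that happens to contain '%' is printed verbatim. */
        log_msg("progress: 100%% done on board %d", 3);
        return 0;
    }
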
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index f8c1e22585d6..71d2b83cc3a1 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -454,7 +454,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
454 454
455 DGNC_LOCK(dgnc_global_lock, flags); 455 DGNC_LOCK(dgnc_global_lock, flags);
456 brd->msgbuf = NULL; 456 brd->msgbuf = NULL;
457 printk(brd->msgbuf_head); 457 printk("%s", brd->msgbuf_head);
458 kfree(brd->msgbuf_head); 458 kfree(brd->msgbuf_head);
459 brd->msgbuf_head = NULL; 459 brd->msgbuf_head = NULL;
460 DGNC_UNLOCK(dgnc_global_lock, flags); 460 DGNC_UNLOCK(dgnc_global_lock, flags);
@@ -710,7 +710,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
710 DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i)); 710 DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
711 DGNC_LOCK(dgnc_global_lock, flags); 711 DGNC_LOCK(dgnc_global_lock, flags);
712 brd->msgbuf = NULL; 712 brd->msgbuf = NULL;
713 printk(brd->msgbuf_head); 713 printk("%s", brd->msgbuf_head);
714 kfree(brd->msgbuf_head); 714 kfree(brd->msgbuf_head);
715 brd->msgbuf_head = NULL; 715 brd->msgbuf_head = NULL;
716 DGNC_UNLOCK(dgnc_global_lock, flags); 716 DGNC_UNLOCK(dgnc_global_lock, flags);
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index db4d6dc03243..b36feb080cba 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
37 37
38config IIO_SIMPLE_DUMMY_BUFFER 38config IIO_SIMPLE_DUMMY_BUFFER
39 boolean "Buffered capture support" 39 boolean "Buffered capture support"
40 depends on IIO_KFIFO_BUF 40 select IIO_KFIFO_BUF
41 help 41 help
42 Add buffered data capture to the simple dummy driver. 42 Add buffered data capture to the simple dummy driver.
43 43
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 351936c3efd6..e4998e4d4434 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -563,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
563 mutex_init(&chip->lock); 563 mutex_init(&chip->lock);
564 564
565 chip->lux_scale = 1; 565 chip->lux_scale = 1;
566 chip->lux_uscale = 0;
566 chip->range = 1000; 567 chip->range = 1000;
567 chip->adc_bit = 16; 568 chip->adc_bit = 16;
568 chip->suspended = false; 569 chip->suspended = false;
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index d2748c329eae..c3f3f539e787 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -229,7 +229,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
229 if (result < 0) 229 if (result < 0)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 *val = result; 232 *val = sign_extend32(result, 15);
233 return IIO_VAL_INT; 233 return IIO_VAL_INT;
234} 234}
235 235
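
The hmc5843 change interprets the 16-bit raw reading as a signed quantity by sign-extending bit 15 before reporting it. A small standalone sketch of the same operation, using a local reimplementation of sign_extend32() so it compiles outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Same semantics as the kernel's sign_extend32(value, index): treat
     * bit 'index' as the sign bit and extend it up through bit 31.
     */
    static int32_t sign_extend32_demo(uint32_t value, int index)
    {
        int shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
        uint32_t raw = 0xFF38;  /* 16-bit two's-complement sensor reading */

        printf("unsigned: %u\n", raw);                          /* 65336 */
        printf("extended: %d\n", sign_extend32_demo(raw, 15));  /* -200  */
        return 0;
    }
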
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index a802cf2491d6..4c6d2041260b 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
299 if (ret) 299 if (ret)
300 iio_device_free(indio_dev); 300 iio_device_free(indio_dev);
301 301
302 return 0; 302 return ret;
303} 303}
304 304
305static int ade7854_spi_remove(struct spi_device *spi) 305static int ade7854_spi_remove(struct spi_device *spi)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
41 struct list_head encoder_list; 41 struct list_head encoder_list;
42 struct list_head connector_list; 42 struct list_head connector_list;
43 struct mutex mutex; 43 struct mutex mutex;
44 int references;
45 int pipes; 44 int pipes;
46 struct drm_fbdev_cma *fbhelper; 45 struct drm_fbdev_cma *fbhelper;
47}; 46};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
241 } 240 }
242 } 241 }
243 242
244 imxdrm->references++;
245
246 return imxdrm->drm; 243 return imxdrm->drm;
247 244
248unwind_crtc: 245unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
280 list_for_each_entry(enc, &imxdrm->encoder_list, list) 277 list_for_each_entry(enc, &imxdrm->encoder_list, list)
281 module_put(enc->owner); 278 module_put(enc->owner);
282 279
283 imxdrm->references--;
284
285 mutex_unlock(&imxdrm->mutex); 280 mutex_unlock(&imxdrm->mutex);
286} 281}
287EXPORT_SYMBOL_GPL(imx_drm_device_put); 282EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
485 480
486 mutex_lock(&imxdrm->mutex); 481 mutex_lock(&imxdrm->mutex);
487 482
488 if (imxdrm->references) { 483 if (imxdrm->drm->open_count) {
489 ret = -EBUSY; 484 ret = -EBUSY;
490 goto err_busy; 485 goto err_busy;
491 } 486 }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
564 559
565 mutex_lock(&imxdrm->mutex); 560 mutex_lock(&imxdrm->mutex);
566 561
567 if (imxdrm->references) { 562 if (imxdrm->drm->open_count) {
568 ret = -EBUSY; 563 ret = -EBUSY;
569 goto err_busy; 564 goto err_busy;
570 } 565 }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
709 704
710 mutex_lock(&imxdrm->mutex); 705 mutex_lock(&imxdrm->mutex);
711 706
712 if (imxdrm->references) { 707 if (imxdrm->drm->open_count) {
713 ret = -EBUSY; 708 ret = -EBUSY;
714 goto err_busy; 709 goto err_busy;
715 } 710 }
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 2f44d56700af..776d3632dc7d 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
244 struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); 244 struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
245 struct usb_line6_toneport *toneport = 245 struct usb_line6_toneport *toneport =
246 (struct usb_line6_toneport *)line6pcm->line6; 246 (struct usb_line6_toneport *)line6pcm->line6;
247 unsigned int source;
247 248
248 if (ucontrol->value.enumerated.item[0] == toneport->source) 249 source = ucontrol->value.enumerated.item[0];
250 if (source >= ARRAY_SIZE(toneport_source_info))
251 return -EINVAL;
252 if (source == toneport->source)
249 return 0; 253 return 0;
250 254
251 toneport->source = ucontrol->value.enumerated.item[0]; 255 toneport->source = source;
252 toneport_send_cmd(toneport->line6.usbdev, 256 toneport_send_cmd(toneport->line6.usbdev,
253 toneport_source_info[toneport->source].code, 0x0000); 257 toneport_source_info[source].code, 0x0000);
254 return 1; 258 return 1;
255} 259}
256 260
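
The toneport hunk range-checks a control value supplied from userspace before it is used to index a fixed table. A driver-agnostic sketch of that check follows; the table contents and helper names are invented for illustration.

    #include <errno.h>
    #include <stddef.h>

    struct source_info {
        int code;
        const char *name;
    };

    static const struct source_info source_info[] = {
        { 0x0000, "Mic" },
        { 0x0001, "Line" },
        { 0x0002, "Instrument" },
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /*
     * Validate an index that ultimately comes from userspace before it
     * subscripts a fixed table, mirroring snd_toneport_source_put().
     */
    static int set_source(unsigned int *current_source, unsigned int requested)
    {
        if (requested >= ARRAY_SIZE(source_info))
            return -EINVAL;
        if (requested == *current_source)
            return 0;               /* nothing changed */

        *current_source = requested;
        return 1;                   /* value changed */
    }

    int main(void)
    {
        unsigned int cur = 0;

        /* 7 is out of range for the 3-entry table and must be rejected. */
        return set_source(&cur, 7) == -EINVAL ? 0 : 1;
    }
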
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 086ca3d7241b..26b49a24b3df 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
1802int 1802int
1803kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) 1803kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1804{ 1804{
1805 struct task_struct *task = kthread_run(fn, arg, name); 1805 struct task_struct *task = kthread_run(fn, arg, "%s", name);
1806 1806
1807 if (IS_ERR(task)) 1807 if (IS_ERR(task))
1808 return PTR_ERR(task); 1808 return PTR_ERR(task);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 2c581b7fa8ad..68a4f52ec998 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -1005,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
1005int 1005int
1006ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) 1006ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
1007{ 1007{
1008 struct task_struct *task = kthread_run(fn, arg, name); 1008 struct task_struct *task = kthread_run(fn, arg, "%s", name);
1009 1009
1010 if (IS_ERR(task)) 1010 if (IS_ERR(task))
1011 return PTR_ERR(task); 1011 return PTR_ERR(task);
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 4e898e491860..2156a44d0740 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,6 +1,6 @@
1config LUSTRE_FS 1config LUSTRE_FS
2 tristate "Lustre file system client support" 2 tristate "Lustre file system client support"
3 depends on INET && m 3 depends on INET && m && !MIPS && !XTENSA && !SUPERH
4 select LNET 4 select LNET
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_CRC32 6 select CRYPTO_CRC32
@@ -52,7 +52,7 @@ config LUSTRE_DEBUG_EXPENSIVE_CHECK
52config LUSTRE_TRANSLATE_ERRNOS 52config LUSTRE_TRANSLATE_ERRNOS
53 bool 53 bool
54 depends on LUSTRE_FS && !X86 54 depends on LUSTRE_FS && !X86
55 default true 55 default y
56 56
57config LUSTRE_LLITE_LLOOP 57config LUSTRE_LLITE_LLOOP
58 bool "Lustre virtual block device" 58 bool "Lustre virtual block device"
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 3916bda3004c..a100a0b96381 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -800,9 +800,9 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
800 800
801 init_completion(&bltd.bltd_comp); 801 init_completion(&bltd.bltd_comp);
802 bltd.bltd_num = atomic_read(&blp->blp_num_threads); 802 bltd.bltd_num = atomic_read(&blp->blp_num_threads);
803 snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1, 803 snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
804 "ldlm_bl_%02d", bltd.bltd_num); 804 "ldlm_bl_%02d", bltd.bltd_num);
805 task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name); 805 task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
806 if (IS_ERR(task)) { 806 if (IS_ERR(task)) {
807 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n", 807 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
808 atomic_read(&blp->blp_num_threads), PTR_ERR(task)); 808 atomic_read(&blp->blp_num_threads), PTR_ERR(task));
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 462172d1a756..1a55c81892e0 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -397,7 +397,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
397 sched->ws_name, sched->ws_nthreads); 397 sched->ws_name, sched->ws_nthreads);
398 } 398 }
399 399
400 task = kthread_run(cfs_wi_scheduler, sched, name); 400 task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
401 if (!IS_ERR(task)) { 401 if (!IS_ERR(task)) {
402 nthrs--; 402 nthrs--;
403 continue; 403 continue;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1387 if (nob > ulsm_nob) 1387 if (nob > ulsm_nob)
1388 return (-EINVAL); 1388 return (-EINVAL);
1389 1389
1390 if (copy_to_user (ulsm, lsm, sizeof(ulsm))) 1390 if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
1391 return (-EFAULT); 1391 return (-EFAULT);
1392 1392
1393 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1393 for (i = 0; i < lsm->lsm_stripe_count; i++) {
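
This hunk, and the later cvmx-usb, rtw_mp and rtl8188e_dm hunks, all fix the same C pitfall: sizeof applied to a pointer yields the size of the pointer itself, not of the object it points to, so the copy or memset covers only a handful of bytes. A compilable illustration with an invented structure name:

    #include <stdio.h>
    #include <string.h>

    struct lov_stripe_md_demo {
        int stripe_count;
        char pad[120];
    };

    int main(void)
    {
        struct lov_stripe_md_demo src = { .stripe_count = 4 };
        struct lov_stripe_md_demo dst;
        struct lov_stripe_md_demo *ulsm = &dst;

        /* sizeof(ulsm) is only the pointer size (typically 8)... */
        printf("sizeof(ulsm)  = %zu\n", sizeof(ulsm));
        /* ...while sizeof(*ulsm) is the full structure being copied. */
        printf("sizeof(*ulsm) = %zu\n", sizeof(*ulsm));

        memcpy(ulsm, &src, sizeof(*ulsm));  /* correct length */
        return 0;
    }
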
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 227a0ae9593b..5dec771d70ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -383,8 +383,8 @@ int ptlrpc_start_pinger(void)
383 383
384 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we 384 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
385 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */ 385 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
386 rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, 386 rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
387 &pinger_thread, pinger_thread.t_name)); 387 "%s", pinger_thread.t_name));
388 if (IS_ERR_VALUE(rc)) { 388 if (IS_ERR_VALUE(rc)) {
389 CERROR("cannot start thread: %d\n", rc); 389 CERROR("cannot start thread: %d\n", rc);
390 return rc; 390 return rc;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index fbdeff65d059..89c9be96f454 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -615,7 +615,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
615 init_completion(&pc->pc_starting); 615 init_completion(&pc->pc_starting);
616 init_completion(&pc->pc_finishing); 616 init_completion(&pc->pc_finishing);
617 spin_lock_init(&pc->pc_lock); 617 spin_lock_init(&pc->pc_lock);
618 strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1); 618 strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
619 pc->pc_set = ptlrpc_prep_set(); 619 pc->pc_set = ptlrpc_prep_set();
620 if (pc->pc_set == NULL) 620 if (pc->pc_set == NULL)
621 GOTO(out, rc = -ENOMEM); 621 GOTO(out, rc = -ENOMEM);
@@ -638,7 +638,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
638 GOTO(out, rc); 638 GOTO(out, rc);
639 } 639 }
640 640
641 task = kthread_run(ptlrpcd, pc, pc->pc_name); 641 task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
642 if (IS_ERR(task)) 642 if (IS_ERR(task))
643 GOTO(out, rc = PTR_ERR(task)); 643 GOTO(out, rc = PTR_ERR(task));
644 644
@@ -745,7 +745,7 @@ static int ptlrpcd_init(void)
745 if (ptlrpcds == NULL) 745 if (ptlrpcds == NULL)
746 GOTO(out, rc = -ENOMEM); 746 GOTO(out, rc = -ENOMEM);
747 747
748 snprintf(name, 15, "ptlrpcd_rcv"); 748 snprintf(name, sizeof(name), "ptlrpcd_rcv");
749 set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags); 749 set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
750 rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv); 750 rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
751 if (rc < 0) 751 if (rc < 0)
@@ -764,7 +764,7 @@ static int ptlrpcd_init(void)
764 * unnecessary dependency. But how to distribute async RPCs load 764 * unnecessary dependency. But how to distribute async RPCs load
765 * among all the ptlrpc daemons becomes another trouble. */ 765 * among all the ptlrpc daemons becomes another trouble. */
766 for (i = 0; i < nthreads; i++) { 766 for (i = 0; i < nthreads; i++) {
767 snprintf(name, 15, "ptlrpcd_%d", i); 767 snprintf(name, sizeof(name), "ptlrpcd_%d", i);
768 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]); 768 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
769 if (rc < 0) 769 if (rc < 0)
770 GOTO(out, rc); 770 GOTO(out, rc);
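
The ptlrpcd hunks replace strncpy() with strlcpy() and size the snprintf() calls from the destination buffer, because strncpy() does not guarantee NUL termination when the source is too long. A userspace sketch using snprintf(), which has the same always-terminate-and-truncate behaviour as the kernel's strlcpy(); the name string is invented:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[8];
        const char *src = "ptlrpcd_reconnect";  /* longer than the buffer */

        /*
         * strncpy() copies at most the given count and does NOT add a
         * terminating NUL when the source is at least that long, so the
         * terminator has to be written by hand.
         */
        strncpy(name, src, sizeof(name) - 1);
        name[sizeof(name) - 1] = '\0';

        /*
         * snprintf() (like strlcpy() in the kernel) always terminates
         * and truncates cleanly within sizeof(name).
         */
        snprintf(name, sizeof(name), "%s", src);
        printf("%s\n", name);                   /* prints "ptlrpcd" */
        return 0;
    }
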
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index e90c8fb7da6a..6547f46a7729 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -59,8 +59,8 @@
59 ****************************************/ 59 ****************************************/
60 60
61 61
62#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 62#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
63#define PAGES_PER_POOL (PTRS_PER_PAGE) 63#define PAGES_PER_POOL (POINTERS_PER_PAGE)
64 64
65#define IDLE_IDX_MAX (100) 65#define IDLE_IDX_MAX (100)
66#define IDLE_IDX_WEIGHT (3) 66#define IDLE_IDX_WEIGHT (3)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index ac8b5fd2300b..acf75f3873d1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -2718,15 +2718,15 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
2718 spin_unlock(&svcpt->scp_lock); 2718 spin_unlock(&svcpt->scp_lock);
2719 2719
2720 if (svcpt->scp_cpt >= 0) { 2720 if (svcpt->scp_cpt >= 0) {
2721 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d", 2721 snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
2722 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); 2722 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
2723 } else { 2723 } else {
2724 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d", 2724 snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
2725 svc->srv_thread_name, thread->t_id); 2725 svc->srv_thread_name, thread->t_id);
2726 } 2726 }
2727 2727
2728 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); 2728 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
2729 rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name)); 2729 rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
2730 if (IS_ERR_VALUE(rc)) { 2730 if (IS_ERR_VALUE(rc)) {
2731 CERROR("cannot start thread '%s': rc %d\n", 2731 CERROR("cannot start thread '%s': rc %d\n",
2732 thread->t_name, rc); 2732 thread->t_name, rc);
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
604 } 604 }
605 } 605 }
606 606
607 memset(usb, 0, sizeof(usb)); 607 memset(usb, 0, sizeof(*usb));
608 usb->init_flags = flags; 608 usb->init_flags = flags;
609 609
610 /* Initialize the USB state structure */ 610 /* Initialize the USB state structure */
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb743769..199059d64c9b 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -48,13 +48,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
48 while (freed) { 48 while (freed) {
49 49
50 struct sk_buff *skb = dev_alloc_skb(size + 256); 50 struct sk_buff *skb = dev_alloc_skb(size + 256);
51 if (unlikely(skb == NULL)) { 51 if (unlikely(skb == NULL))
52 pr_warning
53 ("Failed to allocate skb for hardware pool %d\n",
54 pool);
55 break; 52 break;
56 }
57
58 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); 53 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
59 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; 54 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
60 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); 55 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f694ec35..ea53af30dfa7 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -373,9 +373,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
373 * Enable interrupts on inband status changes 373 * Enable interrupts on inband status changes
374 * for this port. 374 * for this port.
375 */ 375 */
376 gmx_rx_int_en.u64 = 376 gmx_rx_int_en.u64 = 0;
377 cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
378 (index, interface));
379 gmx_rx_int_en.s.phy_dupx = 1; 377 gmx_rx_int_en.s.phy_dupx = 1;
380 gmx_rx_int_en.s.phy_link = 1; 378 gmx_rx_int_en.s.phy_link = 1;
381 gmx_rx_int_en.s.phy_spd = 1; 379 gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16bc493..e14a1bb04361 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -303,6 +303,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
303 if (backlog > budget * cores_in_use && napi != NULL) 303 if (backlog > budget * cores_in_use && napi != NULL)
304 cvm_oct_enable_one_cpu(); 304 cvm_oct_enable_one_cpu();
305 } 305 }
306 rx_count++;
306 307
307 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 308 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
308 if (likely(skb_in_hw)) { 309 if (likely(skb_in_hw)) {
@@ -336,9 +337,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
336 */ 337 */
337 skb = dev_alloc_skb(work->len); 338 skb = dev_alloc_skb(work->len);
338 if (!skb) { 339 if (!skb) {
339 printk_ratelimited("Port %d failed to allocate "
340 "skbuff, packet dropped\n",
341 work->ipprt);
342 cvm_oct_free_work(work); 340 cvm_oct_free_work(work);
343 continue; 341 continue;
344 } 342 }
@@ -429,7 +427,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
429#endif 427#endif
430 } 428 }
431 netif_receive_skb(skb); 429 netif_receive_skb(skb);
432 rx_count++;
433 } else { 430 } else {
434 /* Drop any packet received for a device that isn't up */ 431 /* Drop any packet received for a device that isn't up */
435 /* 432 /*
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 3605c5da822d..6fc77428e83a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -157,8 +157,8 @@ _func_enter_;
157 157
158 *frlen = *frlen + (len + 2); 158 *frlen = *frlen + (len + 2);
159 159
160 return pbuf + len + 2;
161_func_exit_; 160_func_exit_;
161 return pbuf + len + 2;
162} 162}
163 163
164inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode, 164inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 8b2ba26ba38d..4b2eb8e9b562 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1827,13 +1827,13 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
1827 1827
1828#ifdef CONFIG_88EU_P2P 1828#ifdef CONFIG_88EU_P2P
1829 1829
1830static int get_reg_classes_full_count(struct p2p_channels channel_list) 1830static int get_reg_classes_full_count(struct p2p_channels *channel_list)
1831{ 1831{
1832 int cnt = 0; 1832 int cnt = 0;
1833 int i; 1833 int i;
1834 1834
1835 for (i = 0; i < channel_list.reg_classes; i++) { 1835 for (i = 0; i < channel_list->reg_classes; i++) {
1836 cnt += channel_list.reg_class[i].channels; 1836 cnt += channel_list->reg_class[i].channels;
1837 } 1837 }
1838 1838
1839 return cnt; 1839 return cnt;
@@ -2065,7 +2065,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
2065 /* + number of channels in all classes */ 2065 /* + number of channels in all classes */
2066 len_channellist_attr = 3 2066 len_channellist_attr = 3
2067 + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes) 2067 + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
2068 + get_reg_classes_full_count(pmlmeext->channel_list); 2068 + get_reg_classes_full_count(&pmlmeext->channel_list);
2069 2069
2070 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2070 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2071 p2pielen += 2; 2071 p2pielen += 2;
@@ -2437,7 +2437,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
2437 /* + number of channels in all classes */ 2437 /* + number of channels in all classes */
2438 len_channellist_attr = 3 2438 len_channellist_attr = 3
2439 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2439 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
2440 + get_reg_classes_full_count(pmlmeext->channel_list); 2440 + get_reg_classes_full_count(&pmlmeext->channel_list);
2441 2441
2442 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2442 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2443 2443
@@ -2859,7 +2859,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
2859 /* + number of channels in all classes */ 2859 /* + number of channels in all classes */
2860 len_channellist_attr = 3 2860 len_channellist_attr = 3
2861 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2861 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
2862 + get_reg_classes_full_count(pmlmeext->channel_list); 2862 + get_reg_classes_full_count(&pmlmeext->channel_list);
2863 2863
2864 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2864 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2865 2865
@@ -3120,7 +3120,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
3120 /* + number of channels in all classes */ 3120 /* + number of channels in all classes */
3121 len_channellist_attr = 3 3121 len_channellist_attr = 3
3122 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 3122 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
3123 + get_reg_classes_full_count(pmlmeext->channel_list); 3123 + get_reg_classes_full_count(&pmlmeext->channel_list);
3124 3124
3125 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 3125 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
3126 p2pielen += 2; 3126 p2pielen += 2;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); 907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
908 } 908 }
909 909
910 _rtw_memset(data, '\0', sizeof(data)); 910 _rtw_memset(data, '\0', sizeof(*data));
911 911
912 i = psd_start; 912 i = psd_start;
913 while (i < psd_stop) { 913 while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 013ea487e7ac..8018edd3d42e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -631,7 +631,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
631 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; 631 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
632 632
633 if (pregpriv->wifi_spec == 1) { 633 if (pregpriv->wifi_spec == 1) {
634 u32 j, tmp, change_inx; 634 u32 j, tmp, change_inx = false;
635 635
636 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */ 636 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
637 for (i = 0; i < 4; i++) { 637 for (i = 0; i < 4; i++) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
57 u8 cut_ver, fab_ver; 57 u8 cut_ver, fab_ver;
58 58
59 /* Init Value */ 59 /* Init Value */
60 _rtw_memset(dm_odm, 0, sizeof(dm_odm)); 60 _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
61 61
62 dm_odm->Adapter = Adapter; 62 dm_odm->Adapter = Adapter;
63 63
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 2bfe72841921..4787bacdcad8 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -1010,7 +1010,7 @@ enum dm_dig_op {
1010#define DM_false_ALARM_THRESH_LOW 400 1010#define DM_false_ALARM_THRESH_LOW 400
1011#define DM_false_ALARM_THRESH_HIGH 1000 1011#define DM_false_ALARM_THRESH_HIGH 1000
1012 1012
1013#define DM_DIG_MAX_NIC 0x3e 1013#define DM_DIG_MAX_NIC 0x4e
1014#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */ 1014#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
1015 1015
1016#define DM_DIG_MAX_AP 0x32 1016#define DM_DIG_MAX_AP 0x32
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 52b280165a92..555c801d2ded 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -188,7 +188,7 @@ enum ChannelPlan {
188 188
189struct txpowerinfo24g { 189struct txpowerinfo24g {
190 u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; 190 u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
191 u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1]; 191 u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
192 /* If only one tx, only BW20 and OFDM are used. */ 192 /* If only one tx, only BW20 and OFDM are used. */
193 s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT]; 193 s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
194 s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT]; 194 s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index a96b018e5e6a..853ab80a2b86 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -870,6 +870,7 @@ static struct fwevent wlanevents[] = {
870 {0, NULL}, 870 {0, NULL},
871 {0, NULL}, 871 {0, NULL},
872 {0, &rtw_cpwm_event_callback}, 872 {0, &rtw_cpwm_event_callback},
873 {0, NULL},
873}; 874};
874 875
875#endif/* _RTL_MLME_EXT_C_ */ 876#endif/* _RTL_MLME_EXT_C_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6973 stop = strncmp(extra, "stop", 4); 6973 stop = strncmp(extra, "stop", 4);
6974 sscanf(extra, "count =%d, pkt", &count); 6974 sscanf(extra, "count =%d, pkt", &count);
6975 6975
6976 _rtw_memset(extra, '\0', sizeof(extra)); 6976 _rtw_memset(extra, '\0', sizeof(*extra));
6977 6977
6978 if (stop == 0) { 6978 if (stop == 0) {
6979 bStartTest = 0; /* To set Stop */ 6979 bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
57 {} /* Terminating entry */ 58 {} /* Terminating entry */
58}; 59};
59 60
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
37 /* Get TCB and local buffer from common pool. 37 /* Get TCB and local buffer from common pool.
38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ 38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); 39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
40 if (!skb)
41 return RT_STATUS_FAILURE;
40 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 42 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
41 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 43 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
42 tcb_desc->queue_index = TXCMD_QUEUE; 44 tcb_desc->queue_index = TXCMD_QUEUE;
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index dbf11ecb794e..19d3cf451b88 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
172 if (!CARDbIsOFDMinBasicRate(pDevice)) { 172 if (!CARDbIsOFDMinBasicRate(pDevice)) {
173 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO 173 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
174 "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); 174 "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
175 if (wRateIdx > RATE_24M) 175 if (wRateIdx > RATE_24M)
176 wRateIdx = RATE_24M; 176 wRateIdx = RATE_24M;
177 return wRateIdx; 177 return wRateIdx;
178 } 178 }
179 179
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
1634 if (pMgmt == NULL) 1634 if (pMgmt == NULL)
1635 return -EFAULT; 1635 return -EFAULT;
1636 1636
1637 if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
1638 return -ENODEV;
1639
1637 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); 1640 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
1638 if (buf == NULL) 1641 if (buf == NULL)
1639 return -ENOMEM; 1642 return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
1098 memset(pMgmt->abyCurrBSSID, 0, 6); 1098 memset(pMgmt->abyCurrBSSID, 0, 6);
1099 pMgmt->eCurrState = WMAC_STATE_IDLE; 1099 pMgmt->eCurrState = WMAC_STATE_IDLE;
1100 1100
1101 pDevice->flags &= ~DEVICE_FLAGS_OPENED;
1102
1101 device_free_tx_bufs(pDevice); 1103 device_free_tx_bufs(pDevice);
1102 device_free_rx_bufs(pDevice); 1104 device_free_rx_bufs(pDevice);
1103 device_free_int_bufs(pDevice); 1105 device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
1109 usb_free_urb(pDevice->pInterruptURB); 1111 usb_free_urb(pDevice->pInterruptURB);
1110 1112
1111 BSSvClearNodeDBTable(pDevice, 0); 1113 BSSvClearNodeDBTable(pDevice, 0);
1112 pDevice->flags &=(~DEVICE_FLAGS_OPENED);
1113 1114
1114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); 1115 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
1115 1116
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); 148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
149 149
150 for (ii = 0; ii < pDevice->cbTD; ii++) { 150 for (ii = 0; ii < pDevice->cbTD; ii++) {
151 if (!pDevice->apTD[ii])
152 return NULL;
151 pContext = pDevice->apTD[ii]; 153 pContext = pDevice->apTD[ii];
152 if (pContext->bBoolInUse == false) { 154 if (pContext->bBoolInUse == false) {
153 pContext->bBoolInUse = true; 155 pContext->bBoolInUse = true;
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index efc56987a60b..7db6f03a0054 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -2054,7 +2054,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
2054 NULL, 2054 NULL,
2055 MKDEV(major, i), 2055 MKDEV(major, i),
2056 NULL, 2056 NULL,
2057 devname); 2057 "%s", devname);
2058 2058
2059 if (IS_ERR(device)) { 2059 if (IS_ERR(device)) {
2060 pr_warn("xillybus: Failed to create %s " 2060 pr_warn("xillybus: Failed to create %s "
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 91d94b564433..2c4ed52ca849 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -981,4 +981,3 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
981MODULE_LICENSE("Dual BSD/GPL"); 981MODULE_LICENSE("Dual BSD/GPL");
982MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); 982MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
983MODULE_DESCRIPTION("Compressed RAM Block Device"); 983MODULE_DESCRIPTION("Compressed RAM Block Device");
984MODULE_ALIAS("devname:zram");
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c63..38e44b9abf0f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
753 753
754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755{ 755{
756 struct iscsi_cmd *cmd; 756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
757 758
758 conn->exp_statsn = exp_statsn; 759 conn->exp_statsn = exp_statsn;
759 760
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
761 return; 762 return;
762 763
763 spin_lock_bh(&conn->cmd_lock); 764 spin_lock_bh(&conn->cmd_lock);
764 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
765 spin_lock(&cmd->istate_lock); 766 spin_lock(&cmd->istate_lock);
766 if ((cmd->i_state == ISTATE_SENT_STATUS) && 767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
767 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
768 cmd->i_state = ISTATE_REMOVE; 769 cmd->i_state = ISTATE_REMOVE;
769 spin_unlock(&cmd->istate_lock); 770 spin_unlock(&cmd->istate_lock);
770 iscsit_add_cmd_to_immediate_queue(cmd, conn, 771 list_move_tail(&cmd->i_conn_node, &ack_list);
771 cmd->i_state);
772 continue; 772 continue;
773 } 773 }
774 spin_unlock(&cmd->istate_lock); 774 spin_unlock(&cmd->istate_lock);
775 } 775 }
776 spin_unlock_bh(&conn->cmd_lock); 776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
777} 782}
778 783
779static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 784static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
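
The iscsit_ack_from_expstatsn() rework is an instance of a common locking pattern: matching entries are detached onto a private list while the spinlock is held, and the potentially sleeping release is done only after the lock is dropped. A kernel-style sketch of the pattern with invented types (not the driver's actual structures):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_cmd {
        struct list_head node;
        bool done;
    };

    /*
     * Detach completed entries under the lock, then release them outside
     * it.  list_for_each_entry_safe() is required because entries are
     * unlinked while the list is being walked.
     */
    static void reap_done_cmds(spinlock_t *lock, struct list_head *active)
    {
        struct demo_cmd *cmd, *tmp;
        LIST_HEAD(reap_list);

        spin_lock_bh(lock);
        list_for_each_entry_safe(cmd, tmp, active, node)
            if (cmd->done)
                list_move_tail(&cmd->node, &reap_list);
        spin_unlock_bh(lock);

        /* Possibly sleeping teardown happens with the lock dropped. */
        list_for_each_entry_safe(cmd, tmp, &reap_list, node) {
            list_del(&cmd->node);
            kfree(cmd);
        }
    }
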
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1d..ef6d836a4d09 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
1192 */ 1192 */
1193alloc_tags: 1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS; 1195 tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
1197 1197
1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f2de28e178fd..b0cac0c342e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736 * Fallthrough 736 * Fallthrough
737 */ 737 */
738 case ISCSI_OP_SCSI_TMFUNC: 738 case ISCSI_OP_SCSI_TMFUNC:
739 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 739 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
741 __iscsit_free_cmd(cmd, true, shutdown); 741 __iscsit_free_cmd(cmd, true, shutdown);
742 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 742 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
752 se_cmd = &cmd->se_cmd; 752 se_cmd = &cmd->se_cmd;
753 __iscsit_free_cmd(cmd, true, shutdown); 753 __iscsit_free_cmd(cmd, true, shutdown);
754 754
755 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 755 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, true, shutdown); 757 __iscsit_free_cmd(cmd, true, shutdown);
758 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 758 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..4714c6f8da4b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -349,7 +349,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{ 349{
350 struct se_device *dev = cmd->se_dev; 350 struct se_device *dev = cmd->se_dev;
351 351
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 352 /*
353 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
354 * within target_complete_ok_work() if the command was successfully
355 * sent to the backend driver.
356 */
357 spin_lock_irq(&cmd->t_state_lock);
358 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
359 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
360 spin_unlock_irq(&cmd->t_state_lock);
361
353 /* 362 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 363 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission. 364 * before the original READ I/O submission.
@@ -363,7 +372,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{ 372{
364 struct se_device *dev = cmd->se_dev; 373 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg; 374 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr; 375 unsigned char *buf = NULL, *addr;
367 struct sg_mapping_iter m; 376 struct sg_mapping_iter m;
368 unsigned int offset = 0, len; 377 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb; 378 unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +387,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 */ 387 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 388 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE; 389 return TCM_NO_SENSE;
390 /*
391 * Immediately exit + release dev->caw_sem if command has already
392 * been failed with a non-zero SCSI status.
393 */
394 if (cmd->scsi_status) {
395 pr_err("compare_and_write_callback: non zero scsi_status:"
396 " 0x%02x\n", cmd->scsi_status);
397 goto out;
398 }
381 399
382 buf = kzalloc(cmd->data_length, GFP_KERNEL); 400 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) { 401 if (!buf) {
@@ -508,6 +526,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
508 cmd->transport_complete_callback = NULL; 526 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 527 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 } 528 }
529 /*
530 * Reset cmd->data_length to individual block_size in order to not
531 * confuse backend drivers that depend on this value matching the
532 * size of the I/O being submitted.
533 */
534 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
511 535
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 536 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE); 537 DMA_FROM_DEVICE);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac0..81e945eefbbd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
236{ 236{
237 int rc; 237 int rc;
238 238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
240 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
240 if (!se_sess->sess_cmd_map) { 241 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
242 return -ENOMEM; 243 if (!se_sess->sess_cmd_map) {
244 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
245 return -ENOMEM;
246 }
243 } 247 }
244 248
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) { 250 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool," 251 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num); 252 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map); 253 if (is_vmalloc_addr(se_sess->sess_cmd_map))
254 vfree(se_sess->sess_cmd_map);
255 else
256 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL; 257 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM; 258 return -ENOMEM;
252 } 259 }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
412{ 419{
413 if (se_sess->sess_cmd_map) { 420 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map); 422 if (is_vmalloc_addr(se_sess->sess_cmd_map))
423 vfree(se_sess->sess_cmd_map);
424 else
425 kfree(se_sess->sess_cmd_map);
416 } 426 }
417 kmem_cache_free(se_sess_cache, se_sess); 427 kmem_cache_free(se_sess_cache, se_sess);
418} 428}
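
The session-tag allocation now tries kzalloc() and quietly falls back to vzalloc() for large tables, which in turn means the free path must pick the matching deallocator via is_vmalloc_addr(). A minimal sketch of that pattern with invented helper names (later kernels wrap the same idea as kvzalloc()/kvfree()):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /*
     * Try a physically contiguous allocation first, fall back silently
     * to vmalloc space for large sizes.
     */
    static void *demo_alloc_map(size_t size)
    {
        void *map;

        map = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!map)
            map = vzalloc(size);
        return map;
    }

    /* Free with the allocator that actually provided the memory. */
    static void demo_free_map(void *map)
    {
        if (is_vmalloc_addr(map))
            vfree(map);
        else
            kfree(map);
    }
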
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adca..3da4fd10b9f8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -298,8 +298,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
298 (unsigned long long)xop->dst_lba); 298 (unsigned long long)xop->dst_lba);
299 299
300 if (dc != 0) { 300 if (dc != 0) {
301 xop->dbl = (desc[29] << 16) & 0xff; 301 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] << 8) & 0xff; 302 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff; 303 xop->dbl |= desc[31] & 0xff;
304 304
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
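
The xcopy fix is about operator ordering when assembling a multi-byte field: masking with 0xff after the shift discards everything above bit 7, so the 24-bit value collapses to its lowest byte. A standalone demonstration with an invented descriptor buffer:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t desc[32] = { [29] = 0x01, [30] = 0x02, [31] = 0x03 };
        uint32_t wrong, right;

        /* Masking after the shift keeps at most 8 low bits: result 0x000003. */
        wrong  = (desc[29] << 16) & 0xff;
        wrong |= (desc[30] << 8) & 0xff;
        wrong |= desc[31] & 0xff;

        /* Mask each byte first, then shift it into place: result 0x010203. */
        right  = (desc[29] & 0xff) << 16;
        right |= (desc[30] & 0xff) << 8;
        right |= desc[31] & 0xff;

        printf("wrong=0x%06x right=0x%06x\n", wrong, right);
        return 0;
    }
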
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index e61c36cbb866..c193af6a628f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@ struct console xenboot_console = {
636 .name = "xenboot", 636 .name = "xenboot",
637 .write = xenboot_write_console, 637 .write = xenboot_write_console,
638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, 638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
639 .index = -1,
639}; 640};
640#endif /* CONFIG_EARLY_PRINTK */ 641#endif /* CONFIG_EARLY_PRINTK */
641 642
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..7a744b69c3d1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; 1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
1759 if (canon_change) { 1759 if (canon_change) {
1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1761 ldata->line_start = 0; 1761 ldata->line_start = ldata->canon_head = ldata->read_tail;
1762 ldata->canon_head = ldata->read_tail;
1763 ldata->erasing = 0; 1762 ldata->erasing = 0;
1764 ldata->lnext = 0; 1763 ldata->lnext = 0;
1765 } 1764 }
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2184 2183
2185 if (!input_available_p(tty, 0)) { 2184 if (!input_available_p(tty, 0)) {
2186 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2187 retval = -EIO; 2186 up_read(&tty->termios_rwsem);
2188 break; 2187 tty_flush_to_ldisc(tty);
2189 } 2188 down_read(&tty->termios_rwsem);
2190 if (tty_hung_up_p(file)) 2189 if (!input_available_p(tty, 0)) {
2191 break; 2190 retval = -EIO;
2192 if (!timeout) 2191 break;
2193 break; 2192 }
2194 if (file->f_flags & O_NONBLOCK) { 2193 } else {
2195 retval = -EAGAIN; 2194 if (tty_hung_up_p(file))
2196 break; 2195 break;
2197 } 2196 if (!timeout)
2198 if (signal_pending(current)) { 2197 break;
2199 retval = -ERESTARTSYS; 2198 if (file->f_flags & O_NONBLOCK) {
2200 break; 2199 retval = -EAGAIN;
2201 } 2200 break;
2202 n_tty_set_room(tty); 2201 }
2203 up_read(&tty->termios_rwsem); 2202 if (signal_pending(current)) {
2203 retval = -ERESTARTSYS;
2204 break;
2205 }
2206 n_tty_set_room(tty);
2207 up_read(&tty->termios_rwsem);
2204 2208
2205 timeout = schedule_timeout(timeout); 2209 timeout = schedule_timeout(timeout);
2206 2210
2207 down_read(&tty->termios_rwsem); 2211 down_read(&tty->termios_rwsem);
2208 continue; 2212 continue;
2213 }
2209 } 2214 }
2210 __set_current_state(TASK_RUNNING); 2215 __set_current_state(TASK_RUNNING);
2211 2216
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
667 667
668static int dma_push_rx(struct eg20t_port *priv, int size) 668static int dma_push_rx(struct eg20t_port *priv, int size)
669{ 669{
670 struct tty_struct *tty;
671 int room; 670 int room;
672 struct uart_port *port = &priv->port; 671 struct uart_port *port = &priv->port;
673 struct tty_port *tport = &port->state->port; 672 struct tty_port *tport = &port->state->port;
674 673
675 port = &priv->port;
676 tty = tty_port_tty_get(tport);
677 if (!tty) {
678 dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
679 return 0;
680 }
681
682 room = tty_buffer_request_room(tport, size); 674 room = tty_buffer_request_room(tport, size);
683 675
684 if (room < size) 676 if (room < size)
685 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 677 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
686 size - room); 678 size - room);
687 if (!room) 679 if (!room)
688 return room; 680 return 0;
689 681
690 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); 682 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
691 683
692 port->icount.rx += room; 684 port->icount.rx += room;
693 tty_kref_put(tty);
694 685
695 return room; 686 return room;
696} 687}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
1098 if (tty == NULL) { 1089 if (tty == NULL) {
1099 for (i = 0; error_msg[i] != NULL; i++) 1090 for (i = 0; error_msg[i] != NULL; i++)
1100 dev_err(&priv->pdev->dev, error_msg[i]); 1091 dev_err(&priv->pdev->dev, error_msg[i]);
1092 } else {
1093 tty_kref_put(tty);
1101 } 1094 }
1102} 1095}
1103 1096
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
732static void tegra_uart_stop_rx(struct uart_port *u) 732static void tegra_uart_stop_rx(struct uart_port *u)
733{ 733{
734 struct tegra_uart_port *tup = to_tegra_uport(u); 734 struct tegra_uart_port *tup = to_tegra_uport(u);
735 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); 735 struct tty_struct *tty;
736 struct tty_port *port = &u->state->port; 736 struct tty_port *port = &u->state->port;
737 struct dma_tx_state state; 737 struct dma_tx_state state;
738 unsigned long ier; 738 unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
744 if (!tup->rx_in_progress) 744 if (!tup->rx_in_progress)
745 return; 745 return;
746 746
747 tty = tty_port_tty_get(&tup->uport.state->port);
748
747 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ 749 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
748 750
749 ier = tup->ier_shadow; 751 ier = tup->ier_shadow;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a9355ce1c6d5..3a1a01af9a80 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -854,7 +854,8 @@ void disassociate_ctty(int on_exit)
854 struct pid *tty_pgrp = tty_get_pgrp(tty); 854 struct pid *tty_pgrp = tty_get_pgrp(tty);
855 if (tty_pgrp) { 855 if (tty_pgrp) {
856 kill_pgrp(tty_pgrp, SIGHUP, on_exit); 856 kill_pgrp(tty_pgrp, SIGHUP, on_exit);
857 kill_pgrp(tty_pgrp, SIGCONT, on_exit); 857 if (!on_exit)
858 kill_pgrp(tty_pgrp, SIGCONT, on_exit);
858 put_pid(tty_pgrp); 859 put_pid(tty_pgrp);
859 } 860 }
860 } 861 }
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1201 } 1201 }
1202 return 0; 1202 return 0;
1203 case TCFLSH: 1203 case TCFLSH:
1204 retval = tty_check_change(tty);
1205 if (retval)
1206 return retval;
1204 return __tty_perform_flush(tty, arg); 1207 return __tty_perform_flush(tty, arg);
1205 default: 1208 default:
1206 /* Try the mode commands */ 1209 /* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
1config USB_CHIPIDEA 1config USB_CHIPIDEA
2 tristate "ChipIdea Highspeed Dual Role Controller" 2 tristate "ChipIdea Highspeed Dual Role Controller"
3 depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 help 4 help
5 Say Y here if your system has a dual role high speed USB 5 Say Y here if your system has a dual role high speed USB
6 controller based on ChipIdea silicon IP. Currently, only the 6 controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
131 if (ret) { 131 if (ret) {
132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", 132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
133 ret); 133 ret);
134 goto err_clk; 134 goto err_phy;
135 } 135 }
136 } 136 }
137 137
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
143 dev_err(&pdev->dev, 143 dev_err(&pdev->dev,
144 "Can't register ci_hdrc platform device, err=%d\n", 144 "Can't register ci_hdrc platform device, err=%d\n",
145 ret); 145 ret);
146 goto err_clk; 146 goto err_phy;
147 } 147 }
148 148
149 if (data->usbmisc_data) { 149 if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
164 164
165disable_device: 165disable_device:
166 ci_hdrc_remove_device(data->ci_pdev); 166 ci_hdrc_remove_device(data->ci_pdev);
167err_phy:
168 if (data->phy)
169 usb_phy_shutdown(data->phy);
167err_clk: 170err_clk:
168 clk_disable_unprepare(data->clk); 171 clk_disable_unprepare(data->clk);
169 return ret; 172 return ret;
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 042320a6c6c7..d514332ac081 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
 		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
 	},
-	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+	{
+		/* Intel Clovertrail */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
+		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
+	},
+	{ 0 } /* end: all zeroes */
 };
 MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
 
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
 	dbg_remove_files(ci);
 	free_irq(ci->irq, ci);
 	ci_role_destroy(ci);
+	kfree(ci->hw_bank.regmap);
 
 	return 0;
 }
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
 	for (i = 0; i < ci->hw_ep_max; i++) {
 		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
 
+		if (hwep->pending_td)
+			free_pending_td(hwep);
 		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
 	}
 }
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
 		if (ci->platdata->notify_event)
 			ci->platdata->notify_event(ci,
 			CI_HDRC_CONTROLLER_STOPPED_EVENT);
-		ci->driver = NULL;
 		spin_unlock_irqrestore(&ci->lock, flags);
 		_gadget_stop_activity(&ci->gadget);
 		spin_lock_irqsave(&ci->lock, flags);
 		pm_runtime_put(&ci->gadget.dev);
 	}
 
+	ci->driver = NULL;
 	spin_unlock_irqrestore(&ci->lock, flags);
 
 	return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
 		if ((index & ~USB_DIR_IN) == 0)
 			return 0;
 		ret = findintfep(ps->dev, index);
+		if (ret < 0) {
+			/*
+			 * Some not fully compliant Win apps seem to get
+			 * index wrong and have the endpoint number here
+			 * rather than the endpoint address (with the
+			 * correct direction). Win does let this through,
+			 * so we'll not reject it here but leave it to
+			 * the device to not break KVM. But we warn.
+			 */
+			ret = findintfep(ps->dev, index ^ 0x80);
+			if (ret >= 0)
+				dev_info(&ps->dev->dev,
+					"%s: process %i (%s) requesting ep %02x but needs %02x\n",
+					__func__, task_pid_nr(current),
+					current->comm, index, index ^ 0x80);
+		}
 		if (ret >= 0)
 			ret = checkintf(ps, ret);
 		break;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
 	unsigned long long u2_pel;
 	int ret;
 
+	if (udev->state != USB_STATE_CONFIGURED)
+		return 0;
+
 	/* Convert SEL and PEL stored in ns to us */
 	u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
 	u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b870872e020f..70fc43027a5c 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,6 @@
 config USB_DWC3
 	tristate "DesignWare USB3 DRD Core Support"
 	depends on (USB || USB_GADGET) && HAS_DMA
-	depends on EXTCON
 	select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
 	help
 	  Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 9b138129e856..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -28,6 +28,8 @@
 /* FIXME define these in <linux/pci_ids.h> */
 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
 
 struct dwc3_pci {
 	struct device *dev;
@@ -187,6 +189,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
 		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
 				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
 	},
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
 	{ } /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f168eaebdef8..5452c0fce360 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2611,15 +2611,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
 	if (ret) {
 		dev_err(dwc->dev, "failed to register udc\n");
-		goto err5;
+		goto err4;
 	}
 
 	return 0;
 
-err5:
-	dwc3_gadget_free_endpoints(dwc);
-
 err4:
+	dwc3_gadget_free_endpoints(dwc);
 	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 5a5acf22c694..e126b6b248e6 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -113,12 +113,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
 		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 	}
 
-	fi_ecm = usb_get_function_instance("ecm");
-	if (IS_ERR(fi_ecm)) {
-		status = PTR_ERR(fi_ecm);
-		goto err_func_ecm;
-	}
-
 	f_ecm = usb_get_function(fi_ecm);
 	if (IS_ERR(f_ecm)) {
 		status = PTR_ERR(f_ecm);
@@ -129,35 +123,24 @@ static int __init cdc_do_config(struct usb_configuration *c)
 	if (status)
 		goto err_add_ecm;
 
-	fi_serial = usb_get_function_instance("acm");
-	if (IS_ERR(fi_serial)) {
-		status = PTR_ERR(fi_serial);
-		goto err_get_acm;
-	}
-
 	f_acm = usb_get_function(fi_serial);
 	if (IS_ERR(f_acm)) {
 		status = PTR_ERR(f_acm);
-		goto err_func_acm;
+		goto err_get_acm;
 	}
 
 	status = usb_add_function(c, f_acm);
 	if (status)
 		goto err_add_acm;
-
 	return 0;
 
 err_add_acm:
 	usb_put_function(f_acm);
-err_func_acm:
-	usb_put_function_instance(fi_serial);
 err_get_acm:
 	usb_remove_function(c, f_ecm);
 err_add_ecm:
 	usb_put_function(f_ecm);
 err_get_ecm:
-	usb_put_function_instance(fi_ecm);
-err_func_ecm:
 	return status;
 }
 
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 06ecd08fd57a..b8a2376971a4 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
 	struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy *dum = dum_hcd->dum;
 
-	dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
-			driver->driver.name);
+	if (driver)
+		dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+				driver->driver.name);
 
 	dum->driver = NULL;
 
@@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
 {
 	struct dummy *dum = platform_get_drvdata(pdev);
 
-	usb_del_gadget_udc(&dum->gadget);
 	device_remove_file(&dum->gadget.dev, &dev_attr_function);
+	usb_del_gadget_udc(&dum->gadget);
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index edab45da3741..8d9e6f7e8f1a 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -995,7 +995,7 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
 	usb_ep_free_request(ecm->notify, ecm->notify_req);
 }
 
-struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
 {
 	struct f_ecm *ecm;
 	struct f_ecm_opts *opts;
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index d00392d879db..d61c11d765d0 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -624,7 +624,7 @@ static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
 	usb_free_all_descriptors(f);
 }
 
-struct usb_function *eem_alloc(struct usb_function_instance *fi)
+static struct usb_function *eem_alloc(struct usb_function_instance *fi)
 {
 	struct f_eem *eem;
 	struct f_eem_opts *opts;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..44cf775a8627 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
 	struct ffs_file_perms perms;
 	umode_t root_mode;
 	const char *dev_name;
-	union {
-		/* set by ffs_fs_mount(), read by ffs_sb_fill() */
-		void *private_data;
-		/* set by ffs_sb_fill(), read by ffs_fs_mount */
-		struct ffs_data *ffs_data;
-	};
+	struct ffs_data *ffs_data;
 };
 
 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 {
 	struct ffs_sb_fill_data *data = _data;
 	struct inode *inode;
-	struct ffs_data *ffs;
+	struct ffs_data *ffs = data->ffs_data;
 
 	ENTER();
 
-	/* Initialise data */
-	ffs = ffs_data_new();
-	if (unlikely(!ffs))
-		goto Enomem;
-
 	ffs->sb = sb;
-	ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL);
-	if (unlikely(!ffs->dev_name))
-		goto Enomem;
-	ffs->file_perms = data->perms;
-	ffs->private_data = data->private_data;
-
-	/* used by the caller of this function */
-	data->ffs_data = ffs;
-
+	data->ffs_data = NULL;
 	sb->s_fs_info = ffs;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 				  &data->perms);
 	sb->s_root = d_make_root(inode);
 	if (unlikely(!sb->s_root))
-		goto Enomem;
+		return -ENOMEM;
 
 	/* EP0 file */
 	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
 					 &ffs_ep0_operations, NULL)))
-		goto Enomem;
+		return -ENOMEM;
 
 	return 0;
-
-Enomem:
-	return -ENOMEM;
 }
 
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
 	struct dentry *rv;
 	int ret;
 	void *ffs_dev;
+	struct ffs_data *ffs;
 
 	ENTER();
 
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
 	if (unlikely(ret < 0))
 		return ERR_PTR(ret);
 
+	ffs = ffs_data_new();
+	if (unlikely(!ffs))
+		return ERR_PTR(-ENOMEM);
+	ffs->file_perms = data.perms;
+
+	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+	if (unlikely(!ffs->dev_name)) {
+		ffs_data_put(ffs);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ffs_dev = functionfs_acquire_dev_callback(dev_name);
-	if (IS_ERR(ffs_dev))
-		return ffs_dev;
+	if (IS_ERR(ffs_dev)) {
+		ffs_data_put(ffs);
+		return ERR_CAST(ffs_dev);
+	}
+	ffs->private_data = ffs_dev;
+	data.ffs_data = ffs;
 
-	data.dev_name = dev_name;
-	data.private_data = ffs_dev;
 	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
-	/* data.ffs_data is set by ffs_sb_fill */
-	if (IS_ERR(rv))
+	if (IS_ERR(rv) && data.ffs_data) {
 		functionfs_release_dev_callback(data.ffs_data);
-
+		ffs_data_put(data.ffs_data);
+	}
 	return rv;
 }
 
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
 				   data->raw_descs + ret,
 				   (sizeof data->raw_descs) - ret,
 				   __ffs_func_bind_do_descs, func);
+		if (unlikely(ret < 0))
+			goto error;
 	}
 
 	/*
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 313b835eedfd..a01d7d38c016 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2260,10 +2260,12 @@ reset:
 	/* Disable the endpoints */
 	if (fsg->bulk_in_enabled) {
 		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in->driver_data = NULL;
 		fsg->bulk_in_enabled = 0;
 	}
 	if (fsg->bulk_out_enabled) {
 		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out->driver_data = NULL;
 		fsg->bulk_out_enabled = 0;
 	}
 
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index 32db2eee2d87..bbbfd1948778 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1214,6 +1214,6 @@ static struct platform_driver fotg210_driver = {
 
 module_platform_driver(fotg210_driver);
 
-MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index f1dd6daabe21..b278abe52453 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -22,7 +22,7 @@
 
 MODULE_DESCRIPTION("FUSB300 USB gadget driver");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_ALIAS("platform:fusb300_udc");
 
 #define DRIVER_VERSION "20 October 2010"
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 2a1ebefd8f9e..23393254a8a3 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -179,7 +179,7 @@ err_conf:
 	return ret;
 }
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
 	static struct usb_configuration config = {
 		.bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
@@ -194,7 +194,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
 	return 0;
 }
@@ -241,7 +241,7 @@ err_conf:
 	return ret;
 }
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
 	static struct usb_configuration config = {
 		.bConfigurationValue = MULTI_CDC_CONFIG_NUM,
@@ -256,7 +256,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
 	return 0;
 }
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index bbb6e98c4384..561b30efb8ee 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -645,6 +645,7 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
 	struct mv_u3d_ep *ep;
 	struct mv_u3d_ep_context *ep_context;
 	u32 epxcr, direction;
+	unsigned long flags;
 
 	if (!_ep)
 		return -EINVAL;
@@ -661,7 +662,9 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
 	direction = mv_u3d_ep_dir(ep);
 
 	/* nuke all pending requests (does flush) */
+	spin_lock_irqsave(&u3d->lock, flags);
 	mv_u3d_nuke(ep, -ESHUTDOWN);
+	spin_unlock_irqrestore(&u3d->lock, flags);
 
 	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
 	if (direction == MV_U3D_EP_DIR_OUT) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index cc9207473dbc..0ac6064aa3b8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
 /*
  * probe - binds to the platform device
  */
-static int __init pxa25x_udc_probe(struct platform_device *pdev)
+static int pxa25x_udc_probe(struct platform_device *pdev)
 {
 	struct pxa25x_udc *dev = &memory;
 	int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
 	pullup_off();
 }
 
-static int __exit pxa25x_udc_remove(struct platform_device *pdev)
+static int pxa25x_udc_remove(struct platform_device *pdev)
 {
 	struct pxa25x_udc *dev = platform_get_drvdata(pdev);
 
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
 
 static struct platform_driver udc_driver = {
 	.shutdown = pxa25x_udc_shutdown,
-	.remove = __exit_p(pxa25x_udc_remove),
+	.probe = pxa25x_udc_probe,
+	.remove = pxa25x_udc_remove,
 	.suspend = pxa25x_udc_suspend,
 	.resume = pxa25x_udc_resume,
 	.driver = {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
 	},
 };
 
-module_platform_driver_probe(udc_driver, pxa25x_udc_probe);
+module_platform_driver(udc_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index d69b36a99dbc..a8a99e4748d5 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
 	 * FIFO, requests of >512 cause the endpoint to get stuck with a
 	 * fragment of the end of the transfer in it.
 	 */
-	if (can_write > 512)
+	if (can_write > 512 && !periodic)
 		can_write = 512;
 
 	/*
@@ -2475,8 +2475,6 @@ irq_retry:
 	if (gintsts & GINTSTS_ErlySusp) {
 		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
 		writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
-
-		s3c_hsotg_disconnect(hsotg);
 	}
 
 	/*
@@ -2962,9 +2960,6 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
 	if (!hsotg)
 		return -ENODEV;
 
-	if (!driver || driver != hsotg->driver || !driver->unbind)
-		return -EINVAL;
-
 	/* all endpoints should be shutdown */
 	for (ep = 0; ep < hsotg->num_of_eps; ep++)
 		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2972,15 +2967,15 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
 	spin_lock_irqsave(&hsotg->lock, flags);
 
 	s3c_hsotg_phy_disable(hsotg);
-	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
-	hsotg->driver = NULL;
+	if (!driver)
+		hsotg->driver = NULL;
+
 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
 
 	spin_unlock_irqrestore(&hsotg->lock, flags);
 
-	dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
-		 driver->driver.name);
+	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
 	return 0;
 }
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 947b009009f1..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
 	}
 
 	/* Enable USB controller, 83xx or 8536 */
-	if (pdata->have_sysif_regs)
+	if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
 		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
 
 	/* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 	case FSL_USB2_PHY_ULPI:
 		if (pdata->have_sysif_regs && pdata->controller_ver) {
 			/* controller version 1.6 or above */
+			clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
 			setbits32(non_ehci + FSL_SOC_USB_CTRL,
-				ULPI_PHY_CLK_SEL);
-			/*
-			 * Due to controller issue of PHY_CLK_VALID in ULPI
-			 * mode, we set USB_CTRL_USB_EN before checking
-			 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-			 */
-			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-					UTMI_PHY_EN, USB_CTRL_USB_EN);
+				ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
 		}
 		portsc |= PORT_PTS_ULPI;
 		break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 	if (pdata->have_sysif_regs && pdata->controller_ver &&
 	    (phy_mode == FSL_USB2_PHY_ULPI)) {
 		/* check PHY_CLK_VALID to get phy clk valid */
-		if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
-				PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
+		if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+				PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+				in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
 			printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
 			return -EINVAL;
 		}
@@ -669,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+	.flags = HCD_USB2 | HCD_MEMORY,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index b52a66ce92e8..83ab51af250f 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5d6022f30ebe..86ab9fd9fe9e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1158,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 417c10da9450..35cdbd88bbbe 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f3..45cc00158412 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
 	.remove = usb_hcd_pci_remove,
 	.shutdown = usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver = {
 		.pm = &usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 893b707f0000..601e208bd782 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
 #else
 	.irq = ehci_irq,
 #endif
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6cc5567bf9c8..932293fa32de 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8188542ba17e..fd983771b025 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
 	.product_desc = "PS3 EHCI Host Controller",
 	.hcd_priv_size = sizeof(struct ehci_hcd),
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 	.reset = ps3_ehci_hc_reset,
 	.start = ehci_run,
 	.stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index e321804c3475..a7f776a13eb1 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -247,6 +247,8 @@ static int qtd_copy_status (
 
 static void
 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+__releases(ehci->lock)
+__acquires(ehci->lock)
 {
 	if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
 		/* ... update hc-wide periodic stats */
@@ -272,8 +274,11 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
 			urb->actual_length, urb->transfer_buffer_length);
 #endif
 
+	/* complete() can reenter this HCD */
 	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+	spin_unlock (&ehci->lock);
 	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
+	spin_lock (&ehci->lock);
 }
 
 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 8a734498079b..b2de52d39614 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index dc899eb2b861..93e59a13bc1f 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+	.flags = HCD_USB2 | HCD_MEMORY,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 67026ffbf9a8..cca4be90a864 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
 	 * Generic hardware linkage.
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * Basic lifecycle operations.
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 1c370dfbee0d..59e0e24c753f 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_USB2|HCD_MEMORY|HCD_BH,
+	.flags = HCD_USB2|HCD_MEMORY,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 95979f9f4381..eba962e6ebfb 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
 	 * generic hardware linkage
 	 */
 	.irq = ehci_irq,
-	.flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+	.flags = HCD_MEMORY | HCD_USB2,
 
 	/*
 	 * basic lifecycle operations
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9e0020d9e4c8..abd5050a4899 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -24,7 +24,7 @@ struct fsl_usb2_dev_data {
 	enum fsl_usb2_operating_modes op_mode; /* operating mode */
 };
 
-struct fsl_usb2_dev_data dr_mode_data[] = {
+static struct fsl_usb2_dev_data dr_mode_data[] = {
 	{
 		.dr_mode = "host",
 		.drivers = { "fsl-ehci", NULL, NULL, },
@@ -42,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] = {
 	},
 };
 
-struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
 {
 	const unsigned char *prop;
 	int i;
@@ -75,7 +75,7 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
 	return FSL_USB2_PHY_NONE;
 }
 
-struct platform_device *fsl_usb2_device_register(
+static struct platform_device *fsl_usb2_device_register(
 					struct platform_device *ofdev,
 					struct fsl_usb2_platform_data *pdata,
 					const char *name, int id)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 			i = DIV_ROUND_UP(wrap_frame(
 					cur_frame - urb->start_frame),
 					urb->interval);
-			if (urb->transfer_flags & URB_ISO_ASAP) {
+
+			/* Treat underruns as if URB_ISO_ASAP was set */
+			if ((urb->transfer_flags & URB_ISO_ASAP) ||
+					i >= urb->number_of_packets) {
 				urb->start_frame = wrap_frame(urb->start_frame
 						+ i * urb->interval);
 				i = 0;
-			} else if (i >= urb->number_of_packets) {
-				ret = -EXDEV;
-				goto alloc_dmem_failed;
 			}
 		}
 	}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
216 frame &= ~(ed->interval - 1); 216 frame &= ~(ed->interval - 1);
217 frame |= ed->branch; 217 frame |= ed->branch;
218 urb->start_frame = frame; 218 urb->start_frame = frame;
219 ed->last_iso = frame + ed->interval * (size - 1);
219 } 220 }
220 } else if (ed->type == PIPE_ISOCHRONOUS) { 221 } else if (ed->type == PIPE_ISOCHRONOUS) {
221 u16 next = ohci_frame_no(ohci) + 1; 222 u16 next = ohci_frame_no(ohci) + 1;
222 u16 frame = ed->last_iso + ed->interval; 223 u16 frame = ed->last_iso + ed->interval;
224 u16 length = ed->interval * (size - 1);
223 225
224 /* Behind the scheduling threshold? */ 226 /* Behind the scheduling threshold? */
225 if (unlikely(tick_before(frame, next))) { 227 if (unlikely(tick_before(frame, next))) {
226 228
227 /* USB_ISO_ASAP: Round up to the first available slot */ 229 /* URB_ISO_ASAP: Round up to the first available slot */
228 if (urb->transfer_flags & URB_ISO_ASAP) { 230 if (urb->transfer_flags & URB_ISO_ASAP) {
229 frame += (next - frame + ed->interval - 1) & 231 frame += (next - frame + ed->interval - 1) &
230 -ed->interval; 232 -ed->interval;
231 233
232 /* 234 /*
233 * Not ASAP: Use the next slot in the stream. If 235 * Not ASAP: Use the next slot in the stream,
234 * the entire URB falls before the threshold, fail. 236 * no matter what.
235 */ 237 */
236 } else { 238 } else {
237 if (tick_before(frame + ed->interval *
238 (urb->number_of_packets - 1), next)) {
239 retval = -EXDEV;
240 usb_hcd_unlink_urb_from_ep(hcd, urb);
241 goto fail;
242 }
243
244 /* 239 /*
245 * Some OHCI hardware doesn't handle late TDs 240 * Some OHCI hardware doesn't handle late TDs
246 * correctly. After retiring them it proceeds 241 * correctly. After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
 				urb_priv->td_cnt = DIV_ROUND_UP(
 						(u16) (next - frame),
 						ed->interval);
+				if (urb_priv->td_cnt >= urb_priv->length) {
+					++urb_priv->td_cnt; /* Mark it */
+					ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+							urb, frame, length,
+							next);
+				}
 			}
 		}
 		urb->start_frame = frame;
+		ed->last_iso = frame + length;
 	}
 
 	/* fill the TDs and link them to the ed; and
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
 	struct device *dev = ohci_to_hcd(ohci)->self.controller;
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+
 	// ASSERT (urb->hcpriv != 0);
 
+ restart:
 	urb_free_priv (ohci, urb->hcpriv);
 	urb->hcpriv = NULL;
 	if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
 		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
 		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	}
+
+	/*
+	 * An isochronous URB that is sumitted too late won't have any TDs
+	 * (marked by the fact that the td_cnt value is larger than the
+	 * actual number of TDs). If the next URB on this endpoint is like
+	 * that, give it back now.
+	 */
+	if (!list_empty(&ep->urb_list)) {
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		urb_priv = urb->hcpriv;
+		if (urb_priv->td_cnt > urb_priv->length) {
+			status = 0;
+			goto restart;
+		}
+	}
 }
 
 
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
 		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
 		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
 						(data & 0x0FFF) | 0xE000);
-		td->ed->last_iso = info & 0xffff;
 	} else {
 		td->hwCBP = cpu_to_hc32 (ohci, data);
 	}
@@ -996,7 +1014,7 @@ rescan_this:
 			urb_priv->td_cnt++;
 
 			/* if URB is done, clean up */
-			if (urb_priv->td_cnt == urb_priv->length) {
+			if (urb_priv->td_cnt >= urb_priv->length) {
 				modified = completed = 1;
 				finish_urb(ohci, urb, 0);
 			}
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
 		urb_priv->td_cnt++;
 
 		/* If all this urb's TDs are done, call complete() */
-		if (urb_priv->td_cnt == urb_priv->length)
+		if (urb_priv->td_cnt >= urb_priv->length)
 			finish_urb(ohci, urb, status);
 
 		/* clean schedule: unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
293 .remove = usb_hcd_pci_remove, 293 .remove = usb_hcd_pci_remove,
294 .shutdown = uhci_shutdown, 294 .shutdown = uhci_shutdown,
295 295
296#ifdef CONFIG_PM_SLEEP 296#ifdef CONFIG_PM
297 .driver = { 297 .driver = {
298 .pm = &usb_hcd_pci_pm_ops 298 .pm = &usb_hcd_pci_pm_ops
299 }, 299 },
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 	}
 
 	/* Fell behind? */
-	if (uhci_frame_before_eq(frame, next)) {
+	if (!uhci_frame_before_eq(next, frame)) {
 
 		/* USB_ISO_ASAP: Round up to the first available slot */
 		if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 					-qh->period;
 
 		/*
-		 * Not ASAP: Use the next slot in the stream. If
-		 * the entire URB falls before the threshold, fail.
+		 * Not ASAP: Use the next slot in the stream,
+		 * no matter what.
 		 */
 		else if (!uhci_frame_before_eq(next,
 				frame + (urb->number_of_packets - 1) *
 					qh->period))
-			return -EXDEV;
+			dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+					urb, frame,
+					(urb->number_of_packets - 1) *
+						qh->period,
+					next);
 		}
 	}
 
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..773a6b28c4f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
 			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
 	}
-	cmd->command_trb = xhci->cmd_ring->enqueue;
+	cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
 	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
 	xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
  * - Mark a port as being done with device resume,
  *   and ring the endpoint doorbells.
  * - Stop the Synopsys redriver Compliance Mode polling.
+ * - Drop and reacquire the xHCI lock, in order to wait for port resume.
  */
 static u32 xhci_get_port_status(struct usb_hcd *hcd,
 		struct xhci_bus_state *bus_state,
 		__le32 __iomem **port_array,
-		u16 wIndex, u32 raw_port_status)
+		u16 wIndex, u32 raw_port_status,
+		unsigned long flags)
+	__releases(&xhci->lock)
+	__acquires(&xhci->lock)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 			return 0xffffffff;
 		if (time_after_eq(jiffies,
 					bus_state->resume_done[wIndex])) {
+			int time_left;
+
 			xhci_dbg(xhci, "Resume USB2 port %d\n",
 					wIndex + 1);
 			bus_state->resume_done[wIndex] = 0;
 			clear_bit(wIndex, &bus_state->resuming_ports);
+
+			set_bit(wIndex, &bus_state->rexit_ports);
 			xhci_set_link_state(xhci, port_array, wIndex,
 					XDEV_U0);
-			xhci_dbg(xhci, "set port %d resume\n",
-					wIndex + 1);
-			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-					wIndex + 1);
-			if (!slot_id) {
-				xhci_dbg(xhci, "slot_id is zero\n");
-				return 0xffffffff;
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			time_left = wait_for_completion_timeout(
+					&bus_state->rexit_done[wIndex],
+					msecs_to_jiffies(
+						XHCI_MAX_REXIT_TIMEOUT));
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			if (time_left) {
+				slot_id = xhci_find_slot_id_by_port(hcd,
+						xhci, wIndex + 1);
+				if (!slot_id) {
+					xhci_dbg(xhci, "slot_id is zero\n");
+					return 0xffffffff;
+				}
+				xhci_ring_device(xhci, slot_id);
+			} else {
+				int port_status = xhci_readl(xhci,
+						port_array[wIndex]);
+				xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+						XHCI_MAX_REXIT_TIMEOUT,
+						port_status);
+				status |= USB_PORT_STAT_SUSPEND;
+				clear_bit(wIndex, &bus_state->rexit_ports);
 			}
-			xhci_ring_device(xhci, slot_id);
+
 			bus_state->port_c_suspend |= 1 << wIndex;
 			bus_state->suspended_ports &= ~(1 << wIndex);
 		} else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			break;
 		}
 		status = xhci_get_port_status(hcd, bus_state, port_array,
-				wIndex, temp);
+				wIndex, temp, flags);
 		if (status == 0xffffffff)
 			goto error;
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < USB_MAXCHILDREN; ++i) {
 		xhci->bus_state[0].resume_done[i] = 0;
 		xhci->bus_state[1].resume_done[i] = 0;
+		/* Only the USB 2.0 completions will ever be used. */
+		init_completion(&xhci->bus_state[1].rexit_done[i]);
 	}
 
 	if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..236c3aabe940 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
 	/* suspend and resume implemented later */
 
 	.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver = {
 		.pm = &usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+	/* Enqueue pointer can be left pointing to the link TRB,
+	 * we must handle that
+	 */
+	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+		return ring->enq_seg->next->trbs;
+	return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment. This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
-	ep->stopped_td = NULL;
-	ep->stopped_trb = NULL;
+
+	/* Clear stopped_td and stopped_trb if endpoint is not halted */
+	if (!(ep->ep_state & EP_HALTED)) {
+		ep->stopped_td = NULL;
+		ep->stopped_trb = NULL;
+	}
 
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			inc_deq(xhci, xhci->cmd_ring);
 			return;
 		}
+		/* There is no command to handle if we get a stop event when the
+		 * command ring is empty, event->cmd_trb points to the next
+		 * unset command
+		 */
+		if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+			return;
 	}
 
 	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		}
 	}
 
+	/*
+	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+	 * RExit to a disconnect state). If so, let the the driver know it's
+	 * out of the RExit state.
+	 */
+	if (!DEV_SUPERSPEED(temp) &&
+			test_and_clear_bit(faked_port_index,
+				&bus_state->rexit_ports)) {
+		complete(&bus_state->rexit_done[faked_port_index]);
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
 	if (hcd->speed != HCD_USB3)
 		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
 					PORT_PLC);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..1e36dbb48366 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (command) {
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
-		command->command_trb = xhci->cmd_ring->enqueue;
-
-		/* Enqueue pointer can be left pointing to the link TRB,
-		 * we must handle that
-		 */
-		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
-			command->command_trb =
-				xhci->cmd_ring->enq_seg->next->trbs;
-
+		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 	init_completion(cmd_completion);
 
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	if (!ctx_change)
 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
 				udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3439 3431
3440 /* Attempt to submit the Reset Device command to the command ring */ 3432 /* Attempt to submit the Reset Device command to the command ring */
3441 spin_lock_irqsave(&xhci->lock, flags); 3433 spin_lock_irqsave(&xhci->lock, flags);
3442 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3434 reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3443
3444 /* Enqueue pointer can be left pointing to the link TRB,
3445 * we must handle that
3446 */
3447 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3448 reset_device_cmd->command_trb =
3449 xhci->cmd_ring->enq_seg->next->trbs;
3450 3435
3451 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3436 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3452 ret = xhci_queue_reset_device(xhci, slot_id); 3437 ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3650 union xhci_trb *cmd_trb; 3635 union xhci_trb *cmd_trb;
3651 3636
3652 spin_lock_irqsave(&xhci->lock, flags); 3637 spin_lock_irqsave(&xhci->lock, flags);
3653 cmd_trb = xhci->cmd_ring->dequeue; 3638 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3654 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3639 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3655 if (ret) { 3640 if (ret) {
3656 spin_unlock_irqrestore(&xhci->lock, flags); 3641 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3785 slot_ctx->dev_info >> 27); 3770 slot_ctx->dev_info >> 27);
3786 3771
3787 spin_lock_irqsave(&xhci->lock, flags); 3772 spin_lock_irqsave(&xhci->lock, flags);
3788 cmd_trb = xhci->cmd_ring->dequeue; 3773 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3789 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3774 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3790 udev->slot_id); 3775 udev->slot_id);
3791 if (ret) { 3776 if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..289fbfbae746 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
1412 unsigned long resume_done[USB_MAXCHILDREN]; 1412 unsigned long resume_done[USB_MAXCHILDREN];
1413 /* which ports have started to resume */ 1413 /* which ports have started to resume */
1414 unsigned long resuming_ports; 1414 unsigned long resuming_ports;
1415 /* Which ports are waiting on RExit to U0 transition. */
1416 unsigned long rexit_ports;
1417 struct completion rexit_done[USB_MAXCHILDREN];
1415}; 1418};
1416 1419
1420
1421/*
1422 * It can take up to 20 ms to transition from RExit to U0 on the
1423 * Intel Lynx Point LP xHCI host.
1424 */
1425#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
1426
1417static inline unsigned int hcd_index(struct usb_hcd *hcd) 1427static inline unsigned int hcd_index(struct usb_hcd *hcd)
1418{ 1428{
1419 if (hcd->speed == HCD_USB3) 1429 if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1840 union xhci_trb *cmd_trb); 1850 union xhci_trb *cmd_trb);
1841void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 1851void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1842 unsigned int ep_index, unsigned int stream_id); 1852 unsigned int ep_index, unsigned int stream_id);
1853union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
1843 1854
1844/* xHCI roothub code */ 1855/* xHCI roothub code */
1845void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, 1856void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
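
The body of xhci_find_next_enqueue() is added elsewhere in this patch (presumably in xhci-ring.c) and is not among the hunks quoted here. Reconstructing it purely from the open-coded pattern it replaces in xhci.c above (the enqueue pointer may be parked on a link TRB, in which case the first TRB of the next segment is the real slot), the helper is presumably equivalent to this sketch:

/* Sketch reconstructed from the removed call sites, not copied from the patch */
union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
{
	/*
	 * If enqueue sits on the segment's link TRB, the next usable slot
	 * is the first TRB of the following segment.
	 */
	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
		return ring->enq_seg->next->trbs;
	return ring->enqueue;
}

Centralizing the check means every caller that snapshots the command ring position (configure endpoint, reset device, enable slot, address device) gets identical link-TRB handling instead of repeating it, and the call sites that previously recorded cmd_ring->dequeue now track the enqueue side instead.
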
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 4047cbb91bac..bd4138d80a48 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
535 struct dsps_glue *glue; 535 struct dsps_glue *glue;
536 int ret; 536 int ret;
537 537
538 if (!strcmp(pdev->name, "musb-hdrc"))
539 return -ENODEV;
540
538 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); 541 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
539 if (!match) { 542 if (!match) {
540 dev_err(&pdev->dev, "fail to get matching of_match struct\n"); 543 dev_err(&pdev->dev, "fail to get matching of_match struct\n");
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9a08679d204d..b19ed213ab85 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
1790 musb->g.max_speed = USB_SPEED_HIGH; 1790 musb->g.max_speed = USB_SPEED_HIGH;
1791 musb->g.speed = USB_SPEED_UNKNOWN; 1791 musb->g.speed = USB_SPEED_UNKNOWN;
1792 1792
1793 MUSB_DEV_MODE(musb);
1794 musb->xceiv->otg->default_a = 0;
1795 musb->xceiv->state = OTG_STATE_B_IDLE;
1796
1793 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1794 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1795 musb->g.is_otg = 1; 1799 musb->g.is_otg = 1;
@@ -1849,7 +1853,6 @@ static int musb_gadget_start(struct usb_gadget *g,
1849 musb->gadget_driver = driver; 1853 musb->gadget_driver = driver;
1850 1854
1851 spin_lock_irqsave(&musb->lock, flags); 1855 spin_lock_irqsave(&musb->lock, flags);
1852 musb->is_active = 1;
1853 1856
1854 otg_set_peripheral(otg, &musb->g); 1857 otg_set_peripheral(otg, &musb->g);
1855 musb->xceiv->state = OTG_STATE_B_IDLE; 1858 musb->xceiv->state = OTG_STATE_B_IDLE;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index b2f29c9aebbf..02799a5efcd4 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
241 241
242/* platform driver interface */ 242/* platform driver interface */
243 243
244static int __init gpio_vbus_probe(struct platform_device *pdev) 244static int gpio_vbus_probe(struct platform_device *pdev)
245{ 245{
246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
247 struct gpio_vbus_data *gpio_vbus; 247 struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
349 return err; 349 return err;
350} 350}
351 351
352static int __exit gpio_vbus_remove(struct platform_device *pdev) 352static int gpio_vbus_remove(struct platform_device *pdev)
353{ 353{
354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); 354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
398}; 398};
399#endif 399#endif
400 400
401/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
402
403MODULE_ALIAS("platform:gpio-vbus"); 401MODULE_ALIAS("platform:gpio-vbus");
404 402
405static struct platform_driver gpio_vbus_driver = { 403static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
410 .pm = &gpio_vbus_dev_pm_ops, 408 .pm = &gpio_vbus_dev_pm_ops,
411#endif 409#endif
412 }, 410 },
413 .remove = __exit_p(gpio_vbus_remove), 411 .probe = gpio_vbus_probe,
412 .remove = gpio_vbus_remove,
414}; 413};
415 414
416module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); 415module_platform_driver(gpio_vbus_driver);
417 416
418MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); 417MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
419MODULE_AUTHOR("Philipp Zabel"); 418MODULE_AUTHOR("Philipp Zabel");
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index fc15694d3031..4e8a0405f956 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -79,7 +79,7 @@ static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
79 return &dpll_map[i].params; 79 return &dpll_map[i].params;
80 } 80 }
81 81
82 return 0; 82 return NULL;
83} 83}
84 84
85static int omap_usb3_suspend(struct usb_phy *x, int suspend) 85static int omap_usb3_suspend(struct usb_phy *x, int suspend)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c454bfa22a10..ddb9c51f2c99 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -60,7 +60,7 @@ config USB_SERIAL_SIMPLE
60 - Suunto ANT+ USB device. 60 - Suunto ANT+ USB device.
61 - Fundamental Software dongle. 61 - Fundamental Software dongle.
62 - HP4x calculators 62 - HP4x calculators
63 - a number of Motoroloa phones 63 - a number of Motorola phones
64 - Siemens USB/MPI adapter. 64 - Siemens USB/MPI adapter.
65 - ViVOtech ViVOpay USB device. 65 - ViVOtech ViVOpay USB device.
66 - Infineon Modem Flashloader USB interface 66 - Infineon Modem Flashloader USB interface
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1cf6f125f5f0..80a7104d5ddb 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
81 81
82#define HUAWEI_VENDOR_ID 0x12D1 82#define HUAWEI_VENDOR_ID 0x12D1
83#define HUAWEI_PRODUCT_E173 0x140C 83#define HUAWEI_PRODUCT_E173 0x140C
84#define HUAWEI_PRODUCT_E1750 0x1406
84#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
85#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
86#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
@@ -567,6 +568,8 @@ static const struct usb_device_id option_ids[] = {
567 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 569 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
569 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 570 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
572 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
570 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 574 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e7a84f0f5179..bedf8e47713b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -139,6 +139,7 @@ enum pl2303_type {
139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */
140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */ 140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
141 TB, /* TB version */ 141 TB, /* TB version */
142 HX_CLONE, /* Cheap and less functional clone of the HX chip */
142}; 143};
143/* 144/*
144 * NOTE: don't know the difference between type 0 and type 1, 145 * NOTE: don't know the difference between type 0 and type 1,
@@ -206,8 +207,23 @@ static int pl2303_startup(struct usb_serial *serial)
206 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB 207 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
207 */ 208 */
208 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) { 209 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
209 type = HX_TA; 210 /* Check if the device is a clone */
210 type_str = "X/HX/TA"; 211 pl2303_vendor_read(0x9494, 0, serial, buf);
212 /*
213 * NOTE: Not sure if this read is really needed.
214 * The HX returns 0x00, the clone 0x02, but the Windows
215 * driver seems to ignore the value and continues.
216 */
217 pl2303_vendor_write(0x0606, 0xaa, serial);
218 pl2303_vendor_read(0x8686, 0, serial, buf);
219 if (buf[0] != 0xaa) {
220 type = HX_CLONE;
221 type_str = "X/HX clone (limited functionality)";
222 } else {
223 type = HX_TA;
224 type_str = "X/HX/TA";
225 }
226 pl2303_vendor_write(0x0606, 0x00, serial);
211 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) 227 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
212 == 0x400) { 228 == 0x400) {
213 type = HXD_EA_RA_SA; 229 type = HXD_EA_RA_SA;
@@ -305,8 +321,9 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
305{ 321{
306 /* 322 /*
307 * NOTE: Only the values defined in baud_sup are supported ! 323 * NOTE: Only the values defined in baud_sup are supported !
308 * => if unsupported values are set, the PL2303 seems to 324 * => if unsupported values are set, the PL2303 uses 9600 baud instead
309 * use 9600 baud (at least my PL2303X always does) 325 * => HX clones just don't work at unsupported baud rates < 115200 baud,
326 * for baud rates > 115200 they run at 115200 baud
310 */ 327 */
311 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 328 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
312 4800, 7200, 9600, 14400, 19200, 28800, 38400, 329 4800, 7200, 9600, 14400, 19200, 28800, 38400,
@@ -316,14 +333,14 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
316 * NOTE: With the exception of type_0/1 devices, the following 333 * NOTE: With the exception of type_0/1 devices, the following
317 * additional baud rates are supported (tested with HX rev. 3A only): 334 * additional baud rates are supported (tested with HX rev. 3A only):
318 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 335 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
319 * 403200, 806400. (*: not HX) 336 * 403200, 806400. (*: not HX and HX clones)
320 * 337 *
321 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000; 338 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
322 * type_0+1: 1228800; RA: 921600; SA: 115200 339 * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
323 * 340 *
324 * As long as we are not using this encoding method for anything else 341 * As long as we are not using this encoding method for anything else
325 * than the type_0+1 and HX chips, there is no point in complicating 342 * than the type_0+1, HX and HX clone chips, there is no point in
326 * the code to support them. 343 * complicating the code to support them.
327 */ 344 */
328 int i; 345 int i;
329 346
@@ -347,6 +364,8 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
347 baud = min_t(int, baud, 6000000); 364 baud = min_t(int, baud, 6000000);
348 else if (type == type_0 || type == type_1) 365 else if (type == type_0 || type == type_1)
349 baud = min_t(int, baud, 1228800); 366 baud = min_t(int, baud, 1228800);
367 else if (type == HX_CLONE)
368 baud = min_t(int, baud, 115200);
350 /* Direct (standard) baud rate encoding method */ 369 /* Direct (standard) baud rate encoding method */
351 put_unaligned_le32(baud, buf); 370 put_unaligned_le32(baud, buf);
352 371
@@ -359,7 +378,8 @@ static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
359 /* 378 /*
360 * Divisor based baud rate encoding method 379 * Divisor based baud rate encoding method
361 * 380 *
362 * NOTE: it's not clear if the type_0/1 chips support this method 381 * NOTE: HX clones do NOT support this method.
382 * It's not clear if the type_0/1 chips support it.
363 * 383 *
364 * divisor = 12MHz * 32 / baudrate = 2^A * B 384 * divisor = 12MHz * 32 / baudrate = 2^A * B
365 * 385 *
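
To make the comment's formula concrete, here is the arithmetic only; the actual packing of A and B into the 4-byte buffer is done by pl2303_baudrate_encode_divisor() itself and is not reproduced, and the split shown is illustrative rather than necessarily the one the hardware expects:

/* Hypothetical illustration of: divisor = 12MHz * 32 / baud = 2^A * B */
unsigned int baseclk = 12000000 * 32;	/* 384,000,000 */
unsigned int baud = 9600;
unsigned int divisor = baseclk / baud;	/* 40,000 for 9600 baud */
unsigned int A = 0, B = divisor;

while ((B & 1) == 0) {			/* factor out powers of two */
	B >>= 1;
	A++;
}
/* For this example: 40,000 = 2^6 * 625, i.e. A = 6, B = 625 */
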
@@ -452,7 +472,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
452 * 1) Direct method: encodes the baud rate value directly 472 * 1) Direct method: encodes the baud rate value directly
453 * => supported by all chip types 473 * => supported by all chip types
454 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32) 474 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
455 * => supported by HX chips (and likely not by type_0/1 chips) 475 * => not supported by HX clones (and likely type_0/1 chips)
456 * 476 *
457 * NOTE: Although the divisor based baud rate encoding method is much 477 * NOTE: Although the divisor based baud rate encoding method is much
458 * more flexible, some of the standard baud rate values can not be 478 * more flexible, some of the standard baud rate values can not be
@@ -460,7 +480,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
460 * the device likely uses the same baud rate generator for both methods 480 * the device likely uses the same baud rate generator for both methods
461 * so that there is likely no difference. 481 * so that there is likely no difference.
462 */ 482 */
463 if (type == type_0 || type == type_1) 483 if (type == type_0 || type == type_1 || type == HX_CLONE)
464 baud = pl2303_baudrate_encode_direct(baud, type, buf); 484 baud = pl2303_baudrate_encode_direct(baud, type, buf);
465 else 485 else
466 baud = pl2303_baudrate_encode_divisor(baud, type, buf); 486 baud = pl2303_baudrate_encode_divisor(baud, type, buf);
@@ -813,6 +833,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
813 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
814 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
815 0, NULL, 0, 100); 835 0, NULL, 0, 100);
836 /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
816 if (result) 837 if (result)
817 dev_err(&port->dev, "error sending break = %d\n", result); 838 dev_err(&port->dev, "error sending break = %d\n", result);
818} 839}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4b79a1f2f901..ce5221fa393a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -461,7 +461,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
461 u32 i; 461 u32 i;
462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 463 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
464 } 464 }
465 465
466 tcm_vhost_put_inflight(tv_cmd->inflight); 466 tcm_vhost_put_inflight(tv_cmd->inflight);
467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
728 } 728 }
729 se_sess = tv_nexus->tvn_se_sess; 729 se_sess = tv_nexus->tvn_se_sess;
730 730
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
735 }
736
732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
733 sg = cmd->tvc_sgl; 738 sg = cmd->tvc_sgl;
734 pages = cmd->tvc_upages; 739 pages = cmd->tvc_upages;
@@ -1373,21 +1378,30 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1373 return 0; 1378 return 0;
1374} 1379}
1375 1380
1381static void vhost_scsi_free(struct vhost_scsi *vs)
1382{
1383 if (is_vmalloc_addr(vs))
1384 vfree(vs);
1385 else
1386 kfree(vs);
1387}
1388
1376static int vhost_scsi_open(struct inode *inode, struct file *f) 1389static int vhost_scsi_open(struct inode *inode, struct file *f)
1377{ 1390{
1378 struct vhost_scsi *vs; 1391 struct vhost_scsi *vs;
1379 struct vhost_virtqueue **vqs; 1392 struct vhost_virtqueue **vqs;
1380 int r, i; 1393 int r = -ENOMEM, i;
1381 1394
1382 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 1395 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1383 if (!vs) 1396 if (!vs) {
1384 return -ENOMEM; 1397 vs = vzalloc(sizeof(*vs));
1398 if (!vs)
1399 goto err_vs;
1400 }
1385 1401
1386 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); 1402 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1387 if (!vqs) { 1403 if (!vqs)
1388 kfree(vs); 1404 goto err_vqs;
1389 return -ENOMEM;
1390 }
1391 1405
1392 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1406 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1393 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); 1407 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
@@ -1407,14 +1421,18 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1407 1421
1408 tcm_vhost_init_inflight(vs, NULL); 1422 tcm_vhost_init_inflight(vs, NULL);
1409 1423
1410 if (r < 0) { 1424 if (r < 0)
1411 kfree(vqs); 1425 goto err_init;
1412 kfree(vs);
1413 return r;
1414 }
1415 1426
1416 f->private_data = vs; 1427 f->private_data = vs;
1417 return 0; 1428 return 0;
1429
1430err_init:
1431 kfree(vqs);
1432err_vqs:
1433 vhost_scsi_free(vs);
1434err_vs:
1435 return r;
1418} 1436}
1419 1437
1420static int vhost_scsi_release(struct inode *inode, struct file *f) 1438static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1431,7 +1449,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1431 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1449 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1432 vhost_scsi_flush(vs); 1450 vhost_scsi_flush(vs);
1433 kfree(vs->dev.vqs); 1451 kfree(vs->dev.vqs);
1434 kfree(vs); 1452 vhost_scsi_free(vs);
1435 return 0; 1453 return 0;
1436} 1454}
1437 1455
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9a9502a4aa50..69068e0d8f31 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -161,9 +161,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
161 if (list_empty(&work->node)) { 161 if (list_empty(&work->node)) {
162 list_add_tail(&work->node, &dev->work_list); 162 list_add_tail(&work->node, &dev->work_list);
163 work->queue_seq++; 163 work->queue_seq++;
164 spin_unlock_irqrestore(&dev->work_lock, flags);
164 wake_up_process(dev->worker); 165 wake_up_process(dev->worker);
166 } else {
167 spin_unlock_irqrestore(&dev->work_lock, flags);
165 } 168 }
166 spin_unlock_irqrestore(&dev->work_lock, flags);
167} 169}
168EXPORT_SYMBOL_GPL(vhost_work_queue); 170EXPORT_SYMBOL_GPL(vhost_work_queue);
169 171
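
After this change vhost_work_queue() reads roughly as follows; the opening lines are not part of the hunk and are reconstructed from the spin_unlock_irqrestore() calls, so treat them as an approximation rather than a quote:

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		/* Drop the lock before waking so the freshly woken worker
		 * does not immediately contend on work_lock.
		 */
		spin_unlock_irqrestore(&dev->work_lock, flags);
		wake_up_process(dev->worker);
	} else {
		spin_unlock_irqrestore(&dev->work_lock, flags);
	}
}

The observable behaviour is unchanged; only the ordering of the unlock versus wake_up_process() moves, which presumably shortens the window in which the worker wakes up and then has to wait for the queuing CPU to release the lock.
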
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
514 if (IS_ERR(ctrl->clk)) { 514 if (IS_ERR(ctrl->clk)) {
515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); 515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
516 ret = -ENOENT; 516 ret = -ENOENT;
517 goto failed_get_clk; 517 goto failed;
518 } 518 }
519 clk_prepare_enable(ctrl->clk); 519 clk_prepare_enable(ctrl->clk);
520 520
@@ -551,21 +551,8 @@ failed_path_init:
551 path_deinit(path_plat); 551 path_deinit(path_plat);
552 } 552 }
553 553
554 if (ctrl->clk) { 554 clk_disable_unprepare(ctrl->clk);
555 devm_clk_put(ctrl->dev, ctrl->clk);
556 clk_disable_unprepare(ctrl->clk);
557 }
558failed_get_clk:
559 devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
560failed: 555failed:
561 if (ctrl) {
562 if (ctrl->reg_base)
563 devm_iounmap(ctrl->dev, ctrl->reg_base);
564 devm_release_mem_region(ctrl->dev, res->start,
565 resource_size(res));
566 devm_kfree(ctrl->dev, ctrl);
567 }
568
569 dev_err(&pdev->dev, "device init failed\n"); 556 dev_err(&pdev->dev, "device init failed\n");
570 557
571 return ret; 558 return ret;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
620 break; 620 break;
621 case 3: 621 case 3:
622 bits_per_pixel = 32; 622 bits_per_pixel = 32;
623 break;
623 case 1: 624 case 1:
624 default: 625 default:
625 return -EINVAL; 626 return -EINVAL;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, 2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
2076 info->monspecs.modedb, 16)) { 2076 info->monspecs.modedb, 16)) {
2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); 2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
2078 err = -EINVAL;
2078 goto err_map_video; 2079 goto err_map_video;
2079 } 2080 }
2080 2081
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2097 info->fix.smem_len >> 10, info->var.xres, 2098 info->fix.smem_len >> 10, info->var.xres,
2098 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); 2099 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
2099 2100
2100 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2101 err = fb_alloc_cmap(&info->cmap, 256, 0);
2102 if (err < 0)
2101 goto err_map_video; 2103 goto err_map_video;
2102 2104
2103 err = register_framebuffer(info); 2105 err = register_framebuffer(info);
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 timing_np = of_find_node_by_name(np, name); 123 timing_np = of_get_child_by_name(np, name);
124 if (!timing_np) { 124 if (!timing_np) {
125 pr_err("%s: could not find node '%s'\n", 125 pr_err("%s: could not find node '%s'\n",
126 of_node_full_name(np), name); 126 of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
143 struct display_timings *disp; 143 struct display_timings *disp;
144 144
145 if (!np) { 145 if (!np) {
146 pr_err("%s: no devicenode given\n", of_node_full_name(np)); 146 pr_err("%s: no device node given\n", of_node_full_name(np));
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 timings_np = of_find_node_by_name(np, "display-timings"); 150 timings_np = of_get_child_by_name(np, "display-timings");
151 if (!timings_np) { 151 if (!timings_np) {
152 pr_err("%s: could not find display-timings node\n", 152 pr_err("%s: could not find display-timings node\n",
153 of_node_full_name(np)); 153 of_node_full_name(np));
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
35 35
36config DISPLAY_PANEL_DSI_CM 36config DISPLAY_PANEL_DSI_CM
37 tristate "Generic DSI Command Mode Panel" 37 tristate "Generic DSI Command Mode Panel"
38 depends on BACKLIGHT_CLASS_DEVICE
38 help 39 help
39 Driver for generic DSI command mode panels. 40 Driver for generic DSI command mode panels.
40 41
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
191 in = omap_dss_find_output(pdata->source); 191 in = omap_dss_find_output(pdata->source);
192 if (in == NULL) { 192 if (in == NULL) {
193 dev_err(&pdev->dev, "Failed to find video source\n"); 193 dev_err(&pdev->dev, "Failed to find video source\n");
194 return -ENODEV; 194 return -EPROBE_DEFER;
195 } 195 }
196 196
197 ddata->in = in; 197 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 dev_err(&pdev->dev, "Failed to find video source\n"); 265 dev_err(&pdev->dev, "Failed to find video source\n");
266 return -ENODEV; 266 return -EPROBE_DEFER;
267 } 267 }
268 268
269 ddata->in = in; 269 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
290 in = omap_dss_find_output(pdata->source); 290 in = omap_dss_find_output(pdata->source);
291 if (in == NULL) { 291 if (in == NULL) {
292 dev_err(&pdev->dev, "Failed to find video source\n"); 292 dev_err(&pdev->dev, "Failed to find video source\n");
293 return -ENODEV; 293 return -EPROBE_DEFER;
294 } 294 }
295 295
296 ddata->in = in; 296 ddata->in = in;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3691 } 3691 }
3692 3692
3693 pm_runtime_enable(&pdev->dev); 3693 pm_runtime_enable(&pdev->dev);
3694 pm_runtime_irq_safe(&pdev->dev);
3694 3695
3695 r = dispc_runtime_get(); 3696 r = dispc_runtime_get();
3696 if (r) 3697 if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 (info->var.bits_per_pixel * info->var.xres_virtual); 1336 (info->var.bits_per_pixel * info->var.xres_virtual);
1337 if (info->var.yres_virtual < info->var.yres) { 1337 if (info->var.yres_virtual < info->var.yres) {
1338 dev_err(info->device, "virtual vertical size smaller than real\n"); 1338 dev_err(info->device, "virtual vertical size smaller than real\n");
1339 goto err_find_mode; 1339 rc = -EINVAL;
1340 }
1341
1342 /* maximize virtual vertical size for fast scrolling */
1343 info->var.yres_virtual = info->fix.smem_len * 8 /
1344 (info->var.bits_per_pixel * info->var.xres_virtual);
1345 if (info->var.yres_virtual < info->var.yres) {
1346 dev_err(info->device, "virtual vertical size smaller than real\n");
1347 goto err_find_mode; 1340 goto err_find_mode;
1348 } 1341 }
1349 1342
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
398 if (nr_pages > ARRAY_SIZE(frame_list)) 398 if (nr_pages > ARRAY_SIZE(frame_list))
399 nr_pages = ARRAY_SIZE(frame_list); 399 nr_pages = ARRAY_SIZE(frame_list);
400 400
401 scratch_page = get_balloon_scratch_page();
402
403 for (i = 0; i < nr_pages; i++) { 401 for (i = 0; i < nr_pages; i++) {
404 page = alloc_page(gfp); 402 page = alloc_page(gfp);
405 if (page == NULL) { 403 if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
413 411
414 scrub_page(page); 412 scrub_page(page);
415 413
414 /*
415 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent.
418 */
419 scratch_page = get_balloon_scratch_page();
416#ifdef CONFIG_XEN_HAVE_PVMMU 420#ifdef CONFIG_XEN_HAVE_PVMMU
417 if (xen_pv_domain() && !PageHighMem(page)) { 421 if (xen_pv_domain() && !PageHighMem(page)) {
418 ret = HYPERVISOR_update_va_mapping( 422 ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
422 BUG_ON(ret); 426 BUG_ON(ret);
423 } 427 }
424#endif 428#endif
425 }
426
427 /* Ensure that ballooned highmem pages don't have kmaps. */
428 kmap_flush_unused();
429 flush_tlb_all();
430
431 /* No more mappings: invalidate P2M and add to balloon. */
432 for (i = 0; i < nr_pages; i++) {
433 pfn = mfn_to_pfn(frame_list[i]);
434 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 429 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
435 unsigned long p; 430 unsigned long p;
436 p = page_to_pfn(scratch_page); 431 p = page_to_pfn(scratch_page);
437 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 432 __set_phys_to_machine(pfn, pfn_to_mfn(p));
438 } 433 }
434 put_balloon_scratch_page();
435
439 balloon_append(pfn_to_page(pfn)); 436 balloon_append(pfn_to_page(pfn));
440 } 437 }
441 438
442 put_balloon_scratch_page(); 439 /* Ensure that ballooned highmem pages don't have kmaps. */
440 kmap_flush_unused();
441 flush_tlb_all();
443 442
444 set_xen_guest_handle(reservation.extent_start, frame_list); 443 set_xen_guest_handle(reservation.extent_start, frame_list);
445 reservation.nr_extents = nr_pages; 444 reservation.nr_extents = nr_pages;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 58e6cbce4156..08f2e1e9a7e6 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -603,10 +603,11 @@ static int v9fs_cache_register(void)
603 if (ret < 0) 603 if (ret < 0)
604 return ret; 604 return ret;
605#ifdef CONFIG_9P_FSCACHE 605#ifdef CONFIG_9P_FSCACHE
606 return fscache_register_netfs(&v9fs_cache_netfs); 606 ret = fscache_register_netfs(&v9fs_cache_netfs);
607#else 607 if (ret < 0)
608 return ret; 608 v9fs_destroy_inode_cache();
609#endif 609#endif
610 return ret;
610} 611}
611 612
612static void v9fs_cache_unregister(void) 613static void v9fs_cache_unregister(void)
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 53687bbf2296..a7c481402c46 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -267,14 +267,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
267 } 267 }
268 268
269 /* Only creates */ 269 /* Only creates */
270 if (!(flags & O_CREAT)) 270 if (!(flags & O_CREAT) || dentry->d_inode)
271 return finish_no_open(file, res); 271 return finish_no_open(file, res);
272 else if (dentry->d_inode) {
273 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
274 return -EEXIST;
275 else
276 return finish_no_open(file, res);
277 }
278 272
279 v9ses = v9fs_inode2v9ses(dir); 273 v9ses = v9fs_inode2v9ses(dir);
280 274
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 646337dc5201..529300327f45 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -600,9 +600,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
600 600
601 /* lock down the parent dentry so we can peer at it */ 601 /* lock down the parent dentry so we can peer at it */
602 parent = dget_parent(dentry); 602 parent = dget_parent(dentry);
603 if (!parent->d_inode)
604 goto out_bad;
605
606 dir = AFS_FS_I(parent->d_inode); 603 dir = AFS_FS_I(parent->d_inode);
607 604
608 /* validate the parent directory */ 605 /* validate the parent directory */
diff --git a/fs/aio.c b/fs/aio.c
index 6b868f0e0c4c..067e3d340c35 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,10 +167,25 @@ static int __init aio_setup(void)
167} 167}
168__initcall(aio_setup); 168__initcall(aio_setup);
169 169
170static void put_aio_ring_file(struct kioctx *ctx)
171{
172 struct file *aio_ring_file = ctx->aio_ring_file;
173 if (aio_ring_file) {
174 truncate_setsize(aio_ring_file->f_inode, 0);
175
176 /* Prevent further access to the kioctx from migratepages */
177 spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
178 aio_ring_file->f_inode->i_mapping->private_data = NULL;
179 ctx->aio_ring_file = NULL;
180 spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
181
182 fput(aio_ring_file);
183 }
184}
185
170static void aio_free_ring(struct kioctx *ctx) 186static void aio_free_ring(struct kioctx *ctx)
171{ 187{
172 int i; 188 int i;
173 struct file *aio_ring_file = ctx->aio_ring_file;
174 189
175 for (i = 0; i < ctx->nr_pages; i++) { 190 for (i = 0; i < ctx->nr_pages; i++) {
176 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 191 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -178,14 +193,10 @@ static void aio_free_ring(struct kioctx *ctx)
178 put_page(ctx->ring_pages[i]); 193 put_page(ctx->ring_pages[i]);
179 } 194 }
180 195
196 put_aio_ring_file(ctx);
197
181 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 198 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
182 kfree(ctx->ring_pages); 199 kfree(ctx->ring_pages);
183
184 if (aio_ring_file) {
185 truncate_setsize(aio_ring_file->f_inode, 0);
186 fput(aio_ring_file);
187 ctx->aio_ring_file = NULL;
188 }
189} 200}
190 201
191static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 202static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -207,9 +218,8 @@ static int aio_set_page_dirty(struct page *page)
207static int aio_migratepage(struct address_space *mapping, struct page *new, 218static int aio_migratepage(struct address_space *mapping, struct page *new,
208 struct page *old, enum migrate_mode mode) 219 struct page *old, enum migrate_mode mode)
209{ 220{
210 struct kioctx *ctx = mapping->private_data; 221 struct kioctx *ctx;
211 unsigned long flags; 222 unsigned long flags;
212 unsigned idx = old->index;
213 int rc; 223 int rc;
214 224
215 /* Writeback must be complete */ 225 /* Writeback must be complete */
@@ -224,10 +234,23 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
224 234
225 get_page(new); 235 get_page(new);
226 236
227 spin_lock_irqsave(&ctx->completion_lock, flags); 237 /* We can potentially race against kioctx teardown here. Use the
228 migrate_page_copy(new, old); 238 * address_space's private data lock to protect the mapping's
229 ctx->ring_pages[idx] = new; 239 * private_data.
230 spin_unlock_irqrestore(&ctx->completion_lock, flags); 240 */
241 spin_lock(&mapping->private_lock);
242 ctx = mapping->private_data;
243 if (ctx) {
244 pgoff_t idx;
245 spin_lock_irqsave(&ctx->completion_lock, flags);
246 migrate_page_copy(new, old);
247 idx = old->index;
248 if (idx < (pgoff_t)ctx->nr_pages)
249 ctx->ring_pages[idx] = new;
250 spin_unlock_irqrestore(&ctx->completion_lock, flags);
251 } else
252 rc = -EBUSY;
253 spin_unlock(&mapping->private_lock);
231 254
232 return rc; 255 return rc;
233} 256}
@@ -617,8 +640,7 @@ out_freepcpu:
617out_freeref: 640out_freeref:
618 free_percpu(ctx->users.pcpu_count); 641 free_percpu(ctx->users.pcpu_count);
619out_freectx: 642out_freectx:
620 if (ctx->aio_ring_file) 643 put_aio_ring_file(ctx);
621 fput(ctx->aio_ring_file);
622 kmem_cache_free(kioctx_cachep, ctx); 644 kmem_cache_free(kioctx_cachep, ctx);
623 pr_debug("error allocating ioctx %d\n", err); 645 pr_debug("error allocating ioctx %d\n", err);
624 return ERR_PTR(err); 646 return ERR_PTR(err);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 3db70dae40d3..689e40d983ad 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -109,13 +109,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
109 109
110 pkt.hdr.proto_version = sbi->version; 110 pkt.hdr.proto_version = sbi->version;
111 pkt.hdr.type = type; 111 pkt.hdr.type = type;
112 mutex_lock(&sbi->wq_mutex);
113 112
114 /* Check if we have become catatonic */
115 if (sbi->catatonic) {
116 mutex_unlock(&sbi->wq_mutex);
117 return;
118 }
119 switch (type) { 113 switch (type) {
120 /* Kernel protocol v4 missing and expire packets */ 114 /* Kernel protocol v4 missing and expire packets */
121 case autofs_ptype_missing: 115 case autofs_ptype_missing:
@@ -427,7 +421,6 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
427 wq->tgid = current->tgid; 421 wq->tgid = current->tgid;
428 wq->status = -EINTR; /* Status return if interrupted */ 422 wq->status = -EINTR; /* Status return if interrupted */
429 wq->wait_ctr = 2; 423 wq->wait_ctr = 2;
430 mutex_unlock(&sbi->wq_mutex);
431 424
432 if (sbi->version < 5) { 425 if (sbi->version < 5) {
433 if (notify == NFY_MOUNT) 426 if (notify == NFY_MOUNT)
@@ -449,15 +442,15 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
449 (unsigned long) wq->wait_queue_token, wq->name.len, 442 (unsigned long) wq->wait_queue_token, wq->name.len,
450 wq->name.name, notify); 443 wq->name.name, notify);
451 444
452 /* autofs4_notify_daemon() may block */ 445 /* autofs4_notify_daemon() may block; it will unlock ->wq_mutex */
453 autofs4_notify_daemon(sbi, wq, type); 446 autofs4_notify_daemon(sbi, wq, type);
454 } else { 447 } else {
455 wq->wait_ctr++; 448 wq->wait_ctr++;
456 mutex_unlock(&sbi->wq_mutex);
457 kfree(qstr.name);
458 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 449 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
459 (unsigned long) wq->wait_queue_token, wq->name.len, 450 (unsigned long) wq->wait_queue_token, wq->name.len,
460 wq->name.name, notify); 451 wq->name.name, notify);
452 mutex_unlock(&sbi->wq_mutex);
453 kfree(qstr.name);
461 } 454 }
462 455
463 /* 456 /*
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 100edcc5e312..4c94a79991bb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1413 * long file_ofs 1413 * long file_ofs
1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1415 */ 1415 */
1416static void fill_files_note(struct memelfnote *note) 1416static int fill_files_note(struct memelfnote *note)
1417{ 1417{
1418 struct vm_area_struct *vma; 1418 struct vm_area_struct *vma;
1419 unsigned count, size, names_ofs, remaining, n; 1419 unsigned count, size, names_ofs, remaining, n;
@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note)
1428 names_ofs = (2 + 3 * count) * sizeof(data[0]); 1428 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1429 alloc: 1429 alloc:
1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1431 goto err; 1431 return -EINVAL;
1432 size = round_up(size, PAGE_SIZE); 1432 size = round_up(size, PAGE_SIZE);
1433 data = vmalloc(size); 1433 data = vmalloc(size);
1434 if (!data) 1434 if (!data)
1435 goto err; 1435 return -ENOMEM;
1436 1436
1437 start_end_ofs = data + 2; 1437 start_end_ofs = data + 2;
1438 name_base = name_curpos = ((char *)data) + names_ofs; 1438 name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note)
1485 1485
1486 size = name_curpos - (char *)data; 1486 size = name_curpos - (char *)data;
1487 fill_note(note, "CORE", NT_FILE, size, data); 1487 fill_note(note, "CORE", NT_FILE, size, data);
1488 err: ; 1488 return 0;
1489} 1489}
1490 1490
1491#ifdef CORE_DUMP_USE_REGSET 1491#ifdef CORE_DUMP_USE_REGSET
@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1686 fill_auxv_note(&info->auxv, current->mm); 1686 fill_auxv_note(&info->auxv, current->mm);
1687 info->size += notesize(&info->auxv); 1687 info->size += notesize(&info->auxv);
1688 1688
1689 fill_files_note(&info->files); 1689 if (fill_files_note(&info->files) == 0)
1690 info->size += notesize(&info->files); 1690 info->size += notesize(&info->files);
1691 1691
1692 return 1; 1692 return 1;
1693} 1693}
@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info,
1719 return 0; 1719 return 0;
1720 if (first && !writenote(&info->auxv, file, foffset)) 1720 if (first && !writenote(&info->auxv, file, foffset))
1721 return 0; 1721 return 0;
1722 if (first && !writenote(&info->files, file, foffset)) 1722 if (first && info->files.data &&
1723 !writenote(&info->files, file, foffset))
1723 return 0; 1724 return 0;
1724 1725
1725 for (i = 1; i < info->thread_notes; ++i) 1726 for (i = 1; i < info->thread_notes; ++i)
@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1806 1807
1807struct elf_note_info { 1808struct elf_note_info {
1808 struct memelfnote *notes; 1809 struct memelfnote *notes;
1810 struct memelfnote *notes_files;
1809 struct elf_prstatus *prstatus; /* NT_PRSTATUS */ 1811 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1810 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 1812 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1811 struct list_head thread_list; 1813 struct list_head thread_list;
@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1896 1898
1897 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); 1899 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
1898 fill_auxv_note(info->notes + 3, current->mm); 1900 fill_auxv_note(info->notes + 3, current->mm);
1899 fill_files_note(info->notes + 4); 1901 info->numnote = 4;
1900 1902
1901 info->numnote = 5; 1903 if (fill_files_note(info->notes + info->numnote) == 0) {
1904 info->notes_files = info->notes + info->numnote;
1905 info->numnote++;
1906 }
1902 1907
1903 /* Try to dump the FPU. */ 1908 /* Try to dump the FPU. */
1904 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, 1909 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info)
1960 kfree(list_entry(tmp, struct elf_thread_status, list)); 1965 kfree(list_entry(tmp, struct elf_thread_status, list));
1961 } 1966 }
1962 1967
1963 /* Free data allocated by fill_files_note(): */ 1968 /* Free data possibly allocated by fill_files_note(): */
1964 vfree(info->notes[4].data); 1969 if (info->notes_files)
1970 vfree(info->notes_files->data);
1965 1971
1966 kfree(info->prstatus); 1972 kfree(info->prstatus);
1967 kfree(info->psinfo); 1973 kfree(info->psinfo);
@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2044 struct vm_area_struct *vma, *gate_vma; 2050 struct vm_area_struct *vma, *gate_vma;
2045 struct elfhdr *elf = NULL; 2051 struct elfhdr *elf = NULL;
2046 loff_t offset = 0, dataoff, foffset; 2052 loff_t offset = 0, dataoff, foffset;
2047 struct elf_note_info info; 2053 struct elf_note_info info = { };
2048 struct elf_phdr *phdr4note = NULL; 2054 struct elf_phdr *phdr4note = NULL;
2049 struct elf_shdr *shdr4extnum = NULL; 2055 struct elf_shdr *shdr4extnum = NULL;
2050 Elf_Half e_phnum; 2056 Elf_Half e_phnum;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 60250847929f..fc60b31453ee 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -735,7 +735,7 @@ void bioset_integrity_free(struct bio_set *bs)
735 mempool_destroy(bs->bio_integrity_pool); 735 mempool_destroy(bs->bio_integrity_pool);
736 736
737 if (bs->bvec_integrity_pool) 737 if (bs->bvec_integrity_pool)
738 mempool_destroy(bs->bio_integrity_pool); 738 mempool_destroy(bs->bvec_integrity_pool);
739} 739}
740EXPORT_SYMBOL(bioset_integrity_free); 740EXPORT_SYMBOL(bioset_integrity_free);
741 741
diff --git a/fs/bio.c b/fs/bio.c
index b3b20ed9510e..ea5035da4d9a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
917 src_p = kmap_atomic(src_bv->bv_page); 917 src_p = kmap_atomic(src_bv->bv_page);
918 dst_p = kmap_atomic(dst_bv->bv_page); 918 dst_p = kmap_atomic(dst_bv->bv_page);
919 919
920 memcpy(dst_p + dst_bv->bv_offset, 920 memcpy(dst_p + dst_offset,
921 src_p + src_bv->bv_offset, 921 src_p + src_offset,
922 bytes); 922 bytes);
923 923
924 kunmap_atomic(dst_p); 924 kunmap_atomic(dst_p);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 58b7d14b08ee..08cc08f037a6 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -107,7 +107,8 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
107 worker->idle = 1; 107 worker->idle = 1;
108 108
109 /* the list may be empty if the worker is just starting */ 109 /* the list may be empty if the worker is just starting */
110 if (!list_empty(&worker->worker_list)) { 110 if (!list_empty(&worker->worker_list) &&
111 !worker->workers->stopping) {
111 list_move(&worker->worker_list, 112 list_move(&worker->worker_list,
112 &worker->workers->idle_list); 113 &worker->workers->idle_list);
113 } 114 }
@@ -127,7 +128,8 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
127 spin_lock_irqsave(&worker->workers->lock, flags); 128 spin_lock_irqsave(&worker->workers->lock, flags);
128 worker->idle = 0; 129 worker->idle = 0;
129 130
130 if (!list_empty(&worker->worker_list)) { 131 if (!list_empty(&worker->worker_list) &&
132 !worker->workers->stopping) {
131 list_move_tail(&worker->worker_list, 133 list_move_tail(&worker->worker_list,
132 &worker->workers->worker_list); 134 &worker->workers->worker_list);
133 } 135 }
@@ -412,6 +414,7 @@ void btrfs_stop_workers(struct btrfs_workers *workers)
412 int can_stop; 414 int can_stop;
413 415
414 spin_lock_irq(&workers->lock); 416 spin_lock_irq(&workers->lock);
417 workers->stopping = 1;
415 list_splice_init(&workers->idle_list, &workers->worker_list); 418 list_splice_init(&workers->idle_list, &workers->worker_list);
416 while (!list_empty(&workers->worker_list)) { 419 while (!list_empty(&workers->worker_list)) {
417 cur = workers->worker_list.next; 420 cur = workers->worker_list.next;
@@ -455,6 +458,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
455 workers->ordered = 0; 458 workers->ordered = 0;
456 workers->atomic_start_pending = 0; 459 workers->atomic_start_pending = 0;
457 workers->atomic_worker_start = async_helper; 460 workers->atomic_worker_start = async_helper;
461 workers->stopping = 0;
458} 462}
459 463
460/* 464/*
@@ -480,15 +484,19 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
480 atomic_set(&worker->num_pending, 0); 484 atomic_set(&worker->num_pending, 0);
481 atomic_set(&worker->refs, 1); 485 atomic_set(&worker->refs, 1);
482 worker->workers = workers; 486 worker->workers = workers;
483 worker->task = kthread_run(worker_loop, worker, 487 worker->task = kthread_create(worker_loop, worker,
484 "btrfs-%s-%d", workers->name, 488 "btrfs-%s-%d", workers->name,
485 workers->num_workers + 1); 489 workers->num_workers + 1);
486 if (IS_ERR(worker->task)) { 490 if (IS_ERR(worker->task)) {
487 ret = PTR_ERR(worker->task); 491 ret = PTR_ERR(worker->task);
488 kfree(worker);
489 goto fail; 492 goto fail;
490 } 493 }
494
491 spin_lock_irq(&workers->lock); 495 spin_lock_irq(&workers->lock);
496 if (workers->stopping) {
497 spin_unlock_irq(&workers->lock);
498 goto fail_kthread;
499 }
492 list_add_tail(&worker->worker_list, &workers->idle_list); 500 list_add_tail(&worker->worker_list, &workers->idle_list);
493 worker->idle = 1; 501 worker->idle = 1;
494 workers->num_workers++; 502 workers->num_workers++;
@@ -496,8 +504,13 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
496 WARN_ON(workers->num_workers_starting < 0); 504 WARN_ON(workers->num_workers_starting < 0);
497 spin_unlock_irq(&workers->lock); 505 spin_unlock_irq(&workers->lock);
498 506
507 wake_up_process(worker->task);
499 return 0; 508 return 0;
509
510fail_kthread:
511 kthread_stop(worker->task);
500fail: 512fail:
513 kfree(worker);
501 spin_lock_irq(&workers->lock); 514 spin_lock_irq(&workers->lock);
502 workers->num_workers_starting--; 515 workers->num_workers_starting--;
503 spin_unlock_irq(&workers->lock); 516 spin_unlock_irq(&workers->lock);
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 063698b90ce2..1f26792683ed 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -107,6 +107,8 @@ struct btrfs_workers {
107 107
108 /* extra name for this worker, used for current->name */ 108 /* extra name for this worker, used for current->name */
109 char *name; 109 char *name;
110
111 int stopping;
110}; 112};
111 113
112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); 114void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d0ae226926ee..71f074e1870b 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -213,7 +213,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
213static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) 213static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
214{ 214{
215 if (BTRFS_I(inode)->logged_trans == generation && 215 if (BTRFS_I(inode)->logged_trans == generation &&
216 BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit) 216 BTRFS_I(inode)->last_sub_trans <=
217 BTRFS_I(inode)->last_log_commit &&
218 BTRFS_I(inode)->last_sub_trans <=
219 BTRFS_I(inode)->root->last_log_commit)
217 return 1; 220 return 1;
218 return 0; 221 return 0;
219} 222}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 64346721173f..61b5bcd57b7e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1005,8 +1005,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1005 return ret; 1005 return ret;
1006 } 1006 }
1007 1007
1008 if (root->ref_cows) 1008 if (root->ref_cows) {
1009 btrfs_reloc_cow_block(trans, root, buf, cow); 1009 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1010 if (ret)
1011 return ret;
1012 }
1010 1013
1011 if (buf == root->node) { 1014 if (buf == root->node) {
1012 WARN_ON(parent && parent != buf); 1015 WARN_ON(parent && parent != buf);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3c1da6f98a4d..0506f40ede83 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1118,15 +1118,6 @@ struct btrfs_space_info {
1118 */ 1118 */
1119 struct percpu_counter total_bytes_pinned; 1119 struct percpu_counter total_bytes_pinned;
1120 1120
1121 /*
1122 * we bump reservation progress every time we decrement
1123 * bytes_reserved. This way people waiting for reservations
1124 * know something good has happened and they can check
1125 * for progress. The number here isn't to be trusted, it
1126 * just shows reclaim activity
1127 */
1128 unsigned long reservation_progress;
1129
1130 unsigned int full:1; /* indicates that we cannot allocate any more 1121 unsigned int full:1; /* indicates that we cannot allocate any more
1131 chunks for this space */ 1122 chunks for this space */
1132 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ 1123 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
@@ -3135,7 +3126,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
3135 unsigned num_items) 3126 unsigned num_items)
3136{ 3127{
3137 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3128 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3138 3 * num_items; 3129 2 * num_items;
3139} 3130}
3140 3131
3141/* 3132/*
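
For a sense of scale (assuming the then-common 4 KiB leaf and node size and BTRFS_MAX_LEVEL of 8, neither of which is stated in this hunk): (4096 + 4096 * 7) is 32 KiB, so the per-item metadata reservation drops from 3 * 32 KiB = 96 KiB to 2 * 32 KiB = 64 KiB, i.e. roughly one root-to-leaf path of CoW headroom less per reserved item.
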
@@ -3939,9 +3930,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
               struct btrfs_root *root);
 int btrfs_recover_relocation(struct btrfs_root *root);
 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
               struct btrfs_root *root, struct extent_buffer *buf,
               struct extent_buffer *cow);
 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
               struct btrfs_pending_snapshot *pending,
               u64 *bytes_to_reserve);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a64435359385..9efb94e95858 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -400,7 +400,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
     args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
     btrfs_dev_replace_unlock(dev_replace);
 
-    btrfs_wait_all_ordered_extents(root->fs_info, 0);
+    btrfs_wait_all_ordered_extents(root->fs_info);
 
     /* force writing the updated state information to disk */
     trans = btrfs_start_transaction(root, 0);
@@ -475,7 +475,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
         mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
         return ret;
     }
-    btrfs_wait_all_ordered_extents(root->fs_info, 0);
+    btrfs_wait_all_ordered_extents(root->fs_info);
 
     trans = btrfs_start_transaction(root, 0);
     if (IS_ERR(trans)) {
@@ -535,10 +535,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
     list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
     btrfs_rm_dev_replace_srcdev(fs_info, src_device);
-    if (src_device->bdev) {
-        /* zero out the old super */
-        btrfs_scratch_superblock(src_device);
-    }
+
     /*
      * this is again a consistent state where no dev_replace procedure
      * is running, the target device is part of the filesystem, the
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4cbb00af92ff..4ae17ed13b32 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -157,6 +157,7 @@ static struct btrfs_lockdep_keyset {
157 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, 157 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
158 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, 158 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
159 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, 159 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
160 { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" },
160 { .id = 0, .name_stem = "tree" }, 161 { .id = 0, .name_stem = "tree" },
161}; 162};
162 163
@@ -3415,6 +3416,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3415 if (total_errors > max_errors) { 3416 if (total_errors > max_errors) {
3416 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 3417 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
3417 total_errors); 3418 total_errors);
3419 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3418 3420
3419 /* FUA is masked off if unsupported and can't be the reason */ 3421 /* FUA is masked off if unsupported and can't be the reason */
3420 btrfs_error(root->fs_info, -EIO, 3422 btrfs_error(root->fs_info, -EIO,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cfb3cf711b34..d58bef130a41 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3925,7 +3925,6 @@ static int can_overcommit(struct btrfs_root *root,
     u64 space_size;
     u64 avail;
     u64 used;
-    u64 to_add;
 
     used = space_info->bytes_used + space_info->bytes_reserved +
         space_info->bytes_pinned + space_info->bytes_readonly;
@@ -3959,25 +3958,17 @@ static int can_overcommit(struct btrfs_root *root,
              BTRFS_BLOCK_GROUP_RAID10))
         avail >>= 1;
 
-    to_add = space_info->total_bytes;
-
     /*
      * If we aren't flushing all things, let us overcommit up to
      * 1/2th of the space. If we can flush, don't let us overcommit
      * too much, let it overcommit up to 1/8 of the space.
      */
     if (flush == BTRFS_RESERVE_FLUSH_ALL)
-        to_add >>= 3;
+        avail >>= 3;
     else
-        to_add >>= 1;
-
-    /*
-     * Limit the overcommit to the amount of free space we could possibly
-     * allocate for chunks.
-     */
-    to_add = min(avail, to_add);
+        avail >>= 1;
 
-    if (used + bytes < space_info->total_bytes + to_add)
+    if (used + bytes < space_info->total_bytes + avail)
         return 1;
     return 0;
 }
@@ -4000,7 +3991,7 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
      */
     btrfs_start_all_delalloc_inodes(root->fs_info, 0);
     if (!current->journal_info)
-        btrfs_wait_all_ordered_extents(root->fs_info, 0);
+        btrfs_wait_all_ordered_extents(root->fs_info);
     }
 }
 
@@ -4030,7 +4021,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
     if (delalloc_bytes == 0) {
         if (trans)
             return;
-        btrfs_wait_all_ordered_extents(root->fs_info, 0);
+        btrfs_wait_all_ordered_extents(root->fs_info);
         return;
     }
 
@@ -4058,7 +4049,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 
         loops++;
         if (wait_ordered && !trans) {
-            btrfs_wait_all_ordered_extents(root->fs_info, 0);
+            btrfs_wait_all_ordered_extents(root->fs_info);
         } else {
             time_left = schedule_timeout_killable(1);
             if (time_left)
@@ -4465,7 +4456,6 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
             space_info->bytes_may_use -= num_bytes;
             trace_btrfs_space_reservation(fs_info, "space_info",
                     space_info->flags, num_bytes, 0);
-            space_info->reservation_progress++;
             spin_unlock(&space_info->lock);
         }
     }
@@ -4666,7 +4656,6 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
         sinfo->bytes_may_use -= num_bytes;
         trace_btrfs_space_reservation(fs_info, "space_info",
                       sinfo->flags, num_bytes, 0);
-        sinfo->reservation_progress++;
         block_rsv->reserved = block_rsv->size;
         block_rsv->full = 1;
     }
@@ -5446,7 +5435,6 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
             space_info->bytes_readonly += num_bytes;
         cache->reserved -= num_bytes;
         space_info->bytes_reserved -= num_bytes;
-        space_info->reservation_progress++;
     }
     spin_unlock(&cache->lock);
     spin_unlock(&space_info->lock);
@@ -6117,10 +6105,13 @@ enum btrfs_loop_type {
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
  * ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
  * Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, we will record the max size of
+ * the free space extent currently.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
                      u64 num_bytes, u64 empty_size,
@@ -6133,6 +6124,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
     struct btrfs_block_group_cache *block_group = NULL;
     struct btrfs_block_group_cache *used_block_group;
     u64 search_start = 0;
+    u64 max_extent_size = 0;
     int empty_cluster = 2 * 1024 * 1024;
     struct btrfs_space_info *space_info;
     int loop = 0;
@@ -6292,7 +6284,10 @@ have_block_group:
             btrfs_get_block_group(used_block_group);
 
             offset = btrfs_alloc_from_cluster(used_block_group,
-                last_ptr, num_bytes, used_block_group->key.objectid);
+                        last_ptr,
+                        num_bytes,
+                        used_block_group->key.objectid,
+                        &max_extent_size);
             if (offset) {
                 /* we have a block, we're done */
                 spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6350,10 @@ refill_cluster:
              * cluster
              */
             offset = btrfs_alloc_from_cluster(block_group,
-                        last_ptr, num_bytes,
-                        search_start);
+                        last_ptr,
+                        num_bytes,
+                        search_start,
+                        &max_extent_size);
             if (offset) {
                 /* we found one, proceed */
                 spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6388,18 @@ unclustered_alloc:
         if (cached &&
             block_group->free_space_ctl->free_space <
             num_bytes + empty_cluster + empty_size) {
+            if (block_group->free_space_ctl->free_space >
+                max_extent_size)
+                max_extent_size =
+                    block_group->free_space_ctl->free_space;
             spin_unlock(&block_group->free_space_ctl->tree_lock);
             goto loop;
         }
         spin_unlock(&block_group->free_space_ctl->tree_lock);
 
         offset = btrfs_find_space_for_alloc(block_group, search_start,
-                            num_bytes, empty_size);
+                            num_bytes, empty_size,
+                            &max_extent_size);
         /*
          * If we didn't find a chunk, and we haven't failed on this
          * block group before, and this block group is in the middle of
@@ -6515,7 +6517,8 @@ loop:
         ret = 0;
     }
 out:
-
+    if (ret == -ENOSPC)
+        ins->offset = max_extent_size;
     return ret;
 }
 
@@ -6573,8 +6576,8 @@ again:
                    flags);
 
     if (ret == -ENOSPC) {
-        if (!final_tried) {
-            num_bytes = num_bytes >> 1;
+        if (!final_tried && ins->offset) {
+            num_bytes = min(num_bytes >> 1, ins->offset);
             num_bytes = round_down(num_bytes, root->sectorsize);
             num_bytes = max(num_bytes, min_alloc_size);
             if (num_bytes == min_alloc_size)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 09582b81640c..22bda32acb89 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -145,8 +145,16 @@ int __init extent_io_init(void)
             offsetof(struct btrfs_io_bio, bio));
     if (!btrfs_bioset)
         goto free_buffer_cache;
+
+    if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
+        goto free_bioset;
+
     return 0;
 
+free_bioset:
+    bioset_free(btrfs_bioset);
+    btrfs_bioset = NULL;
+
 free_buffer_cache:
     kmem_cache_destroy(extent_buffer_cache);
     extent_buffer_cache = NULL;
@@ -1481,10 +1489,12 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
         *end = state->end;
         cur_start = state->end + 1;
         node = rb_next(node);
-        if (!node)
-            break;
         total_bytes += state->end - state->start + 1;
-        if (total_bytes >= max_bytes)
+        if (total_bytes >= max_bytes) {
+            *end = *start + max_bytes - 1;
+            break;
+        }
+        if (!node)
             break;
     }
 out:
@@ -1612,7 +1622,7 @@ again:
         *start = delalloc_start;
         *end = delalloc_end;
         free_extent_state(cached_state);
-        return found;
+        return 0;
     }
 
     /*
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index bc5072b2db53..72da4df53c9a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1859,8 +1859,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
     ret = btrfs_log_dentry_safe(trans, root, dentry);
     if (ret < 0) {
-        mutex_unlock(&inode->i_mutex);
-        goto out;
+        /* Fallthrough and commit/free transaction. */
+        ret = 1;
     }
 
     /* we've logged all the items and now have a consistent
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3f0ddfce96e6..b4f9904c4c6b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1431,13 +1431,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1431 ctl->free_space += bytes; 1431 ctl->free_space += bytes;
1432} 1432}
1433 1433
1434/*
1435 * If we can not find suitable extent, we will use bytes to record
1436 * the size of the max extent.
1437 */
1434static int search_bitmap(struct btrfs_free_space_ctl *ctl, 1438static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1435 struct btrfs_free_space *bitmap_info, u64 *offset, 1439 struct btrfs_free_space *bitmap_info, u64 *offset,
1436 u64 *bytes) 1440 u64 *bytes)
1437{ 1441{
1438 unsigned long found_bits = 0; 1442 unsigned long found_bits = 0;
1443 unsigned long max_bits = 0;
1439 unsigned long bits, i; 1444 unsigned long bits, i;
1440 unsigned long next_zero; 1445 unsigned long next_zero;
1446 unsigned long extent_bits;
1441 1447
1442 i = offset_to_bit(bitmap_info->offset, ctl->unit, 1448 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1443 max_t(u64, *offset, bitmap_info->offset)); 1449 max_t(u64, *offset, bitmap_info->offset));
@@ -1446,9 +1452,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1446 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { 1452 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1447 next_zero = find_next_zero_bit(bitmap_info->bitmap, 1453 next_zero = find_next_zero_bit(bitmap_info->bitmap,
1448 BITS_PER_BITMAP, i); 1454 BITS_PER_BITMAP, i);
1449 if ((next_zero - i) >= bits) { 1455 extent_bits = next_zero - i;
1450 found_bits = next_zero - i; 1456 if (extent_bits >= bits) {
1457 found_bits = extent_bits;
1451 break; 1458 break;
1459 } else if (extent_bits > max_bits) {
1460 max_bits = extent_bits;
1452 } 1461 }
1453 i = next_zero; 1462 i = next_zero;
1454 } 1463 }
@@ -1459,38 +1468,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1459 return 0; 1468 return 0;
1460 } 1469 }
1461 1470
1471 *bytes = (u64)(max_bits) * ctl->unit;
1462 return -1; 1472 return -1;
1463} 1473}
1464 1474
1475/* Cache the size of the max extent in bytes */
1465static struct btrfs_free_space * 1476static struct btrfs_free_space *
1466find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, 1477find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1467 unsigned long align) 1478 unsigned long align, u64 *max_extent_size)
1468{ 1479{
1469 struct btrfs_free_space *entry; 1480 struct btrfs_free_space *entry;
1470 struct rb_node *node; 1481 struct rb_node *node;
1471 u64 ctl_off;
1472 u64 tmp; 1482 u64 tmp;
1473 u64 align_off; 1483 u64 align_off;
1474 int ret; 1484 int ret;
1475 1485
1476 if (!ctl->free_space_offset.rb_node) 1486 if (!ctl->free_space_offset.rb_node)
1477 return NULL; 1487 goto out;
1478 1488
1479 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); 1489 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1480 if (!entry) 1490 if (!entry)
1481 return NULL; 1491 goto out;
1482 1492
1483 for (node = &entry->offset_index; node; node = rb_next(node)) { 1493 for (node = &entry->offset_index; node; node = rb_next(node)) {
1484 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1494 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1485 if (entry->bytes < *bytes) 1495 if (entry->bytes < *bytes) {
1496 if (entry->bytes > *max_extent_size)
1497 *max_extent_size = entry->bytes;
1486 continue; 1498 continue;
1499 }
1487 1500
1488 /* make sure the space returned is big enough 1501 /* make sure the space returned is big enough
1489 * to match our requested alignment 1502 * to match our requested alignment
1490 */ 1503 */
1491 if (*bytes >= align) { 1504 if (*bytes >= align) {
1492 ctl_off = entry->offset - ctl->start; 1505 tmp = entry->offset - ctl->start + align - 1;
1493 tmp = ctl_off + align - 1;;
1494 do_div(tmp, align); 1506 do_div(tmp, align);
1495 tmp = tmp * align + ctl->start; 1507 tmp = tmp * align + ctl->start;
1496 align_off = tmp - entry->offset; 1508 align_off = tmp - entry->offset;
@@ -1499,14 +1511,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1499 tmp = entry->offset; 1511 tmp = entry->offset;
1500 } 1512 }
1501 1513
1502 if (entry->bytes < *bytes + align_off) 1514 if (entry->bytes < *bytes + align_off) {
1515 if (entry->bytes > *max_extent_size)
1516 *max_extent_size = entry->bytes;
1503 continue; 1517 continue;
1518 }
1504 1519
1505 if (entry->bitmap) { 1520 if (entry->bitmap) {
1506 ret = search_bitmap(ctl, entry, &tmp, bytes); 1521 u64 size = *bytes;
1522
1523 ret = search_bitmap(ctl, entry, &tmp, &size);
1507 if (!ret) { 1524 if (!ret) {
1508 *offset = tmp; 1525 *offset = tmp;
1526 *bytes = size;
1509 return entry; 1527 return entry;
1528 } else if (size > *max_extent_size) {
1529 *max_extent_size = size;
1510 } 1530 }
1511 continue; 1531 continue;
1512 } 1532 }
@@ -1515,7 +1535,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1515 *bytes = entry->bytes - align_off; 1535 *bytes = entry->bytes - align_off;
1516 return entry; 1536 return entry;
1517 } 1537 }
1518 1538out:
1519 return NULL; 1539 return NULL;
1520} 1540}
1521 1541
@@ -2116,7 +2136,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2116} 2136}
2117 2137
2118u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 2138u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2119 u64 offset, u64 bytes, u64 empty_size) 2139 u64 offset, u64 bytes, u64 empty_size,
2140 u64 *max_extent_size)
2120{ 2141{
2121 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2142 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2122 struct btrfs_free_space *entry = NULL; 2143 struct btrfs_free_space *entry = NULL;
@@ -2127,7 +2148,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2127 2148
2128 spin_lock(&ctl->tree_lock); 2149 spin_lock(&ctl->tree_lock);
2129 entry = find_free_space(ctl, &offset, &bytes_search, 2150 entry = find_free_space(ctl, &offset, &bytes_search,
2130 block_group->full_stripe_len); 2151 block_group->full_stripe_len, max_extent_size);
2131 if (!entry) 2152 if (!entry)
2132 goto out; 2153 goto out;
2133 2154
@@ -2137,7 +2158,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2137 if (!entry->bytes) 2158 if (!entry->bytes)
2138 free_bitmap(ctl, entry); 2159 free_bitmap(ctl, entry);
2139 } else { 2160 } else {
2140
2141 unlink_free_space(ctl, entry); 2161 unlink_free_space(ctl, entry);
2142 align_gap_len = offset - entry->offset; 2162 align_gap_len = offset - entry->offset;
2143 align_gap = entry->offset; 2163 align_gap = entry->offset;
@@ -2151,7 +2171,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2151 else 2171 else
2152 link_free_space(ctl, entry); 2172 link_free_space(ctl, entry);
2153 } 2173 }
2154
2155out: 2174out:
2156 spin_unlock(&ctl->tree_lock); 2175 spin_unlock(&ctl->tree_lock);
2157 2176
@@ -2206,7 +2225,8 @@ int btrfs_return_cluster_to_free_space(
2206static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, 2225static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2207 struct btrfs_free_cluster *cluster, 2226 struct btrfs_free_cluster *cluster,
2208 struct btrfs_free_space *entry, 2227 struct btrfs_free_space *entry,
2209 u64 bytes, u64 min_start) 2228 u64 bytes, u64 min_start,
2229 u64 *max_extent_size)
2210{ 2230{
2211 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2231 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2212 int err; 2232 int err;
@@ -2218,8 +2238,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2218 search_bytes = bytes; 2238 search_bytes = bytes;
2219 2239
2220 err = search_bitmap(ctl, entry, &search_start, &search_bytes); 2240 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2221 if (err) 2241 if (err) {
2242 if (search_bytes > *max_extent_size)
2243 *max_extent_size = search_bytes;
2222 return 0; 2244 return 0;
2245 }
2223 2246
2224 ret = search_start; 2247 ret = search_start;
2225 __bitmap_clear_bits(ctl, entry, ret, bytes); 2248 __bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2234,7 +2257,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2234 */ 2257 */
2235u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, 2258u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2236 struct btrfs_free_cluster *cluster, u64 bytes, 2259 struct btrfs_free_cluster *cluster, u64 bytes,
2237 u64 min_start) 2260 u64 min_start, u64 *max_extent_size)
2238{ 2261{
2239 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2262 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2240 struct btrfs_free_space *entry = NULL; 2263 struct btrfs_free_space *entry = NULL;
@@ -2254,6 +2277,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2254 2277
2255 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2278 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2256 while(1) { 2279 while(1) {
2280 if (entry->bytes < bytes && entry->bytes > *max_extent_size)
2281 *max_extent_size = entry->bytes;
2282
2257 if (entry->bytes < bytes || 2283 if (entry->bytes < bytes ||
2258 (!entry->bitmap && entry->offset < min_start)) { 2284 (!entry->bitmap && entry->offset < min_start)) {
2259 node = rb_next(&entry->offset_index); 2285 node = rb_next(&entry->offset_index);
@@ -2267,7 +2293,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2267 if (entry->bitmap) { 2293 if (entry->bitmap) {
2268 ret = btrfs_alloc_from_bitmap(block_group, 2294 ret = btrfs_alloc_from_bitmap(block_group,
2269 cluster, entry, bytes, 2295 cluster, entry, bytes,
2270 cluster->window_start); 2296 cluster->window_start,
2297 max_extent_size);
2271 if (ret == 0) { 2298 if (ret == 0) {
2272 node = rb_next(&entry->offset_index); 2299 node = rb_next(&entry->offset_index);
2273 if (!node) 2300 if (!node)
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index c74904167476..e737f92cf6d0 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -94,7 +94,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
                    *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-                   u64 offset, u64 bytes, u64 empty_size);
+                   u64 offset, u64 bytes, u64 empty_size,
+                   u64 *max_extent_size);
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                u64 bytes);
@@ -105,7 +106,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                  struct btrfs_free_cluster *cluster, u64 bytes,
-                 u64 min_start);
+                 u64 min_start, u64 *max_extent_size);
 int btrfs_return_cluster_to_free_space(
                  struct btrfs_block_group_cache *block_group,
                  struct btrfs_free_cluster *cluster);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f338c5672d58..22ebc13b6c99 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4688,11 +4688,11 @@ static void inode_tree_add(struct inode *inode)
     struct btrfs_inode *entry;
     struct rb_node **p;
     struct rb_node *parent;
+    struct rb_node *new = &BTRFS_I(inode)->rb_node;
     u64 ino = btrfs_ino(inode);
 
     if (inode_unhashed(inode))
         return;
-again:
     parent = NULL;
     spin_lock(&root->inode_lock);
     p = &root->inode_tree.rb_node;
@@ -4707,14 +4707,14 @@ again:
         else {
             WARN_ON(!(entry->vfs_inode.i_state &
                   (I_WILL_FREE | I_FREEING)));
-            rb_erase(parent, &root->inode_tree);
+            rb_replace_node(parent, new, &root->inode_tree);
             RB_CLEAR_NODE(parent);
             spin_unlock(&root->inode_lock);
-            goto again;
+            return;
         }
     }
-    rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
-    rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
+    rb_link_node(new, parent, p);
+    rb_insert_color(new, &root->inode_tree);
     spin_unlock(&root->inode_lock);
 }
 
@@ -8216,6 +8216,10 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8216 8216
8217 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 8217 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8218 if (unlikely(!work)) { 8218 if (unlikely(!work)) {
8219 if (delay_iput)
8220 btrfs_add_delayed_iput(inode);
8221 else
8222 iput(inode);
8219 ret = -ENOMEM; 8223 ret = -ENOMEM;
8220 goto out; 8224 goto out;
8221 } 8225 }
@@ -8613,11 +8617,13 @@ static const struct inode_operations btrfs_dir_inode_operations = {
8613 .removexattr = btrfs_removexattr, 8617 .removexattr = btrfs_removexattr,
8614 .permission = btrfs_permission, 8618 .permission = btrfs_permission,
8615 .get_acl = btrfs_get_acl, 8619 .get_acl = btrfs_get_acl,
8620 .update_time = btrfs_update_time,
8616}; 8621};
8617static const struct inode_operations btrfs_dir_ro_inode_operations = { 8622static const struct inode_operations btrfs_dir_ro_inode_operations = {
8618 .lookup = btrfs_lookup, 8623 .lookup = btrfs_lookup,
8619 .permission = btrfs_permission, 8624 .permission = btrfs_permission,
8620 .get_acl = btrfs_get_acl, 8625 .get_acl = btrfs_get_acl,
8626 .update_time = btrfs_update_time,
8621}; 8627};
8622 8628
8623static const struct file_operations btrfs_dir_file_operations = { 8629static const struct file_operations btrfs_dir_file_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1a5b9462dd9a..9d46f60cb943 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -574,7 +574,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
     if (ret)
         return ret;
 
-    btrfs_wait_ordered_extents(root, 0);
+    btrfs_wait_ordered_extents(root);
 
     pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
     if (!pending_snapshot)
@@ -2696,9 +2696,9 @@ out_unlock:
2696static long btrfs_ioctl_file_extent_same(struct file *file, 2696static long btrfs_ioctl_file_extent_same(struct file *file,
2697 void __user *argp) 2697 void __user *argp)
2698{ 2698{
2699 struct btrfs_ioctl_same_args *args = argp; 2699 struct btrfs_ioctl_same_args tmp;
2700 struct btrfs_ioctl_same_args same; 2700 struct btrfs_ioctl_same_args *same;
2701 struct btrfs_ioctl_same_extent_info info; 2701 struct btrfs_ioctl_same_extent_info *info;
2702 struct inode *src = file->f_dentry->d_inode; 2702 struct inode *src = file->f_dentry->d_inode;
2703 struct file *dst_file = NULL; 2703 struct file *dst_file = NULL;
2704 struct inode *dst; 2704 struct inode *dst;
@@ -2706,6 +2706,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
2706 u64 len; 2706 u64 len;
2707 int i; 2707 int i;
2708 int ret; 2708 int ret;
2709 unsigned long size;
2709 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; 2710 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
2710 bool is_admin = capable(CAP_SYS_ADMIN); 2711 bool is_admin = capable(CAP_SYS_ADMIN);
2711 2712
@@ -2716,15 +2717,30 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
2716 if (ret) 2717 if (ret)
2717 return ret; 2718 return ret;
2718 2719
2719 if (copy_from_user(&same, 2720 if (copy_from_user(&tmp,
2720 (struct btrfs_ioctl_same_args __user *)argp, 2721 (struct btrfs_ioctl_same_args __user *)argp,
2721 sizeof(same))) { 2722 sizeof(tmp))) {
2722 ret = -EFAULT; 2723 ret = -EFAULT;
2723 goto out; 2724 goto out;
2724 } 2725 }
2725 2726
2726 off = same.logical_offset; 2727 size = sizeof(tmp) +
2727 len = same.length; 2728 tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info);
2729
2730 same = kmalloc(size, GFP_NOFS);
2731 if (!same) {
2732 ret = -EFAULT;
2733 goto out;
2734 }
2735
2736 if (copy_from_user(same,
2737 (struct btrfs_ioctl_same_args __user *)argp, size)) {
2738 ret = -EFAULT;
2739 goto out;
2740 }
2741
2742 off = same->logical_offset;
2743 len = same->length;
2728 2744
2729 /* 2745 /*
2730 * Limit the total length we will dedupe for each operation. 2746 * Limit the total length we will dedupe for each operation.
@@ -2752,27 +2768,28 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
2752 if (!S_ISREG(src->i_mode)) 2768 if (!S_ISREG(src->i_mode))
2753 goto out; 2769 goto out;
2754 2770
2755 ret = 0; 2771 /* pre-format output fields to sane values */
2756 for (i = 0; i < same.dest_count; i++) { 2772 for (i = 0; i < same->dest_count; i++) {
2757 if (copy_from_user(&info, &args->info[i], sizeof(info))) { 2773 same->info[i].bytes_deduped = 0ULL;
2758 ret = -EFAULT; 2774 same->info[i].status = 0;
2759 goto out; 2775 }
2760 }
2761 2776
2762 info.bytes_deduped = 0; 2777 ret = 0;
2778 for (i = 0; i < same->dest_count; i++) {
2779 info = &same->info[i];
2763 2780
2764 dst_file = fget(info.fd); 2781 dst_file = fget(info->fd);
2765 if (!dst_file) { 2782 if (!dst_file) {
2766 info.status = -EBADF; 2783 info->status = -EBADF;
2767 goto next; 2784 goto next;
2768 } 2785 }
2769 2786
2770 if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) { 2787 if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
2771 info.status = -EINVAL; 2788 info->status = -EINVAL;
2772 goto next; 2789 goto next;
2773 } 2790 }
2774 2791
2775 info.status = -EXDEV; 2792 info->status = -EXDEV;
2776 if (file->f_path.mnt != dst_file->f_path.mnt) 2793 if (file->f_path.mnt != dst_file->f_path.mnt)
2777 goto next; 2794 goto next;
2778 2795
@@ -2781,32 +2798,29 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
2781 goto next; 2798 goto next;
2782 2799
2783 if (S_ISDIR(dst->i_mode)) { 2800 if (S_ISDIR(dst->i_mode)) {
2784 info.status = -EISDIR; 2801 info->status = -EISDIR;
2785 goto next; 2802 goto next;
2786 } 2803 }
2787 2804
2788 if (!S_ISREG(dst->i_mode)) { 2805 if (!S_ISREG(dst->i_mode)) {
2789 info.status = -EACCES; 2806 info->status = -EACCES;
2790 goto next; 2807 goto next;
2791 } 2808 }
2792 2809
2793 info.status = btrfs_extent_same(src, off, len, dst, 2810 info->status = btrfs_extent_same(src, off, len, dst,
2794 info.logical_offset); 2811 info->logical_offset);
2795 if (info.status == 0) 2812 if (info->status == 0)
2796 info.bytes_deduped += len; 2813 info->bytes_deduped += len;
2797 2814
2798next: 2815next:
2799 if (dst_file) 2816 if (dst_file)
2800 fput(dst_file); 2817 fput(dst_file);
2801
2802 if (__put_user_unaligned(info.status, &args->info[i].status) ||
2803 __put_user_unaligned(info.bytes_deduped,
2804 &args->info[i].bytes_deduped)) {
2805 ret = -EFAULT;
2806 goto out;
2807 }
2808 } 2818 }
2809 2819
2820 ret = copy_to_user(argp, same, size);
2821 if (ret)
2822 ret = -EFAULT;
2823
2810out: 2824out:
2811 mnt_drop_write_file(file); 2825 mnt_drop_write_file(file);
2812 return ret; 2826 return ret;
@@ -3310,7 +3324,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
     }
 
     if (!objectid)
-        objectid = root->root_key.objectid;
+        objectid = BTRFS_FS_TREE_OBJECTID;
 
     location.objectid = objectid;
     location.type = BTRFS_ROOT_ITEM_KEY;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 966b413a33b8..c702cb62f78a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -563,11 +563,10 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
563 * wait for all the ordered extents in a root. This is done when balancing 563 * wait for all the ordered extents in a root. This is done when balancing
564 * space between drives. 564 * space between drives.
565 */ 565 */
566void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) 566void btrfs_wait_ordered_extents(struct btrfs_root *root)
567{ 567{
568 struct list_head splice, works; 568 struct list_head splice, works;
569 struct btrfs_ordered_extent *ordered, *next; 569 struct btrfs_ordered_extent *ordered, *next;
570 struct inode *inode;
571 570
572 INIT_LIST_HEAD(&splice); 571 INIT_LIST_HEAD(&splice);
573 INIT_LIST_HEAD(&works); 572 INIT_LIST_HEAD(&works);
@@ -580,15 +579,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
580 root_extent_list); 579 root_extent_list);
581 list_move_tail(&ordered->root_extent_list, 580 list_move_tail(&ordered->root_extent_list,
582 &root->ordered_extents); 581 &root->ordered_extents);
583 /*
584 * the inode may be getting freed (in sys_unlink path).
585 */
586 inode = igrab(ordered->inode);
587 if (!inode) {
588 cond_resched_lock(&root->ordered_extent_lock);
589 continue;
590 }
591
592 atomic_inc(&ordered->refs); 582 atomic_inc(&ordered->refs);
593 spin_unlock(&root->ordered_extent_lock); 583 spin_unlock(&root->ordered_extent_lock);
594 584
@@ -605,21 +595,13 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
605 list_for_each_entry_safe(ordered, next, &works, work_list) { 595 list_for_each_entry_safe(ordered, next, &works, work_list) {
606 list_del_init(&ordered->work_list); 596 list_del_init(&ordered->work_list);
607 wait_for_completion(&ordered->completion); 597 wait_for_completion(&ordered->completion);
608
609 inode = ordered->inode;
610 btrfs_put_ordered_extent(ordered); 598 btrfs_put_ordered_extent(ordered);
611 if (delay_iput)
612 btrfs_add_delayed_iput(inode);
613 else
614 iput(inode);
615
616 cond_resched(); 599 cond_resched();
617 } 600 }
618 mutex_unlock(&root->fs_info->ordered_operations_mutex); 601 mutex_unlock(&root->fs_info->ordered_operations_mutex);
619} 602}
620 603
621void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info, 604void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
622 int delay_iput)
623{ 605{
624 struct btrfs_root *root; 606 struct btrfs_root *root;
625 struct list_head splice; 607 struct list_head splice;
@@ -637,7 +619,7 @@ void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
637 &fs_info->ordered_roots); 619 &fs_info->ordered_roots);
638 spin_unlock(&fs_info->ordered_root_lock); 620 spin_unlock(&fs_info->ordered_root_lock);
639 621
640 btrfs_wait_ordered_extents(root, delay_iput); 622 btrfs_wait_ordered_extents(root);
641 btrfs_put_fs_root(root); 623 btrfs_put_fs_root(root);
642 624
643 spin_lock(&fs_info->ordered_root_lock); 625 spin_lock(&fs_info->ordered_root_lock);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index d9a5aa097b4f..0c0b35612d7a 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -195,9 +195,8 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
-                    int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root);
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info);
 void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
 void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
 void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index aacc2121e87c..a5a26320503f 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1548,7 +1548,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
            btrfs_file_extent_other_encoding(leaf, fi));
 
     if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
-        ret = 1;
+        ret = -EINVAL;
         goto out;
     }
 
@@ -1579,7 +1579,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
     u64 end;
     u32 nritems;
     u32 i;
-    int ret;
+    int ret = 0;
     int first = 1;
     int dirty = 0;
 
@@ -1642,11 +1642,13 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 
         ret = get_new_location(rc->data_inode, &new_bytenr,
                        bytenr, num_bytes);
-        if (ret > 0) {
-            WARN_ON(1);
-            continue;
+        if (ret) {
+            /*
+             * Don't have to abort since we've not changed anything
+             * in the file extent yet.
+             */
+            break;
         }
-        BUG_ON(ret < 0);
 
         btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
         dirty = 1;
@@ -1656,18 +1658,24 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                        num_bytes, parent,
                        btrfs_header_owner(leaf),
                        key.objectid, key.offset, 1);
-        BUG_ON(ret);
+        if (ret) {
+            btrfs_abort_transaction(trans, root, ret);
+            break;
+        }
 
         ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                     parent, btrfs_header_owner(leaf),
                     key.objectid, key.offset, 1);
-        BUG_ON(ret);
+        if (ret) {
+            btrfs_abort_transaction(trans, root, ret);
+            break;
+        }
     }
     if (dirty)
         btrfs_mark_buffer_dirty(leaf);
     if (inode)
         btrfs_add_delayed_iput(inode);
-    return 0;
+    return ret;
 }
 
 static noinline_for_stack
@@ -4238,7 +4246,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
         err = ret;
         goto out;
     }
-    btrfs_wait_all_ordered_extents(fs_info, 0);
+    btrfs_wait_all_ordered_extents(fs_info);
 
     while (1) {
         mutex_lock(&fs_info->cleaner_mutex);
@@ -4499,19 +4507,19 @@ out:
     return ret;
 }
 
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct extent_buffer *buf,
                struct extent_buffer *cow)
 {
     struct reloc_control *rc;
     struct backref_node *node;
     int first_cow = 0;
     int level;
-    int ret;
+    int ret = 0;
 
     rc = root->fs_info->reloc_ctl;
     if (!rc)
-        return;
+        return 0;
 
     BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
            root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4547,10 +4555,9 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
         rc->nodes_relocated += buf->len;
     }
 
-    if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
+    if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
         ret = replace_file_extents(trans, rc, root, cow);
-        BUG_ON(ret);
-    }
+    return ret;
 }
 
 /*
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0afcd452fcb3..a18e0e23f6a6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -158,12 +158,20 @@ struct scrub_fixup_nodatasum {
158 int mirror_num; 158 int mirror_num;
159}; 159};
160 160
161struct scrub_nocow_inode {
162 u64 inum;
163 u64 offset;
164 u64 root;
165 struct list_head list;
166};
167
161struct scrub_copy_nocow_ctx { 168struct scrub_copy_nocow_ctx {
162 struct scrub_ctx *sctx; 169 struct scrub_ctx *sctx;
163 u64 logical; 170 u64 logical;
164 u64 len; 171 u64 len;
165 int mirror_num; 172 int mirror_num;
166 u64 physical_for_dev_replace; 173 u64 physical_for_dev_replace;
174 struct list_head inodes;
167 struct btrfs_work work; 175 struct btrfs_work work;
168}; 176};
169 177
@@ -245,7 +253,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
245static int write_page_nocow(struct scrub_ctx *sctx, 253static int write_page_nocow(struct scrub_ctx *sctx,
246 u64 physical_for_dev_replace, struct page *page); 254 u64 physical_for_dev_replace, struct page *page);
247static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, 255static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
248 void *ctx); 256 struct scrub_copy_nocow_ctx *ctx);
249static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, 257static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
250 int mirror_num, u64 physical_for_dev_replace); 258 int mirror_num, u64 physical_for_dev_replace);
251static void copy_nocow_pages_worker(struct btrfs_work *work); 259static void copy_nocow_pages_worker(struct btrfs_work *work);
@@ -3126,12 +3134,30 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3126 nocow_ctx->mirror_num = mirror_num; 3134 nocow_ctx->mirror_num = mirror_num;
3127 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; 3135 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3128 nocow_ctx->work.func = copy_nocow_pages_worker; 3136 nocow_ctx->work.func = copy_nocow_pages_worker;
3137 INIT_LIST_HEAD(&nocow_ctx->inodes);
3129 btrfs_queue_worker(&fs_info->scrub_nocow_workers, 3138 btrfs_queue_worker(&fs_info->scrub_nocow_workers,
3130 &nocow_ctx->work); 3139 &nocow_ctx->work);
3131 3140
3132 return 0; 3141 return 0;
3133} 3142}
3134 3143
3144static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3145{
3146 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3147 struct scrub_nocow_inode *nocow_inode;
3148
3149 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3150 if (!nocow_inode)
3151 return -ENOMEM;
3152 nocow_inode->inum = inum;
3153 nocow_inode->offset = offset;
3154 nocow_inode->root = root;
3155 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3156 return 0;
3157}
3158
3159#define COPY_COMPLETE 1
3160
3135static void copy_nocow_pages_worker(struct btrfs_work *work) 3161static void copy_nocow_pages_worker(struct btrfs_work *work)
3136{ 3162{
3137 struct scrub_copy_nocow_ctx *nocow_ctx = 3163 struct scrub_copy_nocow_ctx *nocow_ctx =
@@ -3167,8 +3193,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
3167 } 3193 }
3168 3194
3169 ret = iterate_inodes_from_logical(logical, fs_info, path, 3195 ret = iterate_inodes_from_logical(logical, fs_info, path,
3170 copy_nocow_pages_for_inode, 3196 record_inode_for_nocow, nocow_ctx);
3171 nocow_ctx);
3172 if (ret != 0 && ret != -ENOENT) { 3197 if (ret != 0 && ret != -ENOENT) {
3173 pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n", 3198 pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
3174 logical, physical_for_dev_replace, len, mirror_num, 3199 logical, physical_for_dev_replace, len, mirror_num,
@@ -3177,7 +3202,33 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
3177 goto out; 3202 goto out;
3178 } 3203 }
3179 3204
3205 btrfs_end_transaction(trans, root);
3206 trans = NULL;
3207 while (!list_empty(&nocow_ctx->inodes)) {
3208 struct scrub_nocow_inode *entry;
3209 entry = list_first_entry(&nocow_ctx->inodes,
3210 struct scrub_nocow_inode,
3211 list);
3212 list_del_init(&entry->list);
3213 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
3214 entry->root, nocow_ctx);
3215 kfree(entry);
3216 if (ret == COPY_COMPLETE) {
3217 ret = 0;
3218 break;
3219 } else if (ret) {
3220 break;
3221 }
3222 }
3180out: 3223out:
3224 while (!list_empty(&nocow_ctx->inodes)) {
3225 struct scrub_nocow_inode *entry;
3226 entry = list_first_entry(&nocow_ctx->inodes,
3227 struct scrub_nocow_inode,
3228 list);
3229 list_del_init(&entry->list);
3230 kfree(entry);
3231 }
3181 if (trans && !IS_ERR(trans)) 3232 if (trans && !IS_ERR(trans))
3182 btrfs_end_transaction(trans, root); 3233 btrfs_end_transaction(trans, root);
3183 if (not_written) 3234 if (not_written)
@@ -3190,20 +3241,25 @@ out:
3190 scrub_pending_trans_workers_dec(sctx); 3241 scrub_pending_trans_workers_dec(sctx);
3191} 3242}
3192 3243
3193static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx) 3244static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3245 struct scrub_copy_nocow_ctx *nocow_ctx)
3194{ 3246{
3195 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3196 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; 3247 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
3197 struct btrfs_key key; 3248 struct btrfs_key key;
3198 struct inode *inode; 3249 struct inode *inode;
3199 struct page *page; 3250 struct page *page;
3200 struct btrfs_root *local_root; 3251 struct btrfs_root *local_root;
3252 struct btrfs_ordered_extent *ordered;
3253 struct extent_map *em;
3254 struct extent_state *cached_state = NULL;
3255 struct extent_io_tree *io_tree;
3201 u64 physical_for_dev_replace; 3256 u64 physical_for_dev_replace;
3202 u64 len; 3257 u64 len = nocow_ctx->len;
3258 u64 lockstart = offset, lockend = offset + len - 1;
3203 unsigned long index; 3259 unsigned long index;
3204 int srcu_index; 3260 int srcu_index;
3205 int ret; 3261 int ret = 0;
3206 int err; 3262 int err = 0;
3207 3263
3208 key.objectid = root; 3264 key.objectid = root;
3209 key.type = BTRFS_ROOT_ITEM_KEY; 3265 key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3229,9 +3285,33 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
3229 mutex_lock(&inode->i_mutex); 3285 mutex_lock(&inode->i_mutex);
3230 inode_dio_wait(inode); 3286 inode_dio_wait(inode);
3231 3287
3232 ret = 0;
3233 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; 3288 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3234 len = nocow_ctx->len; 3289 io_tree = &BTRFS_I(inode)->io_tree;
3290
3291 lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
3292 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
3293 if (ordered) {
3294 btrfs_put_ordered_extent(ordered);
3295 goto out_unlock;
3296 }
3297
3298 em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3299 if (IS_ERR(em)) {
3300 ret = PTR_ERR(em);
3301 goto out_unlock;
3302 }
3303
3304 /*
3305 * This extent does not actually cover the logical extent anymore,
3306 * move on to the next inode.
3307 */
3308 if (em->block_start > nocow_ctx->logical ||
3309 em->block_start + em->block_len < nocow_ctx->logical + len) {
3310 free_extent_map(em);
3311 goto out_unlock;
3312 }
3313 free_extent_map(em);
3314
3235 while (len >= PAGE_CACHE_SIZE) { 3315 while (len >= PAGE_CACHE_SIZE) {
3236 index = offset >> PAGE_CACHE_SHIFT; 3316 index = offset >> PAGE_CACHE_SHIFT;
3237again: 3317again:
@@ -3247,10 +3327,9 @@ again:
3247 goto next_page; 3327 goto next_page;
3248 } else { 3328 } else {
3249 ClearPageError(page); 3329 ClearPageError(page);
3250 err = extent_read_full_page(&BTRFS_I(inode)-> 3330 err = extent_read_full_page_nolock(io_tree, page,
3251 io_tree, 3331 btrfs_get_extent,
3252 page, btrfs_get_extent, 3332 nocow_ctx->mirror_num);
3253 nocow_ctx->mirror_num);
3254 if (err) { 3333 if (err) {
3255 ret = err; 3334 ret = err;
3256 goto next_page; 3335 goto next_page;
@@ -3264,6 +3343,7 @@ again:
3264 * page in the page cache. 3343 * page in the page cache.
3265 */ 3344 */
3266 if (page->mapping != inode->i_mapping) { 3345 if (page->mapping != inode->i_mapping) {
3346 unlock_page(page);
3267 page_cache_release(page); 3347 page_cache_release(page);
3268 goto again; 3348 goto again;
3269 } 3349 }
@@ -3287,6 +3367,10 @@ next_page:
3287 physical_for_dev_replace += PAGE_CACHE_SIZE; 3367 physical_for_dev_replace += PAGE_CACHE_SIZE;
3288 len -= PAGE_CACHE_SIZE; 3368 len -= PAGE_CACHE_SIZE;
3289 } 3369 }
3370 ret = COPY_COMPLETE;
3371out_unlock:
3372 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3373 GFP_NOFS);
3290out: 3374out:
3291 mutex_unlock(&inode->i_mutex); 3375 mutex_unlock(&inode->i_mutex);
3292 iput(inode); 3376 iput(inode);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3aab10ce63e8..e913328d0f2a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -921,7 +921,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
         return 0;
     }
 
-    btrfs_wait_all_ordered_extents(fs_info, 1);
+    btrfs_wait_all_ordered_extents(fs_info);
 
     trans = btrfs_attach_transaction_barrier(root);
     if (IS_ERR(trans)) {
@@ -1340,6 +1340,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1340 if (ret) 1340 if (ret)
1341 goto restore; 1341 goto restore;
1342 } else { 1342 } else {
1343 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1344 btrfs_err(fs_info,
1345 "Remounting read-write after error is not allowed\n");
1346 ret = -EINVAL;
1347 goto restore;
1348 }
1343 if (fs_info->fs_devices->rw_devices == 0) { 1349 if (fs_info->fs_devices->rw_devices == 0) {
1344 ret = -EACCES; 1350 ret = -EACCES;
1345 goto restore; 1351 goto restore;
@@ -1377,6 +1383,16 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1377 pr_warn("btrfs: failed to resume dev_replace\n"); 1383 pr_warn("btrfs: failed to resume dev_replace\n");
1378 goto restore; 1384 goto restore;
1379 } 1385 }
1386
1387 if (!fs_info->uuid_root) {
1388 pr_info("btrfs: creating UUID tree\n");
1389 ret = btrfs_create_uuid_tree(fs_info);
1390 if (ret) {
1391 pr_warn("btrfs: failed to create the uuid tree"
1392 "%d\n", ret);
1393 goto restore;
1394 }
1395 }
1380 sb->s_flags &= ~MS_RDONLY; 1396 sb->s_flags &= ~MS_RDONLY;
1381 } 1397 }
1382out: 1398out:
@@ -1762,6 +1778,9 @@ static void btrfs_print_info(void)
1762#ifdef CONFIG_BTRFS_DEBUG 1778#ifdef CONFIG_BTRFS_DEBUG
1763 ", debug=on" 1779 ", debug=on"
1764#endif 1780#endif
1781#ifdef CONFIG_BTRFS_ASSERT
1782 ", assert=on"
1783#endif
1765#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1784#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1766 ", integrity-checker=on" 1785 ", integrity-checker=on"
1767#endif 1786#endif
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index cac4a3f76323..8c81bdc1ef9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1603,7 +1603,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
     if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
-        btrfs_wait_all_ordered_extents(fs_info, 1);
+        btrfs_wait_all_ordered_extents(fs_info);
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -1838,11 +1838,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1838 assert_qgroups_uptodate(trans); 1838 assert_qgroups_uptodate(trans);
1839 update_super_roots(root); 1839 update_super_roots(root);
1840 1840
1841 if (!root->fs_info->log_root_recovering) { 1841 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1842 btrfs_set_super_log_root(root->fs_info->super_copy, 0); 1842 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1843 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1844 }
1845
1846 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy, 1843 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1847 sizeof(*root->fs_info->super_copy)); 1844 sizeof(*root->fs_info->super_copy));
1848 1845
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 0d9613c3f5e5..79f057c0619a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -93,7 +93,8 @@
93 */ 93 */
94#define LOG_WALK_PIN_ONLY 0 94#define LOG_WALK_PIN_ONLY 0
95#define LOG_WALK_REPLAY_INODES 1 95#define LOG_WALK_REPLAY_INODES 1
96#define LOG_WALK_REPLAY_ALL 2 96#define LOG_WALK_REPLAY_DIR_INDEX 2
97#define LOG_WALK_REPLAY_ALL 3
97 98
98static int btrfs_log_inode(struct btrfs_trans_handle *trans, 99static int btrfs_log_inode(struct btrfs_trans_handle *trans,
99 struct btrfs_root *root, struct inode *inode, 100 struct btrfs_root *root, struct inode *inode,
@@ -393,6 +394,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
393 if (inode_item) { 394 if (inode_item) {
394 struct btrfs_inode_item *item; 395 struct btrfs_inode_item *item;
395 u64 nbytes; 396 u64 nbytes;
397 u32 mode;
396 398
397 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 399 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
398 struct btrfs_inode_item); 400 struct btrfs_inode_item);
@@ -400,9 +402,19 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
400 item = btrfs_item_ptr(eb, slot, 402 item = btrfs_item_ptr(eb, slot,
401 struct btrfs_inode_item); 403 struct btrfs_inode_item);
402 btrfs_set_inode_nbytes(eb, item, nbytes); 404 btrfs_set_inode_nbytes(eb, item, nbytes);
405
406 /*
407 * If this is a directory we need to reset the i_size to
408 * 0 so that we can set it up properly when replaying
409 * the rest of the items in this log.
410 */
411 mode = btrfs_inode_mode(eb, item);
412 if (S_ISDIR(mode))
413 btrfs_set_inode_size(eb, item, 0);
403 } 414 }
404 } else if (inode_item) { 415 } else if (inode_item) {
405 struct btrfs_inode_item *item; 416 struct btrfs_inode_item *item;
417 u32 mode;
406 418
407 /* 419 /*
408 * New inode, set nbytes to 0 so that the nbytes comes out 420 * New inode, set nbytes to 0 so that the nbytes comes out
@@ -410,6 +422,15 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
410 */ 422 */
411 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 423 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
412 btrfs_set_inode_nbytes(eb, item, 0); 424 btrfs_set_inode_nbytes(eb, item, 0);
425
426 /*
427 * If this is a directory we need to reset the i_size to 0 so
428 * that we can set it up properly when replaying the rest of
429 * the items in this log.
430 */
431 mode = btrfs_inode_mode(eb, item);
432 if (S_ISDIR(mode))
433 btrfs_set_inode_size(eb, item, 0);
413 } 434 }
414insert: 435insert:
415 btrfs_release_path(path); 436 btrfs_release_path(path);
@@ -1496,6 +1517,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1496 iput(inode); 1517 iput(inode);
1497 return -EIO; 1518 return -EIO;
1498 } 1519 }
1520
1499 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); 1521 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1500 1522
1501 /* FIXME, put inode into FIXUP list */ 1523 /* FIXME, put inode into FIXUP list */
@@ -1534,6 +1556,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1534 u8 log_type; 1556 u8 log_type;
1535 int exists; 1557 int exists;
1536 int ret = 0; 1558 int ret = 0;
1559 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1537 1560
1538 dir = read_one_inode(root, key->objectid); 1561 dir = read_one_inode(root, key->objectid);
1539 if (!dir) 1562 if (!dir)
@@ -1604,6 +1627,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1604 goto insert; 1627 goto insert;
1605out: 1628out:
1606 btrfs_release_path(path); 1629 btrfs_release_path(path);
1630 if (!ret && update_size) {
1631 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1632 ret = btrfs_update_inode(trans, root, dir);
1633 }
1607 kfree(name); 1634 kfree(name);
1608 iput(dir); 1635 iput(dir);
1609 return ret; 1636 return ret;
@@ -1614,6 +1641,7 @@ insert:
1614 name, name_len, log_type, &log_key); 1641 name, name_len, log_type, &log_key);
1615 if (ret && ret != -ENOENT) 1642 if (ret && ret != -ENOENT)
1616 goto out; 1643 goto out;
1644 update_size = false;
1617 ret = 0; 1645 ret = 0;
1618 goto out; 1646 goto out;
1619} 1647}
@@ -2027,6 +2055,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2027 if (ret) 2055 if (ret)
2028 break; 2056 break;
2029 } 2057 }
2058
2059 if (key.type == BTRFS_DIR_INDEX_KEY &&
2060 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2061 ret = replay_one_dir_item(wc->trans, root, path,
2062 eb, i, &key);
2063 if (ret)
2064 break;
2065 }
2066
2030 if (wc->stage < LOG_WALK_REPLAY_ALL) 2067 if (wc->stage < LOG_WALK_REPLAY_ALL)
2031 continue; 2068 continue;
2032 2069
@@ -2048,8 +2085,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2048 eb, i, &key); 2085 eb, i, &key);
2049 if (ret) 2086 if (ret)
2050 break; 2087 break;
2051 } else if (key.type == BTRFS_DIR_ITEM_KEY || 2088 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2052 key.type == BTRFS_DIR_INDEX_KEY) {
2053 ret = replay_one_dir_item(wc->trans, root, path, 2089 ret = replay_one_dir_item(wc->trans, root, path,
2054 eb, i, &key); 2090 eb, i, &key);
2055 if (ret) 2091 if (ret)
@@ -3805,6 +3841,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
3805 int ret = 0; 3841 int ret = 0;
3806 struct btrfs_root *root; 3842 struct btrfs_root *root;
3807 struct dentry *old_parent = NULL; 3843 struct dentry *old_parent = NULL;
3844 struct inode *orig_inode = inode;
3808 3845
3809 /* 3846 /*
3810 * for regular files, if its inode is already on disk, we don't 3847 * for regular files, if its inode is already on disk, we don't
@@ -3824,7 +3861,14 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
3824 } 3861 }
3825 3862
3826 while (1) { 3863 while (1) {
3827 BTRFS_I(inode)->logged_trans = trans->transid; 3864 /*
3865 * If we are logging a directory then we start with our inode,
 3866 * not our parent's inode, so we need to skip setting the
3867 * logged_trans so that further down in the log code we don't
3868 * think this inode has already been logged.
3869 */
3870 if (inode != orig_inode)
3871 BTRFS_I(inode)->logged_trans = trans->transid;
3828 smp_mb(); 3872 smp_mb();
3829 3873
3830 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 3874 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
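The tree-log hunks above split directory-index replay into its own pass (LOG_WALK_REPLAY_DIR_INDEX) between inode replay and the final catch-all stage. As a rough illustration of that staged-walk idea -- not the btrfs code, just a self-contained model with invented item types -- the walker below visits the same item list once per stage and only handles the items that belong to the current stage:

#include <stdio.h>

/* Hypothetical stand-ins for the replay stages; the real btrfs code walks
 * extent buffers and dispatches on btrfs key types instead. */
enum walk_stage {
	WALK_PIN_ONLY,
	WALK_REPLAY_INODES,
	WALK_REPLAY_DIR_INDEX,
	WALK_REPLAY_ALL,
};

enum item_type { ITEM_INODE, ITEM_DIR_INDEX, ITEM_OTHER };

struct item { enum item_type type; const char *name; };

/* Handle one item only if it belongs to the current stage; later stages
 * deliberately skip work already done by earlier ones. */
static void replay_one_item(enum walk_stage stage, const struct item *it)
{
	if (stage == WALK_REPLAY_INODES && it->type == ITEM_INODE)
		printf("stage %d: replay inode item %s\n", stage, it->name);
	else if (stage == WALK_REPLAY_DIR_INDEX && it->type == ITEM_DIR_INDEX)
		printf("stage %d: replay dir index %s\n", stage, it->name);
	else if (stage == WALK_REPLAY_ALL && it->type == ITEM_OTHER)
		printf("stage %d: replay remaining item %s\n", stage, it->name);
}

int main(void)
{
	struct item log[] = {
		{ ITEM_INODE, "inode 257" },
		{ ITEM_DIR_INDEX, "dir index for 'foo'" },
		{ ITEM_OTHER, "extent item" },
	};
	/* One full walk of the log per stage, in order, mirroring how the
	 * replay callback is driven once per LOG_WALK_* stage. */
	for (enum walk_stage s = WALK_PIN_ONLY; s <= WALK_REPLAY_ALL; s++)
		for (unsigned i = 0; i < sizeof(log) / sizeof(log[0]); i++)
			replay_one_item(s, &log[i]);
	return 0;
}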
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0052ca8264d9..043b215769c2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -796,7 +796,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
796 fs_devices->rotating = 1; 796 fs_devices->rotating = 1;
797 797
798 fs_devices->open_devices++; 798 fs_devices->open_devices++;
799 if (device->writeable && !device->is_tgtdev_for_dev_replace) { 799 if (device->writeable &&
800 device->devid != BTRFS_DEV_REPLACE_DEVID) {
800 fs_devices->rw_devices++; 801 fs_devices->rw_devices++;
801 list_add(&device->dev_alloc_list, 802 list_add(&device->dev_alloc_list,
802 &fs_devices->alloc_list); 803 &fs_devices->alloc_list);
@@ -911,9 +912,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
911 if (disk_super->label[0]) { 912 if (disk_super->label[0]) {
912 if (disk_super->label[BTRFS_LABEL_SIZE - 1]) 913 if (disk_super->label[BTRFS_LABEL_SIZE - 1])
913 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0'; 914 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
914 printk(KERN_INFO "device label %s ", disk_super->label); 915 printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
915 } else { 916 } else {
916 printk(KERN_INFO "device fsid %pU ", disk_super->fsid); 917 printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
917 } 918 }
918 919
919 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path); 920 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
@@ -1715,6 +1716,7 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1715 struct btrfs_device *srcdev) 1716 struct btrfs_device *srcdev)
1716{ 1717{
1717 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex)); 1718 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1719
1718 list_del_rcu(&srcdev->dev_list); 1720 list_del_rcu(&srcdev->dev_list);
1719 list_del_rcu(&srcdev->dev_alloc_list); 1721 list_del_rcu(&srcdev->dev_alloc_list);
1720 fs_info->fs_devices->num_devices--; 1722 fs_info->fs_devices->num_devices--;
@@ -1724,9 +1726,13 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1724 } 1726 }
1725 if (srcdev->can_discard) 1727 if (srcdev->can_discard)
1726 fs_info->fs_devices->num_can_discard--; 1728 fs_info->fs_devices->num_can_discard--;
1727 if (srcdev->bdev) 1729 if (srcdev->bdev) {
1728 fs_info->fs_devices->open_devices--; 1730 fs_info->fs_devices->open_devices--;
1729 1731
1732 /* zero out the old super */
1733 btrfs_scratch_superblock(srcdev);
1734 }
1735
1730 call_rcu(&srcdev->rcu, free_device); 1736 call_rcu(&srcdev->rcu, free_device);
1731} 1737}
1732 1738
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 25badd1aec5c..f4a08d7fa2f7 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -56,7 +56,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
56 object->fscache.cookie->parent, 56 object->fscache.cookie->parent,
57 object->fscache.cookie->netfs_data, 57 object->fscache.cookie->netfs_data,
58 object->fscache.cookie->flags); 58 object->fscache.cookie->flags);
59 if (keybuf) 59 if (keybuf && cookie->def)
60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf, 60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
61 CACHEFILES_KEYBUF_SIZE); 61 CACHEFILES_KEYBUF_SIZE);
62 else 62 else
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 34c88b83e39f..12b0eef84183 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -162,8 +162,9 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
162int cachefiles_check_auxdata(struct cachefiles_object *object) 162int cachefiles_check_auxdata(struct cachefiles_object *object)
163{ 163{
164 struct cachefiles_xattr *auxbuf; 164 struct cachefiles_xattr *auxbuf;
165 enum fscache_checkaux validity;
165 struct dentry *dentry = object->dentry; 166 struct dentry *dentry = object->dentry;
166 unsigned int dlen; 167 ssize_t xlen;
167 int ret; 168 int ret;
168 169
169 ASSERT(dentry); 170 ASSERT(dentry);
@@ -174,22 +175,22 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
174 if (!auxbuf) 175 if (!auxbuf)
175 return -ENOMEM; 176 return -ENOMEM;
176 177
177 auxbuf->len = vfs_getxattr(dentry, cachefiles_xattr_cache, 178 xlen = vfs_getxattr(dentry, cachefiles_xattr_cache,
178 &auxbuf->type, 512 + 1); 179 &auxbuf->type, 512 + 1);
179 if (auxbuf->len < 1) 180 ret = -ESTALE;
180 return -ESTALE; 181 if (xlen < 1 ||
181 182 auxbuf->type != object->fscache.cookie->def->type)
182 if (auxbuf->type != object->fscache.cookie->def->type) 183 goto error;
183 return -ESTALE;
184 184
185 dlen = auxbuf->len - 1; 185 xlen--;
186 ret = fscache_check_aux(&object->fscache, &auxbuf->data, dlen); 186 validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen);
187 if (validity != FSCACHE_CHECKAUX_OKAY)
188 goto error;
187 189
190 ret = 0;
191error:
188 kfree(auxbuf); 192 kfree(auxbuf);
189 if (ret != FSCACHE_CHECKAUX_OKAY) 193 return ret;
190 return -ESTALE;
191
192 return 0;
193} 194}
194 195
195/* 196/*
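The cachefiles_check_auxdata() rework above also closes a leak: the early returns used to skip kfree(auxbuf). The shape it adopts -- preload the error code, funnel every failure through one label that frees the buffer -- is a common kernel idiom. A minimal standalone sketch of the same shape, with invented names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical validator using the preload-the-error, single-exit shape:
 * every failed check jumps to the one label that frees the buffer. */
static int check_blob(const char *stored, const char *expected)
{
	char *buf;
	int ret;

	buf = malloc(strlen(stored) + 1);
	if (!buf)
		return -ENOMEM;
	strcpy(buf, stored);

	ret = -ESTALE;                 /* assume stale until proven fresh */
	if (buf[0] == '\0')
		goto error;
	if (strcmp(buf, expected) != 0)
		goto error;

	ret = 0;                       /* all checks passed */
error:
	free(buf);                     /* freed exactly once on every path */
	return ret;
}

int main(void)
{
	return check_blob("v2", "v2") ? 1 : 0;
}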
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ea723a5e8226..6d0b07217ac9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -132,5 +132,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
132extern const struct export_operations cifs_export_ops; 132extern const struct export_operations cifs_export_ops;
133#endif /* CONFIG_CIFS_NFSD_EXPORT */ 133#endif /* CONFIG_CIFS_NFSD_EXPORT */
134 134
135#define CIFS_VERSION "2.01" 135#define CIFS_VERSION "2.02"
136#endif /* _CIFSFS_H */ 136#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cfa14c80ef3b..52b6f6c26bfc 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -547,9 +547,6 @@ struct TCP_Server_Info {
547 unsigned int max_rw; /* maxRw specifies the maximum */ 547 unsigned int max_rw; /* maxRw specifies the maximum */
548 /* message size the server can send or receive for */ 548 /* message size the server can send or receive for */
549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ 549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
550 unsigned int max_vcs; /* maximum number of smb sessions, at least
551 those that can be specified uniquely with
552 vcnumbers */
553 unsigned int capabilities; /* selective disabling of caps by smb sess */ 550 unsigned int capabilities; /* selective disabling of caps by smb sess */
554 int timeAdj; /* Adjust for difference in server time zone in sec */ 551 int timeAdj; /* Adjust for difference in server time zone in sec */
555 __u64 CurrentMid; /* multiplex id - rotating counter */ 552 __u64 CurrentMid; /* multiplex id - rotating counter */
@@ -715,7 +712,6 @@ struct cifs_ses {
715 enum statusEnum status; 712 enum statusEnum status;
716 unsigned overrideSecFlg; /* if non-zero override global sec flags */ 713 unsigned overrideSecFlg; /* if non-zero override global sec flags */
717 __u16 ipc_tid; /* special tid for connection to IPC share */ 714 __u16 ipc_tid; /* special tid for connection to IPC share */
718 __u16 vcnum;
719 char *serverOS; /* name of operating system underlying server */ 715 char *serverOS; /* name of operating system underlying server */
720 char *serverNOS; /* name of network operating system of server */ 716 char *serverNOS; /* name of network operating system of server */
721 char *serverDomain; /* security realm of server */ 717 char *serverDomain; /* security realm of server */
@@ -1272,6 +1268,7 @@ struct dfs_info3_param {
1272#define CIFS_FATTR_DELETE_PENDING 0x2 1268#define CIFS_FATTR_DELETE_PENDING 0x2
1273#define CIFS_FATTR_NEED_REVAL 0x4 1269#define CIFS_FATTR_NEED_REVAL 0x4
1274#define CIFS_FATTR_INO_COLLISION 0x8 1270#define CIFS_FATTR_INO_COLLISION 0x8
1271#define CIFS_FATTR_UNKNOWN_NLINK 0x10
1275 1272
1276struct cifs_fattr { 1273struct cifs_fattr {
1277 u32 cf_flags; 1274 u32 cf_flags;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 948676db8e2e..a630475e421c 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2652,26 +2652,7 @@ typedef struct file_xattr_info {
2652} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info 2652} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
2653 level 0x205 */ 2653 level 0x205 */
2654 2654
 2655 2655/* flags for lsattr and chflags commands removed; they are in uapi/linux/fs.h */
2656/* flags for chattr command */
2657#define EXT_SECURE_DELETE 0x00000001 /* EXT3_SECRM_FL */
2658#define EXT_ENABLE_UNDELETE 0x00000002 /* EXT3_UNRM_FL */
2659/* Reserved for compress file 0x4 */
2660#define EXT_SYNCHRONOUS 0x00000008 /* EXT3_SYNC_FL */
2661#define EXT_IMMUTABLE_FL 0x00000010 /* EXT3_IMMUTABLE_FL */
2662#define EXT_OPEN_APPEND_ONLY 0x00000020 /* EXT3_APPEND_FL */
2663#define EXT_DO_NOT_BACKUP 0x00000040 /* EXT3_NODUMP_FL */
2664#define EXT_NO_UPDATE_ATIME 0x00000080 /* EXT3_NOATIME_FL */
2665/* 0x100 through 0x800 reserved for compression flags and are GET-ONLY */
2666#define EXT_HASH_TREE_INDEXED_DIR 0x00001000 /* GET-ONLY EXT3_INDEX_FL */
2667/* 0x2000 reserved for IMAGIC_FL */
2668#define EXT_JOURNAL_THIS_FILE 0x00004000 /* GET-ONLY EXT3_JOURNAL_DATA_FL */
2669/* 0x8000 reserved for EXT3_NOTAIL_FL */
2670#define EXT_SYNCHRONOUS_DIR 0x00010000 /* EXT3_DIRSYNC_FL */
2671#define EXT_TOPDIR 0x00020000 /* EXT3_TOPDIR_FL */
2672
2673#define EXT_SET_MASK 0x000300FF
2674#define EXT_GET_MASK 0x0003DFFF
2675 2656
2676typedef struct file_chattr_info { 2657typedef struct file_chattr_info {
2677 __le64 mask; /* list of all possible attribute bits */ 2658 __le64 mask; /* list of all possible attribute bits */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a3d74fea1623..4baf35949b51 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -463,7 +463,6 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
463 cifs_max_pending); 463 cifs_max_pending);
464 set_credits(server, server->maxReq); 464 set_credits(server, server->maxReq);
465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize); 465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
466 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
467 /* even though we do not use raw we might as well set this 466 /* even though we do not use raw we might as well set this
468 accurately, in case we ever find a need for it */ 467 accurately, in case we ever find a need for it */
469 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { 468 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3e2eaa503a6..5384c2a640ca 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -500,6 +500,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
500 if (server->ops->close) 500 if (server->ops->close)
501 server->ops->close(xid, tcon, &fid); 501 server->ops->close(xid, tcon, &fid);
502 cifs_del_pending_open(&open); 502 cifs_del_pending_open(&open);
503 fput(file);
503 rc = -ENOMEM; 504 rc = -ENOMEM;
504 } 505 }
505 506
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index eb955b525e55..7ddddf2e2504 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3254,6 +3254,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3254 /* 3254 /*
3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS 3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3256 * immediately if the cookie is negative 3256 * immediately if the cookie is negative
3257 *
3258 * After this point, every page in the list might have PG_fscache set,
3259 * so we will need to clean that up off of every page we don't use.
3257 */ 3260 */
3258 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, 3261 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3259 &num_pages); 3262 &num_pages);
@@ -3376,6 +3379,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3376 kref_put(&rdata->refcount, cifs_readdata_release); 3379 kref_put(&rdata->refcount, cifs_readdata_release);
3377 } 3380 }
3378 3381
3382 /* Any pages that have been shown to fscache but didn't get added to
3383 * the pagecache must be uncached before they get returned to the
3384 * allocator.
3385 */
3386 cifs_fscache_readpages_cancel(mapping->host, page_list);
3379 return rc; 3387 return rc;
3380} 3388}
3381 3389
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 2f4bc5a58054..b3258f35e88a 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -223,6 +223,13 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
223 fscache_uncache_page(CIFS_I(inode)->fscache, page); 223 fscache_uncache_page(CIFS_I(inode)->fscache, page);
224} 224}
225 225
226void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
227{
228 cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
229 __func__, CIFS_I(inode)->fscache, inode);
230 fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
231}
232
226void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) 233void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
227{ 234{
228 struct cifsInodeInfo *cifsi = CIFS_I(inode); 235 struct cifsInodeInfo *cifsi = CIFS_I(inode);
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 63539323e0b9..24794b6cd8ec 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -54,6 +54,7 @@ extern int __cifs_readpages_from_fscache(struct inode *,
54 struct address_space *, 54 struct address_space *,
55 struct list_head *, 55 struct list_head *,
56 unsigned *); 56 unsigned *);
57extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
57 58
58extern void __cifs_readpage_to_fscache(struct inode *, struct page *); 59extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
59 60
@@ -91,6 +92,13 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
91 __cifs_readpage_to_fscache(inode, page); 92 __cifs_readpage_to_fscache(inode, page);
92} 93}
93 94
95static inline void cifs_fscache_readpages_cancel(struct inode *inode,
96 struct list_head *pages)
97{
98 if (CIFS_I(inode)->fscache)
99 return __cifs_fscache_readpages_cancel(inode, pages);
100}
101
94#else /* CONFIG_CIFS_FSCACHE */ 102#else /* CONFIG_CIFS_FSCACHE */
95static inline int cifs_fscache_register(void) { return 0; } 103static inline int cifs_fscache_register(void) { return 0; }
96static inline void cifs_fscache_unregister(void) {} 104static inline void cifs_fscache_unregister(void) {}
@@ -131,6 +139,11 @@ static inline int cifs_readpages_from_fscache(struct inode *inode,
131static inline void cifs_readpage_to_fscache(struct inode *inode, 139static inline void cifs_readpage_to_fscache(struct inode *inode,
132 struct page *page) {} 140 struct page *page) {}
133 141
142static inline void cifs_fscache_readpages_cancel(struct inode *inode,
143 struct list_head *pages)
144{
145}
146
134#endif /* CONFIG_CIFS_FSCACHE */ 147#endif /* CONFIG_CIFS_FSCACHE */
135 148
136#endif /* _CIFS_FSCACHE_H */ 149#endif /* _CIFS_FSCACHE_H */
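The fscache.h hunks follow the usual conditional-feature pattern: a thin inline wrapper forwards to the __-prefixed implementation when CONFIG_CIFS_FSCACHE is set, and an empty inline stub takes its place otherwise, so callers never need #ifdefs. A self-contained sketch of that pattern with a made-up FEATURE_X switch:

#include <stdio.h>

/* Toggle this to model building with or without the optional feature. */
#define FEATURE_X 1

#if FEATURE_X
/* Real implementation, only compiled when the feature is enabled. */
static void __feature_x_cancel(const char *what)
{
	printf("feature X: cancelling %s\n", what);
}

static inline void feature_x_cancel(const char *what)
{
	/* The wrapper can also gate on per-object state, the way the cifs
	 * helper checks CIFS_I(inode)->fscache before forwarding. */
	if (what)
		__feature_x_cancel(what);
}
#else
/* Empty stub: callers compile unchanged when the feature is disabled. */
static inline void feature_x_cancel(const char *what) { (void)what; }
#endif

int main(void)
{
	feature_x_cancel("pending readpages");
	return 0;
}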
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f9ff9c173f78..867b7cdc794a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -120,6 +120,33 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
120 cifs_i->invalid_mapping = true; 120 cifs_i->invalid_mapping = true;
121} 121}
122 122
123/*
124 * copy nlink to the inode, unless it wasn't provided. Provide
125 * sane values if we don't have an existing one and none was provided
126 */
127static void
128cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
129{
130 /*
131 * if we're in a situation where we can't trust what we
132 * got from the server (readdir, some non-unix cases)
133 * fake reasonable values
134 */
135 if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
136 /* only provide fake values on a new inode */
137 if (inode->i_state & I_NEW) {
138 if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
139 set_nlink(inode, 2);
140 else
141 set_nlink(inode, 1);
142 }
143 return;
144 }
145
146 /* we trust the server, so update it */
147 set_nlink(inode, fattr->cf_nlink);
148}
149
123/* populate an inode with info from a cifs_fattr struct */ 150/* populate an inode with info from a cifs_fattr struct */
124void 151void
125cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) 152cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
@@ -134,7 +161,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
134 inode->i_mtime = fattr->cf_mtime; 161 inode->i_mtime = fattr->cf_mtime;
135 inode->i_ctime = fattr->cf_ctime; 162 inode->i_ctime = fattr->cf_ctime;
136 inode->i_rdev = fattr->cf_rdev; 163 inode->i_rdev = fattr->cf_rdev;
137 set_nlink(inode, fattr->cf_nlink); 164 cifs_nlink_fattr_to_inode(inode, fattr);
138 inode->i_uid = fattr->cf_uid; 165 inode->i_uid = fattr->cf_uid;
139 inode->i_gid = fattr->cf_gid; 166 inode->i_gid = fattr->cf_gid;
140 167
@@ -541,6 +568,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
541 fattr->cf_bytes = le64_to_cpu(info->AllocationSize); 568 fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
542 fattr->cf_createtime = le64_to_cpu(info->CreationTime); 569 fattr->cf_createtime = le64_to_cpu(info->CreationTime);
543 570
571 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
544 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { 572 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
545 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; 573 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
546 fattr->cf_dtype = DT_DIR; 574 fattr->cf_dtype = DT_DIR;
@@ -548,7 +576,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
548 * Server can return wrong NumberOfLinks value for directories 576 * Server can return wrong NumberOfLinks value for directories
549 * when Unix extensions are disabled - fake it. 577 * when Unix extensions are disabled - fake it.
550 */ 578 */
551 fattr->cf_nlink = 2; 579 if (!tcon->unix_ext)
580 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
552 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) { 581 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
553 fattr->cf_mode = S_IFLNK; 582 fattr->cf_mode = S_IFLNK;
554 fattr->cf_dtype = DT_LNK; 583 fattr->cf_dtype = DT_LNK;
@@ -561,11 +590,15 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
561 if (fattr->cf_cifsattrs & ATTR_READONLY) 590 if (fattr->cf_cifsattrs & ATTR_READONLY)
562 fattr->cf_mode &= ~(S_IWUGO); 591 fattr->cf_mode &= ~(S_IWUGO);
563 592
564 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); 593 /*
565 if (fattr->cf_nlink < 1) { 594 * Don't accept zero nlink from non-unix servers unless
566 cifs_dbg(1, "replacing bogus file nlink value %u\n", 595 * delete is pending. Instead mark it as unknown.
596 */
597 if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
598 !info->DeletePending) {
599 cifs_dbg(1, "bogus file nlink value %u\n",
567 fattr->cf_nlink); 600 fattr->cf_nlink);
568 fattr->cf_nlink = 1; 601 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
569 } 602 }
570 } 603 }
571 604
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 42ef03be089f..53a75f3d0179 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -180,6 +180,9 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
180 fattr->cf_dtype = DT_REG; 180 fattr->cf_dtype = DT_REG;
181 } 181 }
182 182
183 /* non-unix readdir doesn't provide nlink */
184 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
185
183 if (fattr->cf_cifsattrs & ATTR_READONLY) 186 if (fattr->cf_cifsattrs & ATTR_READONLY)
184 fattr->cf_mode &= ~S_IWUGO; 187 fattr->cf_mode &= ~S_IWUGO;
185 188
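The inode.c and readdir.c hunks replace the old "always fake nlink" behaviour with a CIFS_FATTR_UNKNOWN_NLINK flag: when the server value cannot be trusted, a sane default is used only for freshly created inodes, and an existing inode's link count is left alone. A stripped-down model of that decision, with invented struct fields standing in for the cifs ones:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the cifs structures involved. */
struct mini_fattr {
	unsigned int nlink;
	bool nlink_unknown;   /* models CIFS_FATTR_UNKNOWN_NLINK */
	bool is_dir;
};

struct mini_inode {
	unsigned int nlink;
	bool is_new;          /* models inode->i_state & I_NEW */
};

static void apply_nlink(struct mini_inode *inode, const struct mini_fattr *fattr)
{
	if (fattr->nlink_unknown) {
		/* Only invent a value for a brand-new inode; never clobber
		 * a count we already believe on an existing one. */
		if (inode->is_new)
			inode->nlink = fattr->is_dir ? 2 : 1;
		return;
	}
	/* Server value is trustworthy, so propagate it. */
	inode->nlink = fattr->nlink;
}

int main(void)
{
	struct mini_inode ino = { .nlink = 5, .is_new = false };
	struct mini_fattr readdir_fattr = { .nlink = 0, .nlink_unknown = true, .is_dir = false };

	apply_nlink(&ino, &readdir_fattr);
	printf("nlink stays %u despite untrusted readdir data\n", ino.nlink);
	return 0;
}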
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 5f99b7f19e78..352358de1d7e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -32,88 +32,6 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include "cifs_spnego.h" 33#include "cifs_spnego.h"
34 34
35/*
36 * Checks if this is the first smb session to be reconnected after
37 * the socket has been reestablished (so we know whether to use vc 0).
38 * Called while holding the cifs_tcp_ses_lock, so do not block
39 */
40static bool is_first_ses_reconnect(struct cifs_ses *ses)
41{
42 struct list_head *tmp;
43 struct cifs_ses *tmp_ses;
44
45 list_for_each(tmp, &ses->server->smb_ses_list) {
46 tmp_ses = list_entry(tmp, struct cifs_ses,
47 smb_ses_list);
48 if (tmp_ses->need_reconnect == false)
49 return false;
50 }
51 /* could not find a session that was already connected,
52 this must be the first one we are reconnecting */
53 return true;
54}
55
56/*
57 * vc number 0 is treated specially by some servers, and should be the
58 * first one we request. After that we can use vcnumbers up to maxvcs,
59 * one for each smb session (some Windows versions set maxvcs incorrectly
60 * so maxvc=1 can be ignored). If we have too many vcs, we can reuse
61 * any vc but zero (some servers reset the connection on vcnum zero)
62 *
63 */
64static __le16 get_next_vcnum(struct cifs_ses *ses)
65{
66 __u16 vcnum = 0;
67 struct list_head *tmp;
68 struct cifs_ses *tmp_ses;
69 __u16 max_vcs = ses->server->max_vcs;
70 __u16 i;
71 int free_vc_found = 0;
72
73 /* Quoting the MS-SMB specification: "Windows-based SMB servers set this
74 field to one but do not enforce this limit, which allows an SMB client
75 to establish more virtual circuits than allowed by this value ... but
76 other server implementations can enforce this limit." */
77 if (max_vcs < 2)
78 max_vcs = 0xFFFF;
79
80 spin_lock(&cifs_tcp_ses_lock);
81 if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
82 goto get_vc_num_exit; /* vcnum will be zero */
83 for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
84 if (i == 0) /* this is the only connection, use vc 0 */
85 break;
86
87 free_vc_found = 1;
88
89 list_for_each(tmp, &ses->server->smb_ses_list) {
90 tmp_ses = list_entry(tmp, struct cifs_ses,
91 smb_ses_list);
92 if (tmp_ses->vcnum == i) {
93 free_vc_found = 0;
94 break; /* found duplicate, try next vcnum */
95 }
96 }
97 if (free_vc_found)
98 break; /* we found a vcnumber that will work - use it */
99 }
100
101 if (i == 0)
102 vcnum = 0; /* for most common case, ie if one smb session, use
103 vc zero. Also for case when no free vcnum, zero
104 is safest to send (some clients only send zero) */
105 else if (free_vc_found == 0)
106 vcnum = 1; /* we can not reuse vc=0 safely, since some servers
107 reset all uids on that, but 1 is ok. */
108 else
109 vcnum = i;
110 ses->vcnum = vcnum;
111get_vc_num_exit:
112 spin_unlock(&cifs_tcp_ses_lock);
113
114 return cpu_to_le16(vcnum);
115}
116
117static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) 35static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
118{ 36{
119 __u32 capabilities = 0; 37 __u32 capabilities = 0;
@@ -128,7 +46,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
128 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, 46 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
129 USHRT_MAX)); 47 USHRT_MAX));
130 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); 48 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
131 pSMB->req.VcNumber = get_next_vcnum(ses); 49 pSMB->req.VcNumber = __constant_cpu_to_le16(1);
132 50
133 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ 51 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
134 52
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 318e8433527c..b2a86e324aac 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -586,7 +586,8 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
586 586
587 fscache_operation_init(op, NULL, NULL); 587 fscache_operation_init(op, NULL, NULL);
588 op->flags = FSCACHE_OP_MYTHREAD | 588 op->flags = FSCACHE_OP_MYTHREAD |
589 (1 << FSCACHE_OP_WAITING); 589 (1 << FSCACHE_OP_WAITING) |
590 (1 << FSCACHE_OP_UNUSE_COOKIE);
590 591
591 spin_lock(&cookie->lock); 592 spin_lock(&cookie->lock);
592 593
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 62b43b577bfc..b7989f2ab4c4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -182,6 +182,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
182 struct inode *inode; 182 struct inode *inode;
183 struct dentry *parent; 183 struct dentry *parent;
184 struct fuse_conn *fc; 184 struct fuse_conn *fc;
185 struct fuse_inode *fi;
185 int ret; 186 int ret;
186 187
187 inode = ACCESS_ONCE(entry->d_inode); 188 inode = ACCESS_ONCE(entry->d_inode);
@@ -228,7 +229,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
228 if (!err && !outarg.nodeid) 229 if (!err && !outarg.nodeid)
229 err = -ENOENT; 230 err = -ENOENT;
230 if (!err) { 231 if (!err) {
231 struct fuse_inode *fi = get_fuse_inode(inode); 232 fi = get_fuse_inode(inode);
232 if (outarg.nodeid != get_node_id(inode)) { 233 if (outarg.nodeid != get_node_id(inode)) {
233 fuse_queue_forget(fc, forget, outarg.nodeid, 1); 234 fuse_queue_forget(fc, forget, outarg.nodeid, 1);
234 goto invalid; 235 goto invalid;
@@ -246,8 +247,11 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
246 attr_version); 247 attr_version);
247 fuse_change_entry_timeout(entry, &outarg); 248 fuse_change_entry_timeout(entry, &outarg);
248 } else if (inode) { 249 } else if (inode) {
249 fc = get_fuse_conn(inode); 250 fi = get_fuse_inode(inode);
250 if (fc->readdirplus_auto) { 251 if (flags & LOOKUP_RCU) {
252 if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
253 return -ECHILD;
254 } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
251 parent = dget_parent(entry); 255 parent = dget_parent(entry);
252 fuse_advise_use_readdirplus(parent->d_inode); 256 fuse_advise_use_readdirplus(parent->d_inode);
253 dput(parent); 257 dput(parent);
@@ -259,7 +263,8 @@ out:
259 263
260invalid: 264invalid:
261 ret = 0; 265 ret = 0;
262 if (check_submounts_and_drop(entry) != 0) 266
267 if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
263 ret = 1; 268 ret = 1;
264 goto out; 269 goto out;
265} 270}
@@ -1063,6 +1068,8 @@ static int fuse_access(struct inode *inode, int mask)
1063 struct fuse_access_in inarg; 1068 struct fuse_access_in inarg;
1064 int err; 1069 int err;
1065 1070
1071 BUG_ON(mask & MAY_NOT_BLOCK);
1072
1066 if (fc->no_access) 1073 if (fc->no_access)
1067 return 0; 1074 return 0;
1068 1075
@@ -1150,9 +1157,6 @@ static int fuse_permission(struct inode *inode, int mask)
1150 noticed immediately, only after the attribute 1157 noticed immediately, only after the attribute
1151 timeout has expired */ 1158 timeout has expired */
1152 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { 1159 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1153 if (mask & MAY_NOT_BLOCK)
1154 return -ECHILD;
1155
1156 err = fuse_access(inode, mask); 1160 err = fuse_access(inode, mask);
1157 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { 1161 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1158 if (!(inode->i_mode & S_IXUGO)) { 1162 if (!(inode->i_mode & S_IXUGO)) {
@@ -1291,6 +1295,8 @@ static int fuse_direntplus_link(struct file *file,
1291 } 1295 }
1292 1296
1293found: 1297found:
1298 if (fc->readdirplus_auto)
1299 set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
1294 fuse_change_entry_timeout(dentry, o); 1300 fuse_change_entry_timeout(dentry, o);
1295 1301
1296 err = 0; 1302 err = 0;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d409deafc67b..4598345ab87d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2467,6 +2467,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2467{ 2467{
2468 struct fuse_file *ff = file->private_data; 2468 struct fuse_file *ff = file->private_data;
2469 struct inode *inode = file->f_inode; 2469 struct inode *inode = file->f_inode;
2470 struct fuse_inode *fi = get_fuse_inode(inode);
2470 struct fuse_conn *fc = ff->fc; 2471 struct fuse_conn *fc = ff->fc;
2471 struct fuse_req *req; 2472 struct fuse_req *req;
2472 struct fuse_fallocate_in inarg = { 2473 struct fuse_fallocate_in inarg = {
@@ -2484,10 +2485,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2484 2485
2485 if (lock_inode) { 2486 if (lock_inode) {
2486 mutex_lock(&inode->i_mutex); 2487 mutex_lock(&inode->i_mutex);
2487 if (mode & FALLOC_FL_PUNCH_HOLE) 2488 if (mode & FALLOC_FL_PUNCH_HOLE) {
2488 fuse_set_nowrite(inode); 2489 loff_t endbyte = offset + length - 1;
2490 err = filemap_write_and_wait_range(inode->i_mapping,
2491 offset, endbyte);
2492 if (err)
2493 goto out;
2494
2495 fuse_sync_writes(inode);
2496 }
2489 } 2497 }
2490 2498
2499 if (!(mode & FALLOC_FL_KEEP_SIZE))
2500 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2501
2491 req = fuse_get_req_nopages(fc); 2502 req = fuse_get_req_nopages(fc);
2492 if (IS_ERR(req)) { 2503 if (IS_ERR(req)) {
2493 err = PTR_ERR(req); 2504 err = PTR_ERR(req);
@@ -2520,11 +2531,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2520 fuse_invalidate_attr(inode); 2531 fuse_invalidate_attr(inode);
2521 2532
2522out: 2533out:
2523 if (lock_inode) { 2534 if (!(mode & FALLOC_FL_KEEP_SIZE))
2524 if (mode & FALLOC_FL_PUNCH_HOLE) 2535 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2525 fuse_release_nowrite(inode); 2536
2537 if (lock_inode)
2526 mutex_unlock(&inode->i_mutex); 2538 mutex_unlock(&inode->i_mutex);
2527 }
2528 2539
2529 return err; 2540 return err;
2530} 2541}
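The fallocate changes above bracket the size-changing request with the FUSE_I_SIZE_UNSTABLE bit and make sure it is cleared on every exit path, not only on success. The same bracket-and-always-clear shape in a standalone sketch (the flag word and the backend call are invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned long state_flags;          /* models fuse_inode::state */
#define SIZE_UNSTABLE_BIT (1UL << 0)       /* models FUSE_I_SIZE_UNSTABLE */

static int do_resize(long length)
{
	return length < 0 ? -EINVAL : 0;   /* pretend backend request */
}

static int fallocate_like(long length, bool keeps_size)
{
	int err;

	if (!keeps_size)
		state_flags |= SIZE_UNSTABLE_BIT;   /* size may change now */

	err = do_resize(length);
	if (err)
		goto out;

	/* ... attribute invalidation would happen here ... */
out:
	if (!keeps_size)
		state_flags &= ~SIZE_UNSTABLE_BIT;  /* cleared on every path */
	return err;
}

int main(void)
{
	printf("ok path: %d, flags %lu\n", fallocate_like(4096, false), state_flags);
	printf("err path: %d, flags %lu\n", fallocate_like(-1, false), state_flags);
	return 0;
}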
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5ced199b50bb..5b9e6f3b6aef 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -115,6 +115,8 @@ struct fuse_inode {
115enum { 115enum {
116 /** Advise readdirplus */ 116 /** Advise readdirplus */
117 FUSE_I_ADVISE_RDPLUS, 117 FUSE_I_ADVISE_RDPLUS,
118 /** Initialized with readdirplus */
119 FUSE_I_INIT_RDPLUS,
118 /** An operation changing file size is in progress */ 120 /** An operation changing file size is in progress */
119 FUSE_I_SIZE_UNSTABLE, 121 FUSE_I_SIZE_UNSTABLE,
120}; 122};
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 64915eeae5a7..ced3257f06e8 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -694,8 +694,10 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
694 694
695 mark_inode_dirty(inode); 695 mark_inode_dirty(inode);
696 d_instantiate(dentry, inode); 696 d_instantiate(dentry, inode);
697 if (file) 697 if (file) {
698 *opened |= FILE_CREATED;
698 error = finish_open(file, dentry, gfs2_open_common, opened); 699 error = finish_open(file, dentry, gfs2_open_common, opened);
700 }
699 gfs2_glock_dq_uninit(ghs); 701 gfs2_glock_dq_uninit(ghs);
700 gfs2_glock_dq_uninit(ghs + 1); 702 gfs2_glock_dq_uninit(ghs + 1);
701 return error; 703 return error;
diff --git a/fs/namei.c b/fs/namei.c
index 0dc4cbf21f37..645268f23eb6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2656,6 +2656,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2656 int acc_mode; 2656 int acc_mode;
2657 int create_error = 0; 2657 int create_error = 0;
2658 struct dentry *const DENTRY_NOT_SET = (void *) -1UL; 2658 struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
2659 bool excl;
2659 2660
2660 BUG_ON(dentry->d_inode); 2661 BUG_ON(dentry->d_inode);
2661 2662
@@ -2669,10 +2670,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2669 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) 2670 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
2670 mode &= ~current_umask(); 2671 mode &= ~current_umask();
2671 2672
2672 if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) { 2673 excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
2674 if (excl)
2673 open_flag &= ~O_TRUNC; 2675 open_flag &= ~O_TRUNC;
2674 *opened |= FILE_CREATED;
2675 }
2676 2676
2677 /* 2677 /*
 2678 * Checking write permission is tricky, because we don't know if we are 2678 * Checking write permission is tricky, because we don't know if we are
@@ -2725,12 +2725,6 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2725 goto out; 2725 goto out;
2726 } 2726 }
2727 2727
2728 acc_mode = op->acc_mode;
2729 if (*opened & FILE_CREATED) {
2730 fsnotify_create(dir, dentry);
2731 acc_mode = MAY_OPEN;
2732 }
2733
2734 if (error) { /* returned 1, that is */ 2728 if (error) { /* returned 1, that is */
2735 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { 2729 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
2736 error = -EIO; 2730 error = -EIO;
@@ -2740,9 +2734,19 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2740 dput(dentry); 2734 dput(dentry);
2741 dentry = file->f_path.dentry; 2735 dentry = file->f_path.dentry;
2742 } 2736 }
2743 if (create_error && dentry->d_inode == NULL) { 2737 if (*opened & FILE_CREATED)
2744 error = create_error; 2738 fsnotify_create(dir, dentry);
2745 goto out; 2739 if (!dentry->d_inode) {
2740 WARN_ON(*opened & FILE_CREATED);
2741 if (create_error) {
2742 error = create_error;
2743 goto out;
2744 }
2745 } else {
2746 if (excl && !(*opened & FILE_CREATED)) {
2747 error = -EEXIST;
2748 goto out;
2749 }
2746 } 2750 }
2747 goto looked_up; 2751 goto looked_up;
2748 } 2752 }
@@ -2751,6 +2755,12 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2751 * We didn't have the inode before the open, so check open permission 2755 * We didn't have the inode before the open, so check open permission
2752 * here. 2756 * here.
2753 */ 2757 */
2758 acc_mode = op->acc_mode;
2759 if (*opened & FILE_CREATED) {
2760 WARN_ON(!(open_flag & O_CREAT));
2761 fsnotify_create(dir, dentry);
2762 acc_mode = MAY_OPEN;
2763 }
2754 error = may_open(&file->f_path, acc_mode, open_flag); 2764 error = may_open(&file->f_path, acc_mode, open_flag);
2755 if (error) 2765 if (error)
2756 fput(file); 2766 fput(file);
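The atomic_open() rework stops assuming that O_CREAT|O_EXCL always means "we created it" and instead trusts -- and sanity-checks -- the FILE_CREATED flag the filesystem reports back. The cross-checks boil down to a couple of invariants, sketched here as a standalone function with simplified inputs (not the VFS code itself):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define FILE_CREATED 0x1   /* same meaning as the VFS *opened flag bit */

/*
 * Simplified post-open consistency checks: 'excl' means O_CREAT|O_EXCL was
 * requested, 'has_inode' says whether the dentry ended up positive, and
 * 'opened' is what the filesystem reported.
 */
static int check_atomic_open_result(bool excl, bool has_inode, int opened,
				    int create_error)
{
	if (!has_inode) {
		/* A negative dentry can never have been "created"; the real
		 * code WARN()s on this combination. */
		if (opened & FILE_CREATED)
			fprintf(stderr, "warning: FILE_CREATED on negative dentry\n");
		if (create_error)
			return create_error;
	} else if (excl && !(opened & FILE_CREATED)) {
		/* Exclusive create, but the fs says it only opened an
		 * existing file: that must surface as -EEXIST. */
		return -EEXIST;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_atomic_open_result(true, true, 0, 0));            /* -EEXIST */
	printf("%d\n", check_atomic_open_result(true, true, FILE_CREATED, 0)); /* 0 */
	return 0;
}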
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index de434f309af0..02b0df769e2d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1392,6 +1392,9 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1392{ 1392{
1393 int err; 1393 int err;
1394 1394
1395 if ((open_flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
1396 *opened |= FILE_CREATED;
1397
1395 err = finish_open(file, dentry, do_open, opened); 1398 err = finish_open(file, dentry, do_open, opened);
1396 if (err) 1399 if (err)
1397 goto out; 1400 goto out;
@@ -1455,7 +1458,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1455 1458
1456 trace_nfs_atomic_open_enter(dir, ctx, open_flags); 1459 trace_nfs_atomic_open_enter(dir, ctx, open_flags);
1457 nfs_block_sillyrename(dentry->d_parent); 1460 nfs_block_sillyrename(dentry->d_parent);
1458 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr); 1461 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
1459 nfs_unblock_sillyrename(dentry->d_parent); 1462 nfs_unblock_sillyrename(dentry->d_parent);
1460 if (IS_ERR(inode)) { 1463 if (IS_ERR(inode)) {
1461 err = PTR_ERR(inode); 1464 err = PTR_ERR(inode);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index e5b804dd944c..77efaf15ec90 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -19,6 +19,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
19 struct inode *dir; 19 struct inode *dir;
20 unsigned openflags = filp->f_flags; 20 unsigned openflags = filp->f_flags;
21 struct iattr attr; 21 struct iattr attr;
22 int opened = 0;
22 int err; 23 int err;
23 24
24 /* 25 /*
@@ -55,7 +56,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
55 nfs_wb_all(inode); 56 nfs_wb_all(inode);
56 } 57 }
57 58
58 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); 59 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
59 if (IS_ERR(inode)) { 60 if (IS_ERR(inode)) {
60 err = PTR_ERR(inode); 61 err = PTR_ERR(inode);
61 switch (err) { 62 switch (err) {
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 95604f64cab8..c7c295e556ed 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -185,6 +185,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
185 if (status) 185 if (status)
186 goto out_put; 186 goto out_put;
187 187
188 smp_wmb();
188 ds->ds_clp = clp; 189 ds->ds_clp = clp;
189 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); 190 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
190out: 191out:
@@ -801,34 +802,35 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
801 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; 802 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
802 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; 803 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
803 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 804 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
804 805 struct nfs4_pnfs_ds *ret = ds;
805 if (filelayout_test_devid_unavailable(devid))
806 return NULL;
807 806
808 if (ds == NULL) { 807 if (ds == NULL) {
809 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 808 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
810 __func__, ds_idx); 809 __func__, ds_idx);
811 filelayout_mark_devid_invalid(devid); 810 filelayout_mark_devid_invalid(devid);
812 return NULL; 811 goto out;
813 } 812 }
813 smp_rmb();
814 if (ds->ds_clp) 814 if (ds->ds_clp)
815 return ds; 815 goto out_test_devid;
816 816
817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
819 int err; 819 int err;
820 820
821 err = nfs4_ds_connect(s, ds); 821 err = nfs4_ds_connect(s, ds);
822 if (err) { 822 if (err)
823 nfs4_mark_deviceid_unavailable(devid); 823 nfs4_mark_deviceid_unavailable(devid);
824 ds = NULL;
825 }
826 nfs4_clear_ds_conn_bit(ds); 824 nfs4_clear_ds_conn_bit(ds);
827 } else { 825 } else {
828 /* Either ds is connected, or ds is NULL */ 826 /* Either ds is connected, or ds is NULL */
829 nfs4_wait_ds_connect(ds); 827 nfs4_wait_ds_connect(ds);
830 } 828 }
831 return ds; 829out_test_devid:
830 if (filelayout_test_devid_unavailable(devid))
831 ret = NULL;
832out:
833 return ret;
832} 834}
833 835
834module_param(dataserver_retrans, uint, 0644); 836module_param(dataserver_retrans, uint, 0644);
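The filelayoutdev hunk pairs an smp_wmb() before publishing ds->ds_clp with an smp_rmb() after the reader sees it non-NULL, so the fields initialised by nfs4_ds_connect() are visible before the pointer is. Outside the kernel the closest analogue is a release store paired with an acquire load; a small C11 sketch of that pairing, with invented names:

#include <stdatomic.h>
#include <stdio.h>

struct conn {
	int ready_data;                 /* initialised before publication */
};

static struct conn the_conn;
static _Atomic(struct conn *) published;   /* models ds->ds_clp */

/* Writer: finish initialisation, then publish with release semantics
 * (the smp_wmb() + plain store pair in the kernel patch). */
static void publish(void)
{
	the_conn.ready_data = 42;
	atomic_store_explicit(&published, &the_conn, memory_order_release);
}

/* Reader: the acquire load pairs with the release store (smp_rmb() after
 * the NULL check in the kernel code), so ready_data is guaranteed visible. */
static int consume(void)
{
	struct conn *c = atomic_load_explicit(&published, memory_order_acquire);
	return c ? c->ready_data : -1;
}

int main(void)
{
	publish();
	printf("%d\n", consume());
	return 0;
}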
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 989bb9d3074d..d53d6785cba2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -912,6 +912,7 @@ struct nfs4_opendata {
912 struct iattr attrs; 912 struct iattr attrs;
913 unsigned long timestamp; 913 unsigned long timestamp;
914 unsigned int rpc_done : 1; 914 unsigned int rpc_done : 1;
915 unsigned int file_created : 1;
915 unsigned int is_recover : 1; 916 unsigned int is_recover : 1;
916 int rpc_status; 917 int rpc_status;
917 int cancelled; 918 int cancelled;
@@ -1946,8 +1947,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1946 1947
1947 nfs_fattr_map_and_free_names(server, &data->f_attr); 1948 nfs_fattr_map_and_free_names(server, &data->f_attr);
1948 1949
1949 if (o_arg->open_flags & O_CREAT) 1950 if (o_arg->open_flags & O_CREAT) {
1950 update_changeattr(dir, &o_res->cinfo); 1951 update_changeattr(dir, &o_res->cinfo);
1952 if (o_arg->open_flags & O_EXCL)
1953 data->file_created = 1;
1954 else if (o_res->cinfo.before != o_res->cinfo.after)
1955 data->file_created = 1;
1956 }
1951 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1957 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1952 server->caps &= ~NFS_CAP_POSIX_LOCK; 1958 server->caps &= ~NFS_CAP_POSIX_LOCK;
1953 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1959 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -2191,7 +2197,8 @@ static int _nfs4_do_open(struct inode *dir,
2191 struct nfs_open_context *ctx, 2197 struct nfs_open_context *ctx,
2192 int flags, 2198 int flags,
2193 struct iattr *sattr, 2199 struct iattr *sattr,
2194 struct nfs4_label *label) 2200 struct nfs4_label *label,
2201 int *opened)
2195{ 2202{
2196 struct nfs4_state_owner *sp; 2203 struct nfs4_state_owner *sp;
2197 struct nfs4_state *state = NULL; 2204 struct nfs4_state *state = NULL;
@@ -2261,6 +2268,8 @@ static int _nfs4_do_open(struct inode *dir,
2261 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2268 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2262 } 2269 }
2263 } 2270 }
2271 if (opendata->file_created)
2272 *opened |= FILE_CREATED;
2264 2273
2265 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2274 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2266 *ctx_th = opendata->f_attr.mdsthreshold; 2275 *ctx_th = opendata->f_attr.mdsthreshold;
@@ -2289,7 +2298,8 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2289 struct nfs_open_context *ctx, 2298 struct nfs_open_context *ctx,
2290 int flags, 2299 int flags,
2291 struct iattr *sattr, 2300 struct iattr *sattr,
2292 struct nfs4_label *label) 2301 struct nfs4_label *label,
2302 int *opened)
2293{ 2303{
2294 struct nfs_server *server = NFS_SERVER(dir); 2304 struct nfs_server *server = NFS_SERVER(dir);
2295 struct nfs4_exception exception = { }; 2305 struct nfs4_exception exception = { };
@@ -2297,7 +2307,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2297 int status; 2307 int status;
2298 2308
2299 do { 2309 do {
2300 status = _nfs4_do_open(dir, ctx, flags, sattr, label); 2310 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2301 res = ctx->state; 2311 res = ctx->state;
2302 trace_nfs4_open_file(ctx, flags, status); 2312 trace_nfs4_open_file(ctx, flags, status);
2303 if (status == 0) 2313 if (status == 0)
@@ -2659,7 +2669,8 @@ out:
2659} 2669}
2660 2670
2661static struct inode * 2671static struct inode *
2662nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2672nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2673 int open_flags, struct iattr *attr, int *opened)
2663{ 2674{
2664 struct nfs4_state *state; 2675 struct nfs4_state *state;
2665 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2676 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
@@ -2667,7 +2678,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
2667 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2678 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2668 2679
2669 /* Protect against concurrent sillydeletes */ 2680 /* Protect against concurrent sillydeletes */
2670 state = nfs4_do_open(dir, ctx, open_flags, attr, label); 2681 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2671 2682
2672 nfs4_label_release_security(label); 2683 nfs4_label_release_security(label);
2673 2684
@@ -3332,6 +3343,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3332 struct nfs4_label l, *ilabel = NULL; 3343 struct nfs4_label l, *ilabel = NULL;
3333 struct nfs_open_context *ctx; 3344 struct nfs_open_context *ctx;
3334 struct nfs4_state *state; 3345 struct nfs4_state *state;
3346 int opened = 0;
3335 int status = 0; 3347 int status = 0;
3336 3348
3337 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3349 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3341,7 +3353,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3341 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3353 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3342 3354
3343 sattr->ia_mode &= ~current_umask(); 3355 sattr->ia_mode &= ~current_umask();
3344 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel); 3356 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3345 if (IS_ERR(state)) { 3357 if (IS_ERR(state)) {
3346 status = PTR_ERR(state); 3358 status = PTR_ERR(state);
3347 goto out; 3359 goto out;
@@ -7564,8 +7576,10 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7564{ 7576{
7565 int err; 7577 int err;
7566 struct page *page; 7578 struct page *page;
7567 rpc_authflavor_t flavor; 7579 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
7568 struct nfs4_secinfo_flavors *flavors; 7580 struct nfs4_secinfo_flavors *flavors;
7581 struct nfs4_secinfo4 *secinfo;
7582 int i;
7569 7583
7570 page = alloc_page(GFP_KERNEL); 7584 page = alloc_page(GFP_KERNEL);
7571 if (!page) { 7585 if (!page) {
@@ -7587,9 +7601,31 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7587 if (err) 7601 if (err)
7588 goto out_freepage; 7602 goto out_freepage;
7589 7603
7590 flavor = nfs_find_best_sec(flavors); 7604 for (i = 0; i < flavors->num_flavors; i++) {
7591 if (err == 0) 7605 secinfo = &flavors->flavors[i];
7592 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 7606
7607 switch (secinfo->flavor) {
7608 case RPC_AUTH_NULL:
7609 case RPC_AUTH_UNIX:
7610 case RPC_AUTH_GSS:
7611 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
7612 &secinfo->flavor_info);
7613 break;
7614 default:
7615 flavor = RPC_AUTH_MAXFLAVOR;
7616 break;
7617 }
7618
7619 if (flavor != RPC_AUTH_MAXFLAVOR) {
7620 err = nfs4_lookup_root_sec(server, fhandle,
7621 info, flavor);
7622 if (!err)
7623 break;
7624 }
7625 }
7626
7627 if (flavor == RPC_AUTH_MAXFLAVOR)
7628 err = -EPERM;
7593 7629
7594out_freepage: 7630out_freepage:
7595 put_page(page); 7631 put_page(page);
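nfs41_find_root_sec() now walks every flavour the server advertises and tries each supported one in turn instead of betting on a single "best" choice, falling back to -EPERM only when nothing worked. The control flow, reduced to a standalone loop over made-up candidates:

#include <errno.h>
#include <stdio.h>

enum flavor { FLAVOR_NONE = -1, FLAVOR_NULL, FLAVOR_UNIX, FLAVOR_GSS, FLAVOR_ODD };

/* Pretend root lookup: only FLAVOR_GSS succeeds in this toy example. */
static int try_lookup(enum flavor f)
{
	return f == FLAVOR_GSS ? 0 : -EACCES;
}

static int find_root_sec(const enum flavor *advertised, int n)
{
	enum flavor chosen = FLAVOR_NONE;
	int err = -EPERM;

	for (int i = 0; i < n; i++) {
		switch (advertised[i]) {
		case FLAVOR_NULL:
		case FLAVOR_UNIX:
		case FLAVOR_GSS:
			chosen = advertised[i];     /* supported candidate */
			break;
		default:
			chosen = FLAVOR_NONE;       /* skip unknown flavours */
			break;
		}

		if (chosen != FLAVOR_NONE) {
			err = try_lookup(chosen);
			if (!err)
				break;              /* first success wins */
		}
	}

	/* If the walk ended on an unsupported flavour, report -EPERM. */
	return (chosen == FLAVOR_NONE) ? -EPERM : err;
}

int main(void)
{
	enum flavor list[] = { FLAVOR_ODD, FLAVOR_UNIX, FLAVOR_GSS };
	printf("%d\n", find_root_sec(list, 3));
	return 0;
}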
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0ba679866e50..da276640f776 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
94 clear_buffer_nilfs_volatile(bh); 94 clear_buffer_nilfs_volatile(bh);
95 clear_buffer_nilfs_checked(bh); 95 clear_buffer_nilfs_checked(bh);
96 clear_buffer_nilfs_redirected(bh); 96 clear_buffer_nilfs_redirected(bh);
97 clear_buffer_async_write(bh);
97 clear_buffer_dirty(bh); 98 clear_buffer_dirty(bh);
98 if (nilfs_page_buffers_clean(page)) 99 if (nilfs_page_buffers_clean(page))
99 __nilfs_clear_page_dirty(page); 100 __nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
429 "discard block %llu, size %zu", 430 "discard block %llu, size %zu",
430 (u64)bh->b_blocknr, bh->b_size); 431 (u64)bh->b_blocknr, bh->b_size);
431 } 432 }
433 clear_buffer_async_write(bh);
432 clear_buffer_dirty(bh); 434 clear_buffer_dirty(bh);
433 clear_buffer_nilfs_volatile(bh); 435 clear_buffer_nilfs_volatile(bh);
434 clear_buffer_nilfs_checked(bh); 436 clear_buffer_nilfs_checked(bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bd88a7461063..9f6b486b6c01 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
665 665
666 bh = head = page_buffers(page); 666 bh = head = page_buffers(page);
667 do { 667 do {
668 if (!buffer_dirty(bh)) 668 if (!buffer_dirty(bh) || buffer_async_write(bh))
669 continue; 669 continue;
670 get_bh(bh); 670 get_bh(bh);
671 list_add_tail(&bh->b_assoc_buffers, listp); 671 list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
699 for (i = 0; i < pagevec_count(&pvec); i++) { 699 for (i = 0; i < pagevec_count(&pvec); i++) {
700 bh = head = page_buffers(pvec.pages[i]); 700 bh = head = page_buffers(pvec.pages[i]);
701 do { 701 do {
702 if (buffer_dirty(bh)) { 702 if (buffer_dirty(bh) &&
703 !buffer_async_write(bh)) {
703 get_bh(bh); 704 get_bh(bh);
704 list_add_tail(&bh->b_assoc_buffers, 705 list_add_tail(&bh->b_assoc_buffers,
705 listp); 706 listp);
@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1579 1580
1580 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1581 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1581 b_assoc_buffers) { 1582 b_assoc_buffers) {
1583 set_buffer_async_write(bh);
1582 if (bh->b_page != bd_page) { 1584 if (bh->b_page != bd_page) {
1583 if (bd_page) { 1585 if (bd_page) {
1584 lock_page(bd_page); 1586 lock_page(bd_page);
@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1592 1594
1593 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1595 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1594 b_assoc_buffers) { 1596 b_assoc_buffers) {
1597 set_buffer_async_write(bh);
1595 if (bh == segbuf->sb_super_root) { 1598 if (bh == segbuf->sb_super_root) {
1596 if (bh->b_page != bd_page) { 1599 if (bh->b_page != bd_page) {
1597 lock_page(bd_page); 1600 lock_page(bd_page);
@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1677 list_for_each_entry(segbuf, logs, sb_list) { 1680 list_for_each_entry(segbuf, logs, sb_list) {
1678 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1681 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1679 b_assoc_buffers) { 1682 b_assoc_buffers) {
1683 clear_buffer_async_write(bh);
1680 if (bh->b_page != bd_page) { 1684 if (bh->b_page != bd_page) {
1681 if (bd_page) 1685 if (bd_page)
1682 end_page_writeback(bd_page); 1686 end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1686 1690
1687 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1691 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1688 b_assoc_buffers) { 1692 b_assoc_buffers) {
1693 clear_buffer_async_write(bh);
1689 if (bh == segbuf->sb_super_root) { 1694 if (bh == segbuf->sb_super_root) {
1690 if (bh->b_page != bd_page) { 1695 if (bh->b_page != bd_page) {
1691 end_page_writeback(bd_page); 1696 end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1755 b_assoc_buffers) { 1760 b_assoc_buffers) {
1756 set_buffer_uptodate(bh); 1761 set_buffer_uptodate(bh);
1757 clear_buffer_dirty(bh); 1762 clear_buffer_dirty(bh);
1763 clear_buffer_async_write(bh);
1758 if (bh->b_page != bd_page) { 1764 if (bh->b_page != bd_page) {
1759 if (bd_page) 1765 if (bd_page)
1760 end_page_writeback(bd_page); 1766 end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1776 b_assoc_buffers) { 1782 b_assoc_buffers) {
1777 set_buffer_uptodate(bh); 1783 set_buffer_uptodate(bh);
1778 clear_buffer_dirty(bh); 1784 clear_buffer_dirty(bh);
1785 clear_buffer_async_write(bh);
1779 clear_buffer_delay(bh); 1786 clear_buffer_delay(bh);
1780 clear_buffer_nilfs_volatile(bh); 1787 clear_buffer_nilfs_volatile(bh);
1781 clear_buffer_nilfs_redirected(bh); 1788 clear_buffer_nilfs_redirected(bh);
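The nilfs2 hunks above close a race between segment construction and the dirty-buffer lookups: buffers are tagged with BH_Async_Write while the log writer owns them, and nilfs_lookup_dirty_data_buffers()/nilfs_lookup_dirty_node_buffers() now skip tagged buffers. Below is a minimal userspace model of that idea; the "in_flight" flag and the two-phase scan are illustrative only, not the nilfs2 code.

#include <stdio.h>

/* Illustrative model: "in_flight" plays the role of BH_Async_Write. */
struct buf { int dirty; int in_flight; };

/* The collector skips buffers already claimed by the writer. */
static int collect_dirty(struct buf *bufs, int n, struct buf **out)
{
	int count = 0;
	for (int i = 0; i < n; i++)
		if (bufs[i].dirty && !bufs[i].in_flight)
			out[count++] = &bufs[i];
	return count;
}

int main(void)
{
	struct buf bufs[3] = { {1, 0}, {1, 1}, {0, 0} };
	struct buf *picked[3];
	int n = collect_dirty(bufs, 3, picked);

	/* Only bufs[0] is picked: bufs[1] is dirty but already in flight. */
	printf("picked %d buffer(s)\n", n);
	return 0;
}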
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index ef999729e274..0d3a97d2d5f6 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -70,9 +70,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
70 */ 70 */
71 if (inode == NULL) { 71 if (inode == NULL) {
72 unsigned long gen = (unsigned long) dentry->d_fsdata; 72 unsigned long gen = (unsigned long) dentry->d_fsdata;
73 unsigned long pgen = 73 unsigned long pgen;
74 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; 74 spin_lock(&dentry->d_lock);
75 75 pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
76 spin_unlock(&dentry->d_lock);
76 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, 77 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
77 dentry->d_name.name, 78 dentry->d_name.name,
78 pgen, gen); 79 pgen, gen);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 121da2dc3be8..d4e81e4a9b04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1924{ 1924{
1925 int tmp, hangup_needed = 0; 1925 int tmp, hangup_needed = 0;
1926 struct ocfs2_super *osb = NULL; 1926 struct ocfs2_super *osb = NULL;
1927 char nodestr[8]; 1927 char nodestr[12];
1928 1928
1929 trace_ocfs2_dismount_volume(sb); 1929 trace_ocfs2_dismount_volume(sb);
1930 1930
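The ocfs2_dismount_volume() change above widens nodestr from 8 to 12 bytes so the node number always fits when formatted as a decimal string. As a rough sanity check (not ocfs2 code): a 32-bit value can need up to 10 digits plus the terminating NUL, so 8 bytes can truncate while 12 cannot.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char small[8], big[12];
	uint32_t node = UINT32_MAX;	/* worst case: 4294967295, 10 digits */

	int need = snprintf(small, sizeof(small), "%u", node);
	printf("needed %d chars, 8-byte buffer truncated to \"%s\"\n", need, small);

	snprintf(big, sizeof(big), "%u", node);
	printf("12-byte buffer holds \"%s\"\n", big);
	return 0;
}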
diff --git a/fs/open.c b/fs/open.c
index 2a731b0d08bc..d420331ca32a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -744,14 +744,24 @@ cleanup_file:
744 744
745/** 745/**
746 * finish_open - finish opening a file 746 * finish_open - finish opening a file
747 * @od: opaque open data 747 * @file: file pointer
748 * @dentry: pointer to dentry 748 * @dentry: pointer to dentry
749 * @open: open callback 749 * @open: open callback
750 * @opened: state of open
750 * 751 *
751 * This can be used to finish opening a file passed to i_op->atomic_open(). 752 * This can be used to finish opening a file passed to i_op->atomic_open().
752 * 753 *
753 * If the open callback is set to NULL, then the standard f_op->open() 754 * If the open callback is set to NULL, then the standard f_op->open()
754 * filesystem callback is substituted. 755 * filesystem callback is substituted.
756 *
757 * NB: the dentry reference is _not_ consumed. If, for example, the dentry is
758 * the return value of d_splice_alias(), then the caller needs to perform dput()
759 * on it after finish_open().
760 *
761 * On successful return @file is a fully instantiated open file. After this, if
762 * an error occurs in ->atomic_open(), it needs to clean up with fput().
763 *
764 * Returns zero on success or -errno if the open failed.
755 */ 765 */
756int finish_open(struct file *file, struct dentry *dentry, 766int finish_open(struct file *file, struct dentry *dentry,
757 int (*open)(struct inode *, struct file *), 767 int (*open)(struct inode *, struct file *),
@@ -772,11 +782,16 @@ EXPORT_SYMBOL(finish_open);
772/** 782/**
773 * finish_no_open - finish ->atomic_open() without opening the file 783 * finish_no_open - finish ->atomic_open() without opening the file
774 * 784 *
775 * @od: opaque open data 785 * @file: file pointer
776 * @dentry: dentry or NULL (as returned from ->lookup()) 786 * @dentry: dentry or NULL (as returned from ->lookup())
777 * 787 *
778 * This can be used to set the result of a successful lookup in ->atomic_open(). 788 * This can be used to set the result of a successful lookup in ->atomic_open().
779 * The filesystem's atomic_open() method shall return NULL after calling this. 789 *
790 * NB: unlike finish_open() this function does consume the dentry reference and
791 * the caller need not dput() it.
792 *
793 * Returns "1" which must be the return value of ->atomic_open() after having
794 * called this function.
780 */ 795 */
781int finish_no_open(struct file *file, struct dentry *dentry) 796int finish_no_open(struct file *file, struct dentry *dentry)
782{ 797{
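The updated kernel-doc above spells out the reference-counting contract: finish_open() does not consume the dentry reference, while finish_no_open() does (and its return value of 1 must be propagated). A rough shape of an ->atomic_open() implementation using both helpers is sketched below; the examplefs_* names, the lookup/create logic, and the exact ->atomic_open() prototype for this kernel version are assumptions, so treat it as an illustration of the dput()/return-1 rules rather than a drop-in method.

#include <linux/fs.h>
#include <linux/dcache.h>

/* Sketch only: examplefs_* names are hypothetical, error handling trimmed. */
static int examplefs_atomic_open(struct inode *dir, struct dentry *dentry,
				 struct file *file, unsigned int flags,
				 umode_t mode, int *opened)
{
	struct dentry *res;
	int err;

	res = examplefs_lookup_or_create(dir, dentry, flags, mode);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (!(flags & O_CREAT) && !res->d_inode)
		/* Plain lookup result: finish_no_open() consumes "res". */
		return finish_no_open(file, res);

	/* NULL open callback: the standard f_op->open() is used. */
	err = finish_open(file, res, NULL, opened);
	/* finish_open() did NOT consume the reference taken above. */
	dput(res);
	return err;
}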
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 4ffb7ab5e397..b8e93a40a5d3 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -168,7 +168,7 @@ static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
168 int err, ret; 168 int err, ret;
169 169
170 ret = -EIO; 170 ret = -EIO;
171 err = zlib_inflateInit(&stream); 171 err = zlib_inflateInit2(&stream, WINDOW_BITS);
172 if (err != Z_OK) 172 if (err != Z_OK)
173 goto error; 173 goto error;
174 174
@@ -195,8 +195,29 @@ error:
195static void allocate_buf_for_compression(void) 195static void allocate_buf_for_compression(void)
196{ 196{
197 size_t size; 197 size_t size;
198 size_t cmpr;
199
200 switch (psinfo->bufsize) {
201 /* buffer range for efivars */
202 case 1000 ... 2000:
203 cmpr = 56;
204 break;
205 case 2001 ... 3000:
206 cmpr = 54;
207 break;
208 case 3001 ... 3999:
209 cmpr = 52;
210 break;
211 /* buffer range for nvram, erst */
212 case 4000 ... 10000:
213 cmpr = 45;
214 break;
215 default:
216 cmpr = 60;
217 break;
218 }
198 219
199 big_oops_buf_sz = (psinfo->bufsize * 100) / 45; 220 big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
200 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 221 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
201 if (big_oops_buf) { 222 if (big_oops_buf) {
202 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), 223 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
@@ -295,10 +316,6 @@ static void pstore_dump(struct kmsg_dumper *dumper,
295 compressed = true; 316 compressed = true;
296 total_len = zipped_len; 317 total_len = zipped_len;
297 } else { 318 } else {
298 pr_err("pstore: compression failed for Part %d"
299 " returned %d\n", part, zipped_len);
300 pr_err("pstore: Capture uncompressed"
301 " oops/panic report of Part %d\n", part);
302 compressed = false; 319 compressed = false;
303 total_len = copy_kmsg_to_buffer(hsize, len); 320 total_len = copy_kmsg_to_buffer(hsize, len);
304 } 321 }
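allocate_buf_for_compression() above now picks an expected worst-case compression ratio from the backend's record size and sizes the pre-compression buffer as bufsize * 100 / cmpr. The small standalone program below just reproduces that arithmetic for a few representative record sizes so the effect of the switch statement is easy to see; the sample sizes are illustrative.

#include <stdio.h>
#include <stddef.h>

/* Mirror of the ratio table added above (percent of original size). */
static size_t pick_cmpr(size_t bufsize)
{
	if (bufsize >= 1000 && bufsize <= 2000)
		return 56;		/* efivars-sized records */
	if (bufsize >= 2001 && bufsize <= 3000)
		return 54;
	if (bufsize >= 3001 && bufsize <= 3999)
		return 52;
	if (bufsize >= 4000 && bufsize <= 10000)
		return 45;		/* nvram, erst */
	return 60;
}

int main(void)
{
	size_t sizes[] = { 1024, 2048, 4096, 16384 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t cmpr = pick_cmpr(sizes[i]);
		printf("bufsize %5zu -> cmpr %2zu%% -> big_oops_buf_sz %zu\n",
		       sizes[i], cmpr, sizes[i] * 100 / cmpr);
	}
	return 0;
}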
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 73feacc49b2e..fd777032c2ba 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1163 return NULL; 1163 return NULL;
1164} 1164}
1165 1165
1166static int newer_jl_done(struct reiserfs_journal_cnode *cn)
1167{
1168 struct super_block *sb = cn->sb;
1169 b_blocknr_t blocknr = cn->blocknr;
1170
1171 cn = cn->hprev;
1172 while (cn) {
1173 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
1174 atomic_read(&cn->jlist->j_commit_left) != 0)
1175 return 0;
1176 cn = cn->hprev;
1177 }
1178 return 1;
1179}
1180
1181static void remove_journal_hash(struct super_block *, 1166static void remove_journal_hash(struct super_block *,
1182 struct reiserfs_journal_cnode **, 1167 struct reiserfs_journal_cnode **,
1183 struct reiserfs_journal_list *, unsigned long, 1168 struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
1353 reiserfs_warning(s, "clm-2048", "called with wcount %d", 1338 reiserfs_warning(s, "clm-2048", "called with wcount %d",
1354 atomic_read(&journal->j_wcount)); 1339 atomic_read(&journal->j_wcount));
1355 } 1340 }
1356 BUG_ON(jl->j_trans_id == 0);
1357 1341
1358 /* if flushall == 0, the lock is already held */ 1342 /* if flushall == 0, the lock is already held */
1359 if (flushall) { 1343 if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
1593 return err; 1577 return err;
1594} 1578}
1595 1579
1596static int test_transaction(struct super_block *s,
1597 struct reiserfs_journal_list *jl)
1598{
1599 struct reiserfs_journal_cnode *cn;
1600
1601 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
1602 return 1;
1603
1604 cn = jl->j_realblock;
1605 while (cn) {
1606 /* if the blocknr == 0, this has been cleared from the hash,
1607 ** skip it
1608 */
1609 if (cn->blocknr == 0) {
1610 goto next;
1611 }
1612 if (cn->bh && !newer_jl_done(cn))
1613 return 0;
1614 next:
1615 cn = cn->next;
1616 cond_resched();
1617 }
1618 return 0;
1619}
1620
1621static int write_one_transaction(struct super_block *s, 1580static int write_one_transaction(struct super_block *s,
1622 struct reiserfs_journal_list *jl, 1581 struct reiserfs_journal_list *jl,
1623 struct buffer_chunk *chunk) 1582 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
1805 break; 1764 break;
1806 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); 1765 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1807 } 1766 }
1767 get_journal_list(jl);
1768 get_journal_list(flush_jl);
1808 /* try to find a group of blocks we can flush across all the 1769 /* try to find a group of blocks we can flush across all the
1809 ** transactions, but only bother if we've actually spanned 1770 ** transactions, but only bother if we've actually spanned
1810 ** across multiple lists 1771 ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
1813 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1774 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1814 } 1775 }
1815 flush_journal_list(s, flush_jl, 1); 1776 flush_journal_list(s, flush_jl, 1);
1777 put_journal_list(s, flush_jl);
1778 put_journal_list(s, jl);
1816 return 0; 1779 return 0;
1817} 1780}
1818 1781
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
3868 return 1; 3831 return 1;
3869} 3832}
3870 3833
3871static void flush_old_journal_lists(struct super_block *s)
3872{
3873 struct reiserfs_journal *journal = SB_JOURNAL(s);
3874 struct reiserfs_journal_list *jl;
3875 struct list_head *entry;
3876 time_t now = get_seconds();
3877
3878 while (!list_empty(&journal->j_journal_list)) {
3879 entry = journal->j_journal_list.next;
3880 jl = JOURNAL_LIST_ENTRY(entry);
3881 /* this check should always be run, to send old lists to disk */
3882 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3883 atomic_read(&jl->j_commit_left) == 0 &&
3884 test_transaction(s, jl)) {
3885 flush_used_journal_lists(s, jl);
3886 } else {
3887 break;
3888 }
3889 }
3890}
3891
3892/* 3834/*
3893** long and ugly. If flush, will not return until all commit 3835** long and ugly. If flush, will not return until all commit
3894** blocks and all real buffers in the trans are on disk. 3836** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4232 } 4174 }
4233 } 4175 }
4234 } 4176 }
4235 flush_old_journal_lists(sb);
4236 4177
4237 journal->j_current_jl->j_list_bitmap = 4178 journal->j_current_jl->j_list_bitmap =
4238 get_list_bitmap(sb, journal->j_current_jl); 4179 get_list_bitmap(sb, journal->j_current_jl);
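The flush_used_journal_lists() hunks above pin both journal lists with get_journal_list() before kupdate_transactions()/flush_journal_list() run, and drop the references afterwards, so neither list can be freed out from under the caller. Below is a generic, userspace-style model of that pin/flush/unpin pattern; get_journal_list()/put_journal_list() are reiserfs helpers whose internals are not shown here.

#include <assert.h>
#include <stdio.h>

/* Generic model: pin an object across an operation that may free it. */
struct jlist { int refs; int freed; };

static void get_jl(struct jlist *jl)  { jl->refs++; }

static void put_jl(struct jlist *jl)
{
	if (--jl->refs == 0) {
		jl->freed = 1;		/* stand-in for freeing the list */
		printf("list freed\n");
	}
}

/* The flush itself may drop the last "natural" reference. */
static void flush(struct jlist *jl)   { put_jl(jl); }

int main(void)
{
	struct jlist jl = { .refs = 1 };

	get_jl(&jl);			/* pin before flushing */
	flush(&jl);
	assert(!jl.freed);		/* still safe to touch jl here */
	put_jl(&jl);			/* now it goes away */
	return 0;
}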
diff --git a/fs/super.c b/fs/super.c
index 3a96c9783a8b..0225c20f8770 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -264,6 +264,8 @@ out_free_sb:
264 */ 264 */
265static inline void destroy_super(struct super_block *s) 265static inline void destroy_super(struct super_block *s)
266{ 266{
267 list_lru_destroy(&s->s_dentry_lru);
268 list_lru_destroy(&s->s_inode_lru);
267#ifdef CONFIG_SMP 269#ifdef CONFIG_SMP
268 free_percpu(s->s_files); 270 free_percpu(s->s_files);
269#endif 271#endif
@@ -323,8 +325,6 @@ void deactivate_locked_super(struct super_block *s)
323 325
324 /* caches are now gone, we can safely kill the shrinker now */ 326 /* caches are now gone, we can safely kill the shrinker now */
325 unregister_shrinker(&s->s_shrink); 327 unregister_shrinker(&s->s_shrink);
326 list_lru_destroy(&s->s_dentry_lru);
327 list_lru_destroy(&s->s_inode_lru);
328 328
329 put_filesystem(fs); 329 put_filesystem(fs);
330 put_super(s); 330 put_super(s);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d0c6a007ce83..eda10959714f 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -487,6 +487,7 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
487 sbi->s_sb = sb; 487 sbi->s_sb = sb;
488 sbi->s_block_base = 0; 488 sbi->s_block_base = 0;
489 sbi->s_type = FSTYPE_V7; 489 sbi->s_type = FSTYPE_V7;
490 mutex_init(&sbi->s_lock);
490 sb->s_fs_info = sbi; 491 sb->s_fs_info = sbi;
491 492
492 sb_set_blocksize(sb, 512); 493 sb_set_blocksize(sb, 512);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 7e5aae4bf46f..6eaf5edf1ea1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
30{ 30{
31 struct super_block *sb = inode->i_sb; 31 struct super_block *sb = inode->i_sb;
32 struct udf_sb_info *sbi = UDF_SB(sb); 32 struct udf_sb_info *sbi = UDF_SB(sb);
33 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
33 34
34 mutex_lock(&sbi->s_alloc_mutex); 35 if (lvidiu) {
35 if (sbi->s_lvid_bh) { 36 mutex_lock(&sbi->s_alloc_mutex);
36 struct logicalVolIntegrityDescImpUse *lvidiu =
37 udf_sb_lvidiu(sbi);
38 if (S_ISDIR(inode->i_mode)) 37 if (S_ISDIR(inode->i_mode))
39 le32_add_cpu(&lvidiu->numDirs, -1); 38 le32_add_cpu(&lvidiu->numDirs, -1);
40 else 39 else
41 le32_add_cpu(&lvidiu->numFiles, -1); 40 le32_add_cpu(&lvidiu->numFiles, -1);
42 udf_updated_lvid(sb); 41 udf_updated_lvid(sb);
42 mutex_unlock(&sbi->s_alloc_mutex);
43 } 43 }
44 mutex_unlock(&sbi->s_alloc_mutex);
45 44
46 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); 45 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
47} 46}
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
55 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 54 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
56 struct udf_inode_info *iinfo; 55 struct udf_inode_info *iinfo;
57 struct udf_inode_info *dinfo = UDF_I(dir); 56 struct udf_inode_info *dinfo = UDF_I(dir);
57 struct logicalVolIntegrityDescImpUse *lvidiu;
58 58
59 inode = new_inode(sb); 59 inode = new_inode(sb);
60 60
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
92 return NULL; 92 return NULL;
93 } 93 }
94 94
95 if (sbi->s_lvid_bh) { 95 lvidiu = udf_sb_lvidiu(sb);
96 struct logicalVolIntegrityDescImpUse *lvidiu; 96 if (lvidiu) {
97
98 iinfo->i_unique = lvid_get_unique_id(sb); 97 iinfo->i_unique = lvid_get_unique_id(sb);
99 mutex_lock(&sbi->s_alloc_mutex); 98 mutex_lock(&sbi->s_alloc_mutex);
100 lvidiu = udf_sb_lvidiu(sbi);
101 if (S_ISDIR(mode)) 99 if (S_ISDIR(mode))
102 le32_add_cpu(&lvidiu->numDirs, 1); 100 le32_add_cpu(&lvidiu->numDirs, 1);
103 else 101 else
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2bad7f45..91219385691d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
94static int udf_statfs(struct dentry *, struct kstatfs *); 94static int udf_statfs(struct dentry *, struct kstatfs *);
95static int udf_show_options(struct seq_file *, struct dentry *); 95static int udf_show_options(struct seq_file *, struct dentry *);
96 96
97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) 97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
98{ 98{
99 struct logicalVolIntegrityDesc *lvid = 99 struct logicalVolIntegrityDesc *lvid;
100 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; 100 unsigned int partnum;
101 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); 101 unsigned int offset;
102 __u32 offset = number_of_partitions * 2 * 102
103 sizeof(uint32_t)/sizeof(uint8_t); 103 if (!UDF_SB(sb)->s_lvid_bh)
104 return NULL;
105 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
106 partnum = le32_to_cpu(lvid->numOfPartitions);
107 if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
108 offsetof(struct logicalVolIntegrityDesc, impUse)) /
109 (2 * sizeof(uint32_t)) < partnum) {
110 udf_err(sb, "Logical volume integrity descriptor corrupted "
111 "(numOfPartitions = %u)!\n", partnum);
112 return NULL;
113 }
114 /* The offset is to skip freeSpaceTable and sizeTable arrays */
115 offset = partnum * 2 * sizeof(uint32_t);
104 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); 116 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
105} 117}
106 118
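The rewritten udf_sb_lvidiu() above now takes the superblock, returns NULL when there is no LVID buffer, and rejects a corrupted numOfPartitions before using it to index impUse[]: after skipping the freeSpaceTable and sizeTable arrays (2 * sizeof(uint32_t) per partition) there must still be room in the block for the implementation-use structure. The standalone check below re-states that bound with made-up numbers, purely to show which descriptor sizes pass and which are rejected.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative only: "avail" stands for
 *   blocksize - sizeof(impUse struct) - offsetof(LVID, impUse)
 * i.e. the bytes left in the block for the per-partition tables.
 */
static int partnum_ok(uint32_t avail, uint32_t partnum)
{
	/* Same shape as the kernel check: the tables need 8 bytes per partition. */
	return avail / (2 * sizeof(uint32_t)) >= partnum;
}

int main(void)
{
	printf("avail=400, partnum=2   -> %s\n", partnum_ok(400, 2) ? "ok" : "corrupted");
	printf("avail=400, partnum=100 -> %s\n", partnum_ok(400, 100) ? "ok" : "corrupted");
	return 0;
}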
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
629 struct udf_options uopt; 641 struct udf_options uopt;
630 struct udf_sb_info *sbi = UDF_SB(sb); 642 struct udf_sb_info *sbi = UDF_SB(sb);
631 int error = 0; 643 int error = 0;
644 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
632 645
633 if (sbi->s_lvid_bh) { 646 if (lvidiu) {
634 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); 647 int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
635 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) 648 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
636 return -EACCES; 649 return -EACCES;
637 } 650 }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
1905 1918
1906 if (!bh) 1919 if (!bh)
1907 return; 1920 return;
1908
1909 mutex_lock(&sbi->s_alloc_mutex);
1910 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 1921 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1911 lvidiu = udf_sb_lvidiu(sbi); 1922 lvidiu = udf_sb_lvidiu(sb);
1923 if (!lvidiu)
1924 return;
1912 1925
1926 mutex_lock(&sbi->s_alloc_mutex);
1913 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1927 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1914 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1928 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1915 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, 1929 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
1937 1951
1938 if (!bh) 1952 if (!bh)
1939 return; 1953 return;
1954 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1955 lvidiu = udf_sb_lvidiu(sb);
1956 if (!lvidiu)
1957 return;
1940 1958
1941 mutex_lock(&sbi->s_alloc_mutex); 1959 mutex_lock(&sbi->s_alloc_mutex);
1942 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1943 lvidiu = udf_sb_lvidiu(sbi);
1944 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1960 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1945 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1961 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1946 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); 1962 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2093 2109
2094 if (sbi->s_lvid_bh) { 2110 if (sbi->s_lvid_bh) {
2095 struct logicalVolIntegrityDescImpUse *lvidiu = 2111 struct logicalVolIntegrityDescImpUse *lvidiu =
2096 udf_sb_lvidiu(sbi); 2112 udf_sb_lvidiu(sb);
2097 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); 2113 uint16_t minUDFReadRev;
2098 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); 2114 uint16_t minUDFWriteRev;
2099 /* uint16_t maxUDFWriteRev =
2100 le16_to_cpu(lvidiu->maxUDFWriteRev); */
2101 2115
2116 if (!lvidiu) {
2117 ret = -EINVAL;
2118 goto error_out;
2119 }
2120 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2121 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2102 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 2122 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2103 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2123 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2104 le16_to_cpu(lvidiu->minUDFReadRev), 2124 minUDFReadRev,
2105 UDF_MAX_READ_VERSION); 2125 UDF_MAX_READ_VERSION);
2106 ret = -EINVAL; 2126 ret = -EINVAL;
2107 goto error_out; 2127 goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2265 struct logicalVolIntegrityDescImpUse *lvidiu; 2285 struct logicalVolIntegrityDescImpUse *lvidiu;
2266 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2286 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2267 2287
2268 if (sbi->s_lvid_bh != NULL) 2288 lvidiu = udf_sb_lvidiu(sb);
2269 lvidiu = udf_sb_lvidiu(sbi);
2270 else
2271 lvidiu = NULL;
2272
2273 buf->f_type = UDF_SUPER_MAGIC; 2289 buf->f_type = UDF_SUPER_MAGIC;
2274 buf->f_bsize = sb->s_blocksize; 2290 buf->f_bsize = sb->s_blocksize;
2275 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; 2291 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index ed401e94aa8c..1f32c7bd9f57 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
162 return sb->s_fs_info; 162 return sb->s_fs_info;
163} 163}
164 164
165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); 165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
166 166
167int udf_compute_nr_groups(struct super_block *sb, u32 partition); 167int udf_compute_nr_groups(struct super_block *sb, u32 partition);
168 168
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 88c5ea75ebf6..f1d85cfc0a54 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
628 else if (aborted) { 628 else if (aborted) {
629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); 629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
630 if (lip->li_flags & XFS_LI_IN_AIL) { 630 if (lip->li_flags & XFS_LI_IN_AIL) {
631 spin_lock(&lip->li_ailp->xa_lock);
631 xfs_trans_ail_delete(lip->li_ailp, lip, 632 xfs_trans_ail_delete(lip->li_ailp, lip,
632 SHUTDOWN_LOG_IO_ERROR); 633 SHUTDOWN_LOG_IO_ERROR);
633 } 634 }
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 069537c845e5..20bf8e8002d6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
1224 /* start with smaller blk num */ 1224 /* start with smaller blk num */
1225 forward = nodehdr.forw < nodehdr.back; 1225 forward = nodehdr.forw < nodehdr.back;
1226 for (i = 0; i < 2; forward = !forward, i++) { 1226 for (i = 0; i < 2; forward = !forward, i++) {
1227 struct xfs_da3_icnode_hdr thdr;
1227 if (forward) 1228 if (forward)
1228 blkno = nodehdr.forw; 1229 blkno = nodehdr.forw;
1229 else 1230 else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
1236 return(error); 1237 return(error);
1237 1238
1238 node = bp->b_addr; 1239 node = bp->b_addr;
1239 xfs_da3_node_hdr_from_disk(&nodehdr, node); 1240 xfs_da3_node_hdr_from_disk(&thdr, node);
1240 xfs_trans_brelse(state->args->trans, bp); 1241 xfs_trans_brelse(state->args->trans, bp);
1241 1242
1242 if (count - nodehdr.count >= 0) 1243 if (count - thdr.count >= 0)
1243 break; /* fits with at least 25% to spare */ 1244 break; /* fits with at least 25% to spare */
1244 } 1245 }
1245 if (i >= 2) { 1246 if (i >= 2) {
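The xfs_da3_node_toosmall() fix above decodes each sibling's header into a local thdr instead of reusing nodehdr, because nodehdr must keep describing the original block: its forw/back links are read again on the next pass of the loop. The toy program below shows the pattern; it is a generic illustration, not XFS code.

#include <stdio.h>

struct hdr { int forw, back; };

static void decode(struct hdr *out, int forw, int back)
{
	out->forw = forw;
	out->back = back;
}

int main(void)
{
	struct hdr nodehdr;

	decode(&nodehdr, 100, 200);	/* the node whose siblings we scan */

	for (int i = 0; i < 2; i++) {
		/* Must read the ORIGINAL node's links on every pass ... */
		int blkno = i ? nodehdr.back : nodehdr.forw;
		struct hdr thdr;	/* ... so decode the sibling into a temp */

		decode(&thdr, -1, -1);	/* stand-in for reading block blkno */
		printf("pass %d visits block %d (sibling forw %d)\n",
		       i, blkno, thdr.forw);
	}
	return 0;
}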
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 0957aa98b6c0..12dad188939d 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -1158,7 +1158,7 @@ xfs_dir2_sf_to_block(
1158 /* 1158 /*
1159 * Create entry for . 1159 * Create entry for .
1160 */ 1160 */
1161 dep = xfs_dir3_data_dot_entry_p(hdr); 1161 dep = xfs_dir3_data_dot_entry_p(mp, hdr);
1162 dep->inumber = cpu_to_be64(dp->i_ino); 1162 dep->inumber = cpu_to_be64(dp->i_ino);
1163 dep->namelen = 1; 1163 dep->namelen = 1;
1164 dep->name[0] = '.'; 1164 dep->name[0] = '.';
@@ -1172,7 +1172,7 @@ xfs_dir2_sf_to_block(
1172 /* 1172 /*
1173 * Create entry for .. 1173 * Create entry for ..
1174 */ 1174 */
1175 dep = xfs_dir3_data_dotdot_entry_p(hdr); 1175 dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp)); 1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
1177 dep->namelen = 2; 1177 dep->namelen = 2;
1178 dep->name[0] = dep->name[1] = '.'; 1178 dep->name[0] = dep->name[1] = '.';
@@ -1183,7 +1183,7 @@ xfs_dir2_sf_to_block(
1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); 1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1185 (char *)dep - (char *)hdr)); 1185 (char *)dep - (char *)hdr));
1186 offset = xfs_dir3_data_first_offset(hdr); 1186 offset = xfs_dir3_data_first_offset(mp);
1187 /* 1187 /*
1188 * Loop over existing entries, stuff them in. 1188 * Loop over existing entries, stuff them in.
1189 */ 1189 */
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index a0961a61ac1a..9cf67381adf6 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -497,69 +497,58 @@ xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
497/* 497/*
498 * Offsets of . and .. in data space (always block 0) 498 * Offsets of . and .. in data space (always block 0)
499 * 499 *
500 * The macros are used for shortform directories as they have no headers to read
501 * the magic number out of. Shortform directories need to know the size of the
502 * data block header because the sfe embeds the block offset of the entry into
503 * it so that it doesn't change when format conversion occurs. Bad Things Happen
504 * if we don't follow this rule.
505 *
506 * XXX: there is scope for significant optimisation of the logic here. Right 500 * XXX: there is scope for significant optimisation of the logic here. Right
507 * now we are checking for "dir3 format" over and over again. Ideally we should 501 * now we are checking for "dir3 format" over and over again. Ideally we should
508 * only do it once for each operation. 502 * only do it once for each operation.
509 */ 503 */
510#define XFS_DIR3_DATA_DOT_OFFSET(mp) \
511 xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&(mp)->m_sb))
512#define XFS_DIR3_DATA_DOTDOT_OFFSET(mp) \
513 (XFS_DIR3_DATA_DOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 1))
514#define XFS_DIR3_DATA_FIRST_OFFSET(mp) \
515 (XFS_DIR3_DATA_DOTDOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 2))
516
517static inline xfs_dir2_data_aoff_t 504static inline xfs_dir2_data_aoff_t
518xfs_dir3_data_dot_offset(struct xfs_dir2_data_hdr *hdr) 505xfs_dir3_data_dot_offset(struct xfs_mount *mp)
519{ 506{
520 return xfs_dir3_data_entry_offset(hdr); 507 return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
521} 508}
522 509
523static inline xfs_dir2_data_aoff_t 510static inline xfs_dir2_data_aoff_t
524xfs_dir3_data_dotdot_offset(struct xfs_dir2_data_hdr *hdr) 511xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
525{ 512{
526 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 513 return xfs_dir3_data_dot_offset(mp) +
527 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 514 xfs_dir3_data_entsize(mp, 1);
528 return xfs_dir3_data_dot_offset(hdr) +
529 __xfs_dir3_data_entsize(dir3, 1);
530} 515}
531 516
532static inline xfs_dir2_data_aoff_t 517static inline xfs_dir2_data_aoff_t
533xfs_dir3_data_first_offset(struct xfs_dir2_data_hdr *hdr) 518xfs_dir3_data_first_offset(struct xfs_mount *mp)
534{ 519{
535 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 520 return xfs_dir3_data_dotdot_offset(mp) +
536 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 521 xfs_dir3_data_entsize(mp, 2);
537 return xfs_dir3_data_dotdot_offset(hdr) +
538 __xfs_dir3_data_entsize(dir3, 2);
539} 522}
540 523
541/* 524/*
542 * location of . and .. in data space (always block 0) 525 * location of . and .. in data space (always block 0)
543 */ 526 */
544static inline struct xfs_dir2_data_entry * 527static inline struct xfs_dir2_data_entry *
545xfs_dir3_data_dot_entry_p(struct xfs_dir2_data_hdr *hdr) 528xfs_dir3_data_dot_entry_p(
529 struct xfs_mount *mp,
530 struct xfs_dir2_data_hdr *hdr)
546{ 531{
547 return (struct xfs_dir2_data_entry *) 532 return (struct xfs_dir2_data_entry *)
548 ((char *)hdr + xfs_dir3_data_dot_offset(hdr)); 533 ((char *)hdr + xfs_dir3_data_dot_offset(mp));
549} 534}
550 535
551static inline struct xfs_dir2_data_entry * 536static inline struct xfs_dir2_data_entry *
552xfs_dir3_data_dotdot_entry_p(struct xfs_dir2_data_hdr *hdr) 537xfs_dir3_data_dotdot_entry_p(
538 struct xfs_mount *mp,
539 struct xfs_dir2_data_hdr *hdr)
553{ 540{
554 return (struct xfs_dir2_data_entry *) 541 return (struct xfs_dir2_data_entry *)
555 ((char *)hdr + xfs_dir3_data_dotdot_offset(hdr)); 542 ((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
556} 543}
557 544
558static inline struct xfs_dir2_data_entry * 545static inline struct xfs_dir2_data_entry *
559xfs_dir3_data_first_entry_p(struct xfs_dir2_data_hdr *hdr) 546xfs_dir3_data_first_entry_p(
547 struct xfs_mount *mp,
548 struct xfs_dir2_data_hdr *hdr)
560{ 549{
561 return (struct xfs_dir2_data_entry *) 550 return (struct xfs_dir2_data_entry *)
562 ((char *)hdr + xfs_dir3_data_first_offset(hdr)); 551 ((char *)hdr + xfs_dir3_data_first_offset(mp));
563} 552}
564 553
565/* 554/*
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8993ec17452c..8f84153e98a8 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -119,9 +119,9 @@ xfs_dir2_sf_getdents(
119 * mp->m_dirdatablk. 119 * mp->m_dirdatablk.
120 */ 120 */
121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
122 XFS_DIR3_DATA_DOT_OFFSET(mp)); 122 xfs_dir3_data_dot_offset(mp));
123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
124 XFS_DIR3_DATA_DOTDOT_OFFSET(mp)); 124 xfs_dir3_data_dotdot_offset(mp));
125 125
126 /* 126 /*
127 * Put . entry unless we're starting past it. 127 * Put . entry unless we're starting past it.
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index bb6e2848f473..3ef6d402084c 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -557,7 +557,7 @@ xfs_dir2_sf_addname_hard(
557 * to insert the new entry. 557 * to insert the new entry.
558 * If it's going to end up at the end then oldsfep will point there. 558 * If it's going to end up at the end then oldsfep will point there.
559 */ 559 */
560 for (offset = XFS_DIR3_DATA_FIRST_OFFSET(mp), 560 for (offset = xfs_dir3_data_first_offset(mp),
561 oldsfep = xfs_dir2_sf_firstentry(oldsfp), 561 oldsfep = xfs_dir2_sf_firstentry(oldsfp),
562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen), 562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
563 eof = (char *)oldsfep == &buf[old_isize]; 563 eof = (char *)oldsfep == &buf[old_isize];
@@ -640,7 +640,7 @@ xfs_dir2_sf_addname_pick(
640 640
641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
642 size = xfs_dir3_data_entsize(mp, args->namelen); 642 size = xfs_dir3_data_entsize(mp, args->namelen);
643 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 643 offset = xfs_dir3_data_first_offset(mp);
644 sfep = xfs_dir2_sf_firstentry(sfp); 644 sfep = xfs_dir2_sf_firstentry(sfp);
645 holefit = 0; 645 holefit = 0;
646 /* 646 /*
@@ -713,7 +713,7 @@ xfs_dir2_sf_check(
713 mp = dp->i_mount; 713 mp = dp->i_mount;
714 714
715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
716 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 716 offset = xfs_dir3_data_first_offset(mp);
717 ino = xfs_dir2_sf_get_parent_ino(sfp); 717 ino = xfs_dir2_sf_get_parent_ino(sfp);
718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
719 719
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 71520e6e5d65..1ee776d477c3 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -64,7 +64,8 @@ int xfs_dqerror_mod = 33;
64struct kmem_zone *xfs_qm_dqtrxzone; 64struct kmem_zone *xfs_qm_dqtrxzone;
65static struct kmem_zone *xfs_qm_dqzone; 65static struct kmem_zone *xfs_qm_dqzone;
66 66
67static struct lock_class_key xfs_dquot_other_class; 67static struct lock_class_key xfs_dquot_group_class;
68static struct lock_class_key xfs_dquot_project_class;
68 69
69/* 70/*
70 * This is called to free all the memory associated with a dquot 71 * This is called to free all the memory associated with a dquot
@@ -703,8 +704,20 @@ xfs_qm_dqread(
703 * Make sure group quotas have a different lock class than user 704 * Make sure group quotas have a different lock class than user
704 * quotas. 705 * quotas.
705 */ 706 */
706 if (!(type & XFS_DQ_USER)) 707 switch (type) {
707 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); 708 case XFS_DQ_USER:
709 /* uses the default lock class */
710 break;
711 case XFS_DQ_GROUP:
712 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
713 break;
714 case XFS_DQ_PROJ:
715 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
716 break;
717 default:
718 ASSERT(0);
719 break;
720 }
708 721
709 XFS_STATS_INC(xs_qm_dquot); 722 XFS_STATS_INC(xs_qm_dquot);
710 723
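The xfs_qm_dqread() change above gives user, group and project dquot locks distinct lockdep classes instead of lumping group and project together, so lockdep can tell the lock types apart when they nest. A minimal, generic sketch of the mechanism is below (plain lockdep API, not the XFS code); it assumes a kernel build context where these headers are available.

#include <linux/mutex.h>
#include <linux/lockdep.h>

/* One key per logical lock type that may be taken while another is held. */
static struct lock_class_key type_a_class;
static struct lock_class_key type_b_class;

static void example_init_locks(struct mutex *a, struct mutex *b)
{
	mutex_init(a);
	mutex_init(b);

	/* Same struct type, but lockdep now tracks them as separate classes. */
	lockdep_set_class(a, &type_a_class);
	lockdep_set_class(b, &type_b_class);
}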
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1edb5cc3e5f4..18272c766a50 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ 515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) 516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) 517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks) 518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
519 519
520/* 520/*
521 * ioctl commands that replace IRIX syssgi()'s 521 * ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 193206ba4358..474807a401c8 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -119,11 +119,6 @@ xfs_inode_free(
119 ip->i_itemp = NULL; 119 ip->i_itemp = NULL;
120 } 120 }
121 121
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 ASSERT(!spin_is_locked(&ip->i_flags_lock));
125 ASSERT(!xfs_isiflocked(ip));
126
127 /* 122 /*
128 * Because we use RCU freeing we need to ensure the inode always 123 * Because we use RCU freeing we need to ensure the inode always
129 * appears to be reclaimed with an invalid inode number when in the 124 * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
135 ip->i_ino = 0; 130 ip->i_ino = 0;
136 spin_unlock(&ip->i_flags_lock); 131 spin_unlock(&ip->i_flags_lock);
137 132
133 /* asserts to verify all state is correct here */
134 ASSERT(atomic_read(&ip->i_pincount) == 0);
135 ASSERT(!xfs_isiflocked(ip));
136
138 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 137 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
139} 138}
140 139
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dabda9521b4b..39797490a1f1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1585,6 +1585,7 @@ xlog_recover_add_to_trans(
1585 "bad number of regions (%d) in inode log format", 1585 "bad number of regions (%d) in inode log format",
1586 in_f->ilf_size); 1586 in_f->ilf_size);
1587 ASSERT(0); 1587 ASSERT(0);
1588 kmem_free(ptr);
1588 return XFS_ERROR(EIO); 1589 return XFS_ERROR(EIO);
1589 } 1590 }
1590 1591
@@ -1970,6 +1971,13 @@ xlog_recover_do_inode_buffer(
1970 * magic number. If we don't recognise the magic number in the buffer, then 1971 * magic number. If we don't recognise the magic number in the buffer, then
1971 * return a LSN of -1 so that the caller knows it was an unrecognised block and 1972 * return a LSN of -1 so that the caller knows it was an unrecognised block and
1972 * so can recover the buffer. 1973 * so can recover the buffer.
1974 *
1975 * Note: we cannot rely solely on magic number matches to determine that the
1976 * buffer has a valid LSN - we also need to verify that it belongs to this
1977 * filesystem, so we need to extract the object's LSN and compare it to that
1978 * which we read from the superblock. If the UUIDs don't match, then we've got a
1979 * stale metadata block from an old filesystem instance that we need to recover
1980 * over the top of.
1973 */ 1981 */
1974static xfs_lsn_t 1982static xfs_lsn_t
1975xlog_recover_get_buf_lsn( 1983xlog_recover_get_buf_lsn(
@@ -1980,6 +1988,8 @@ xlog_recover_get_buf_lsn(
1980 __uint16_t magic16; 1988 __uint16_t magic16;
1981 __uint16_t magicda; 1989 __uint16_t magicda;
1982 void *blk = bp->b_addr; 1990 void *blk = bp->b_addr;
1991 uuid_t *uuid;
1992 xfs_lsn_t lsn = -1;
1983 1993
1984 /* v4 filesystems always recover immediately */ 1994 /* v4 filesystems always recover immediately */
1985 if (!xfs_sb_version_hascrc(&mp->m_sb)) 1995 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2002,79 @@ xlog_recover_get_buf_lsn(
1992 case XFS_ABTB_MAGIC: 2002 case XFS_ABTB_MAGIC:
1993 case XFS_ABTC_MAGIC: 2003 case XFS_ABTC_MAGIC:
1994 case XFS_IBT_CRC_MAGIC: 2004 case XFS_IBT_CRC_MAGIC:
1995 case XFS_IBT_MAGIC: 2005 case XFS_IBT_MAGIC: {
1996 return be64_to_cpu( 2006 struct xfs_btree_block *btb = blk;
1997 ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn); 2007
2008 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2009 uuid = &btb->bb_u.s.bb_uuid;
2010 break;
2011 }
1998 case XFS_BMAP_CRC_MAGIC: 2012 case XFS_BMAP_CRC_MAGIC:
1999 case XFS_BMAP_MAGIC: 2013 case XFS_BMAP_MAGIC: {
2000 return be64_to_cpu( 2014 struct xfs_btree_block *btb = blk;
2001 ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn); 2015
2016 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2017 uuid = &btb->bb_u.l.bb_uuid;
2018 break;
2019 }
2002 case XFS_AGF_MAGIC: 2020 case XFS_AGF_MAGIC:
2003 return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2021 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2022 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2023 break;
2004 case XFS_AGFL_MAGIC: 2024 case XFS_AGFL_MAGIC:
2005 return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2025 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2026 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2027 break;
2006 case XFS_AGI_MAGIC: 2028 case XFS_AGI_MAGIC:
2007 return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2029 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2030 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2031 break;
2008 case XFS_SYMLINK_MAGIC: 2032 case XFS_SYMLINK_MAGIC:
2009 return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2033 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2034 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2035 break;
2010 case XFS_DIR3_BLOCK_MAGIC: 2036 case XFS_DIR3_BLOCK_MAGIC:
2011 case XFS_DIR3_DATA_MAGIC: 2037 case XFS_DIR3_DATA_MAGIC:
2012 case XFS_DIR3_FREE_MAGIC: 2038 case XFS_DIR3_FREE_MAGIC:
2013 return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2039 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2040 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2041 break;
2014 case XFS_ATTR3_RMT_MAGIC: 2042 case XFS_ATTR3_RMT_MAGIC:
2015 return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 2043 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2044 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2045 break;
2016 case XFS_SB_MAGIC: 2046 case XFS_SB_MAGIC:
2017 return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2047 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2048 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2049 break;
2018 default: 2050 default:
2019 break; 2051 break;
2020 } 2052 }
2021 2053
2054 if (lsn != (xfs_lsn_t)-1) {
2055 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2056 goto recover_immediately;
2057 return lsn;
2058 }
2059
2022 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2060 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2023 switch (magicda) { 2061 switch (magicda) {
2024 case XFS_DIR3_LEAF1_MAGIC: 2062 case XFS_DIR3_LEAF1_MAGIC:
2025 case XFS_DIR3_LEAFN_MAGIC: 2063 case XFS_DIR3_LEAFN_MAGIC:
2026 case XFS_DA3_NODE_MAGIC: 2064 case XFS_DA3_NODE_MAGIC:
2027 return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2065 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2066 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2067 break;
2028 default: 2068 default:
2029 break; 2069 break;
2030 } 2070 }
2031 2071
2072 if (lsn != (xfs_lsn_t)-1) {
2073 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2074 goto recover_immediately;
2075 return lsn;
2076 }
2077
2032 /* 2078 /*
2033 * We do individual object checks on dquot and inode buffers as they 2079 * We do individual object checks on dquot and inode buffers as they
2034 * have their own individual LSN records. Also, we could have a stale 2080 * have their own individual LSN records. Also, we could have a stale
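The log-recovery hunks above restructure xlog_recover_get_buf_lsn() so that each recognised block type yields both an LSN and a UUID, and the LSN is trusted only if the UUID matches the mounted filesystem; otherwise the block is treated as stale metadata and recovered over. The snippet below is a stripped-down model of that "extract, then verify ownership" flow using invented types, not the XFS structures.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Invented stand-ins for the on-disk headers and the mount's identity. */
struct blk { int64_t lsn; char uuid[16]; };

static int64_t get_blk_lsn(const struct blk *b, const char fs_uuid[16])
{
	/* A known header gives us a candidate LSN and the owner's UUID... */
	int64_t lsn = b->lsn;

	/* ...but only trust it if the block belongs to this filesystem. */
	if (memcmp(b->uuid, fs_uuid, 16) != 0)
		return -1;	/* "recover immediately" */
	return lsn;
}

int main(void)
{
	char fs_uuid[16] = "this-filesystem";
	struct blk ours  = { 42, "this-filesystem" };
	struct blk stale = { 99, "older-filesystem" };

	printf("ours:  lsn %lld\n", (long long)get_blk_lsn(&ours, fs_uuid));
	printf("stale: lsn %lld (recover over it)\n",
	       (long long)get_blk_lsn(&stale, fs_uuid));
	return 0;
}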
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d06079c774a0..99b490b4d05a 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
6 return mk_pte(page, pgprot); 6 return mk_pte(page, pgprot);
7} 7}
8 8
9static inline int huge_pte_write(pte_t pte) 9static inline unsigned long huge_pte_write(pte_t pte)
10{ 10{
11 return pte_write(pte); 11 return pte_write(pte);
12} 12}
13 13
14static inline int huge_pte_dirty(pte_t pte) 14static inline unsigned long huge_pte_dirty(pte_t pte)
15{ 15{
16 return pte_dirty(pte); 16 return pte_dirty(pte);
17} 17}
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
index e69de29bb2d1..b1a49677fe25 100644
--- a/include/asm-generic/vtime.h
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
/* no content, but patch(1) dislikes empty files */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 290734191f72..b46fb45f2cca 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
1322extern int drm_rmctx(struct drm_device *dev, void *data, 1322extern int drm_rmctx(struct drm_device *dev, void *data,
1323 struct drm_file *file_priv); 1323 struct drm_file *file_priv);
1324 1324
1325extern void drm_legacy_ctxbitmap_init(struct drm_device *dev); 1325extern int drm_ctxbitmap_init(struct drm_device *dev);
1326extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); 1326extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
1327extern void drm_legacy_ctxbitmap_release(struct drm_device *dev, 1327extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
1328 struct drm_file *file_priv);
1329 1328
1330extern int drm_setsareactx(struct drm_device *dev, void *data, 1329extern int drm_setsareactx(struct drm_device *dev, void *data,
1331 struct drm_file *file_priv); 1330 struct drm_file *file_priv);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fd54a14a7c2a..3d79e513c0b3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -12,11 +12,14 @@
12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
15 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 16 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
16 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
17 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
18 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 20 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 21 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
22 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
20 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 23 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
21 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 24 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
22 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 25 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f7f1d7169b11..089743ade734 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -159,6 +159,26 @@ static inline bool balloon_page_movable(struct page *page)
159} 159}
160 160
161/* 161/*
162 * isolated_balloon_page - identify an isolated balloon page on private
163 * compaction/migration page lists.
164 *
165 * After a compaction thread isolates a balloon page for migration, it raises
166 * the page refcount to prevent concurrent compaction threads from re-isolating
167 * the same page. For that reason putback_movable_pages(), or other routines
168 * that need to identify isolated balloon pages on private pagelists, cannot
169 * rely on balloon_page_movable() to accomplish the task.
170 */
171static inline bool isolated_balloon_page(struct page *page)
172{
173 /* Already isolated balloon pages, by default, have a raised refcount */
174 if (page_flags_cleared(page) && !page_mapped(page) &&
175 page_count(page) >= 2)
176 return __is_movable_balloon_page(page);
177
178 return false;
179}
180
181/*
162 * balloon_page_insert - insert a page into the balloon's page list and make 182 * balloon_page_insert - insert a page into the balloon's page list and make
163 * the page->mapping assignment accordingly. 183 * the page->mapping assignment accordingly.
164 * @page : page to be assigned as a 'balloon page' 184 * @page : page to be assigned as a 'balloon page'
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
243 return false; 263 return false;
244} 264}
245 265
266static inline bool isolated_balloon_page(struct page *page)
267{
268 return false;
269}
270
246static inline bool balloon_page_isolate(struct page *page) 271static inline bool balloon_page_isolate(struct page *page)
247{ 272{
248 return false; 273 return false;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index d66033f418c9..0333e605ea0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
242 struct bcma_device *core, bool enable); 242 struct bcma_device *core, bool enable);
243extern void bcma_core_pci_up(struct bcma_bus *bus); 243extern void bcma_core_pci_up(struct bcma_bus *bus);
244extern void bcma_core_pci_down(struct bcma_bus *bus); 244extern void bcma_core_pci_down(struct bcma_bus *bus);
245extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
245 246
246extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 247extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
247extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); 248extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2fdb4a451b49..0e6f765aa1f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
862 return blk_queue_get_max_sectors(q, rq->cmd_flags); 862 return blk_queue_get_max_sectors(q, rq->cmd_flags);
863} 863}
864 864
865static inline unsigned int blk_rq_count_bios(struct request *rq)
866{
867 unsigned int nr_bios = 0;
868 struct bio *bio;
869
870 __rq_for_each_bio(bio, rq)
871 nr_bios++;
872
873 return nr_bios;
874}
875
865/* 876/*
866 * Request issue related functions. 877 * Request issue related functions.
867 */ 878 */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ce6df39f60ff..8f47625a0661 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
335 struct ceph_osd_request *req); 335 struct ceph_osd_request *req);
336extern void ceph_osdc_sync(struct ceph_osd_client *osdc); 336extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
337 337
338extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
339
338extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, 340extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
339 struct ceph_vino vino, 341 struct ceph_vino vino,
340 struct ceph_file_layout *layout, 342 struct ceph_file_layout *layout,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 653073de09e3..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
406union map_info *dm_get_mapinfo(struct bio *bio); 406union map_info *dm_get_mapinfo(struct bio *bio);
407union map_info *dm_get_rq_mapinfo(struct request *rq); 407union map_info *dm_get_rq_mapinfo(struct request *rq);
408 408
409struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
410
409/* 411/*
410 * Geometry functions. 412 * Geometry functions.
411 */ 413 */
412int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); 414int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
413int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); 415int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
414 416
415
416/*----------------------------------------------------------------- 417/*-----------------------------------------------------------------
417 * Functions for manipulating device-mapper tables. 418 * Functions for manipulating device-mapper tables.
418 *---------------------------------------------------------------*/ 419 *---------------------------------------------------------------*/
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ee1ffc5e19c9..31b9d299ef6c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -756,6 +756,10 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
756struct hid_device *hid_allocate_device(void); 756struct hid_device *hid_allocate_device(void);
757struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); 757struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
758int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 758int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
759struct hid_report *hid_validate_values(struct hid_device *hid,
760 unsigned int type, unsigned int id,
761 unsigned int field_index,
762 unsigned int report_counts);
759int hid_open_report(struct hid_device *device); 763int hid_open_report(struct hid_device *device);
760int hid_check_keys_pressed(struct hid_device *hid); 764int hid_check_keys_pressed(struct hid_device *hid);
761int hid_connect(struct hid_device *hid, unsigned int connect_mask); 765int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..d98503bde7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
30/* 30/*
31 * Framework version for util services. 31 * Framework version for util services.
32 */ 32 */
33#define UTIL_FW_MINOR 0
34
35#define UTIL_WS2K8_FW_MAJOR 1
36#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
33 37
34#define UTIL_FW_MAJOR 3 38#define UTIL_FW_MAJOR 3
35#define UTIL_FW_MINOR 0 39#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
36#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
37 40
38 41
39/* 42/*
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 78e2ada50cd5..d380c5e68008 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -55,7 +55,7 @@
55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ 55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ 56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ 57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
58#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ 58#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ 59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
60 60
61#define OFFSET_STRIDE (9) 61#define OFFSET_STRIDE (9)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 482ad2d84a32..672ddc4de4af 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
439 return buf; 439 return buf;
440} 440}
441 441
442extern const char hex_asc_upper[];
443#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
444#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
445
446static inline char *hex_byte_pack_upper(char *buf, u8 byte)
447{
448 *buf++ = hex_asc_upper_hi(byte);
449 *buf++ = hex_asc_upper_lo(byte);
450 return buf;
451}
452
442static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) 453static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
443{ 454{
444 return hex_byte_pack(buf, byte); 455 return hex_byte_pack(buf, byte);
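hex_byte_pack_upper() added above mirrors hex_byte_pack() but indexes an uppercase digit table, emitting the high nibble first. A self-contained userspace re-creation of the same idea is shown below for clarity; hex_asc_upper itself is the kernel's lookup table and is re-declared locally here only for the demo.

#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

/* Same idea as the kernel helper: high nibble, then low nibble. */
static char *hex_byte_pack_upper_demo(char *buf, unsigned char byte)
{
	*buf++ = hex_asc_upper[(byte & 0xf0) >> 4];
	*buf++ = hex_asc_upper[byte & 0x0f];
	return buf;
}

int main(void)
{
	char out[3] = { 0 };

	hex_byte_pack_upper_demo(out, 0x5e);
	printf("0x5e -> \"%s\"\n", out);	/* prints 5E */
	return 0;
}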
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ca645a01d37a..0fbbc7aa02cb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,6 +533,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
533 533
534struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 534struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
535unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 535unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
536unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
536unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 537unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
537void kvm_release_page_clean(struct page *page); 538void kvm_release_page_clean(struct page *page);
538void kvm_release_page_dirty(struct page *page); 539void kvm_release_page_dirty(struct page *page);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..ecc82b37c4cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
53 unsigned int generation; 53 unsigned int generation;
54}; 54};
55 55
56enum mem_cgroup_filter_t {
57 VISIT, /* visit current node */
58 SKIP, /* skip the current node and continue traversal */
59 SKIP_TREE, /* skip the whole subtree and continue traversal */
60};
61
62/*
63 * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
64 * iterate through the hierarchy tree. Each tree element is checked by the
65 * predicate before it is returned by the iterator. If a filter returns
66 * SKIP or SKIP_TREE then the iterator code continues traversal (with the
67 * next node down the hierarchy or the next node that doesn't belong under the
68 * memcg's subtree).
69 */
70typedef enum mem_cgroup_filter_t
71(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
72
73#ifdef CONFIG_MEMCG 56#ifdef CONFIG_MEMCG
74/* 57/*
75 * All "charge" functions with gfp_mask should use GFP_KERNEL or 58 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
137extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, 120extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
138 struct page *oldpage, struct page *newpage, bool migration_ok); 121 struct page *oldpage, struct page *newpage, bool migration_ok);
139 122
140struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 123struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
141 struct mem_cgroup *prev, 124 struct mem_cgroup *,
142 struct mem_cgroup_reclaim_cookie *reclaim, 125 struct mem_cgroup_reclaim_cookie *);
143 mem_cgroup_iter_filter cond);
144
145static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
146 struct mem_cgroup *prev,
147 struct mem_cgroup_reclaim_cookie *reclaim)
148{
149 return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
150}
151
152void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 126void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
153 127
154/* 128/*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
260 mem_cgroup_update_page_stat(page, idx, -1); 234 mem_cgroup_update_page_stat(page, idx, -1);
261} 235}
262 236
263enum mem_cgroup_filter_t 237unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
264mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 238 gfp_t gfp_mask,
265 struct mem_cgroup *root); 239 unsigned long *total_scanned);
266 240
267void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); 241void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
268static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 242static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
376 struct page *oldpage, struct page *newpage, bool migration_ok) 350 struct page *oldpage, struct page *newpage, bool migration_ok)
377{ 351{
378} 352}
379static inline struct mem_cgroup *
380mem_cgroup_iter_cond(struct mem_cgroup *root,
381 struct mem_cgroup *prev,
382 struct mem_cgroup_reclaim_cookie *reclaim,
383 mem_cgroup_iter_filter cond)
384{
385 /* first call must return non-NULL, second return NULL */
386 return (struct mem_cgroup *)(unsigned long)!prev;
387}
388 353
389static inline struct mem_cgroup * 354static inline struct mem_cgroup *
390mem_cgroup_iter(struct mem_cgroup *root, 355mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
471} 436}
472 437
473static inline 438static inline
474enum mem_cgroup_filter_t 439unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
475mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 440 gfp_t gfp_mask,
476 struct mem_cgroup *root) 441 unsigned long *total_scanned)
477{ 442{
478 return VISIT; 443 return 0;
479} 444}
480 445
481static inline void mem_cgroup_split_huge_fixup(struct page *head) 446static inline void mem_cgroup_split_huge_fixup(struct page *head)
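With the conditional iterator reverted, callers are back to the plain mem_cgroup_iter()/mem_cgroup_iter_break() walk. A sketch of that pattern, assuming a hypothetical stop predicate named done_with_hierarchy():

static void walk_memcg_tree(struct mem_cgroup *root)
{
        struct mem_cgroup *iter;

        for (iter = mem_cgroup_iter(root, NULL, NULL);
             iter;
             iter = mem_cgroup_iter(root, iter, NULL)) {
                if (done_with_hierarchy(iter)) {        /* hypothetical predicate */
                        mem_cgroup_iter_break(root, iter);
                        break;
                }
        }
}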
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..bab49da8a0f0 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18
19#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h>
20 20
21/* 21/*
22 * Simple, straightforward mutexes with strict semantics: 22 * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
175 175
176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
177 177
178#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX 178#ifndef arch_mutex_cpu_relax
179#define arch_mutex_cpu_relax() cpu_relax() 179# define arch_mutex_cpu_relax() cpu_relax()
180#endif 180#endif
181 181
182#endif 182#endif
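The override mechanism switches from a Kconfig symbol to a plain #ifndef, and <asm/processor.h> is now included so an architecture's definition is visible at this point. A sketch of how an arch header supplies its own relax hook; the body shown matches what s390 uses to the best of my knowledge, so treat it as illustrative:

/* in the architecture's <asm/processor.h> */
#define arch_mutex_cpu_relax()  barrier()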
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 041b42a305f6..3de49aca4519 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -950,14 +950,14 @@ struct netdev_phys_port_id {
950 * multiple net devices on single physical port. 950 * multiple net devices on single physical port.
951 * 951 *
952 * void (*ndo_add_vxlan_port)(struct net_device *dev, 952 * void (*ndo_add_vxlan_port)(struct net_device *dev,
953 * sa_family_t sa_family, __u16 port); 953 * sa_family_t sa_family, __be16 port);
954 * Called by vxlan to notify a driver about the UDP port and socket 954 * Called by vxlan to notify a driver about the UDP port and socket
955 * address family that vxlan is listening to. It is called only when 955 * address family that vxlan is listening to. It is called only when
956 * a new port starts listening. The operation is protected by the 956 * a new port starts listening. The operation is protected by the
957 * vxlan_net->sock_lock. 957 * vxlan_net->sock_lock.
958 * 958 *
959 * void (*ndo_del_vxlan_port)(struct net_device *dev, 959 * void (*ndo_del_vxlan_port)(struct net_device *dev,
960 * sa_family_t sa_family, __u16 port); 960 * sa_family_t sa_family, __be16 port);
961 * Called by vxlan to notify the driver about a UDP port and socket 961 * Called by vxlan to notify the driver about a UDP port and socket
962 * address family that vxlan is not listening to anymore. The operation 962 * address family that vxlan is not listening to anymore. The operation
963 * is protected by the vxlan_net->sock_lock. 963 * is protected by the vxlan_net->sock_lock.
@@ -1093,10 +1093,10 @@ struct net_device_ops {
1093 struct netdev_phys_port_id *ppid); 1093 struct netdev_phys_port_id *ppid);
1094 void (*ndo_add_vxlan_port)(struct net_device *dev, 1094 void (*ndo_add_vxlan_port)(struct net_device *dev,
1095 sa_family_t sa_family, 1095 sa_family_t sa_family,
1096 __u16 port); 1096 __be16 port);
1097 void (*ndo_del_vxlan_port)(struct net_device *dev, 1097 void (*ndo_del_vxlan_port)(struct net_device *dev,
1098 sa_family_t sa_family, 1098 sa_family_t sa_family,
1099 __u16 port); 1099 __be16 port);
1100}; 1100};
1101 1101
1102/* 1102/*
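The documentation and the ndo prototypes now agree that the VXLAN UDP port is passed in network byte order (__be16). A sketch of a driver callback under the corrected signature (driver name and message are illustrative):

static void foo_add_vxlan_port(struct net_device *dev,
                               sa_family_t sa_family, __be16 port)
{
        /* convert only for display or host-order comparisons */
        netdev_info(dev, "offloading VXLAN UDP port %u\n", ntohs(port));
}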
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index d80e2753847c..9ac9fbde7b61 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -296,10 +296,12 @@ ip_set_eexist(int ret, u32 flags)
296 296
297/* Match elements marked with nomatch */ 297/* Match elements marked with nomatch */
298static inline bool 298static inline bool
299ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt) 299ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
300{ 300{
301 return adt == IPSET_TEST && 301 return adt == IPSET_TEST &&
302 ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH); 302 (set->type->features & IPSET_TYPE_NOMATCH) &&
303 ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
304 (ret > 0 || ret == -ENOTEMPTY);
303} 305}
304 306
305/* Check the NLA_F_NET_BYTEORDER flag */ 307/* Check the NLA_F_NET_BYTEORDER flag */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 01fd84b566f7..49f52c8f4422 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
1455 struct inode * (*open_context) (struct inode *dir, 1455 struct inode * (*open_context) (struct inode *dir,
1456 struct nfs_open_context *ctx, 1456 struct nfs_open_context *ctx,
1457 int open_flags, 1457 int open_flags,
1458 struct iattr *iattr); 1458 struct iattr *iattr,
1459 int *);
1459 int (*have_delegation)(struct inode *, fmode_t); 1460 int (*have_delegation)(struct inode *, fmode_t);
1460 int (*return_delegation)(struct inode *); 1461 int (*return_delegation)(struct inode *);
1461 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); 1462 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..fcd63baee5f2 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
1#ifndef __OF_IRQ_H 1#ifndef __OF_IRQ_H
2#define __OF_IRQ_H 2#define __OF_IRQ_H
3 3
4#if defined(CONFIG_OF)
5struct of_irq;
6#include <linux/types.h> 4#include <linux/types.h>
7#include <linux/errno.h> 5#include <linux/errno.h>
8#include <linux/irq.h> 6#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
10#include <linux/ioport.h> 8#include <linux/ioport.h>
11#include <linux/of.h> 9#include <linux/of.h>
12 10
13/*
14 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
15 * implements it differently. However, the prototype is the same for all,
16 * so declare it here regardless of the CONFIG_OF_IRQ setting.
17 */
18extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
19
20#if defined(CONFIG_OF_IRQ)
21/** 11/**
22 * of_irq - container for device_node/irq_specifier pair for an irq controller 12 * of_irq - container for device_node/irq_specifier pair for an irq controller
23 * @controller: pointer to interrupt controller device tree node 13 * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
71extern int of_irq_count(struct device_node *dev); 61extern int of_irq_count(struct device_node *dev);
72extern int of_irq_to_resource_table(struct device_node *dev, 62extern int of_irq_to_resource_table(struct device_node *dev,
73 struct resource *res, int nr_irqs); 63 struct resource *res, int nr_irqs);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
75 64
76extern void of_irq_init(const struct of_device_id *matches); 65extern void of_irq_init(const struct of_device_id *matches);
77 66
78#endif /* CONFIG_OF_IRQ */ 67#if defined(CONFIG_OF)
68/*
69 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
70 * implements it differently. However, the prototype is the same for all,
71 * so declare it here regardless of the CONFIG_OF_IRQ setting.
72 */
73extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
79 75
80#else /* !CONFIG_OF */ 76#else /* !CONFIG_OF */
81static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 77static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
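irq_of_parse_and_map() and of_irq_find_parent() move under CONFIG_OF while the old forward declarations go away; typical call sites are unchanged. A sketch of the usual probe-time usage, with all driver names illustrative and includes trimmed:

static irqreturn_t foo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        unsigned int irq = irq_of_parse_and_map(pdev->dev.of_node, 0);

        if (!irq)
                return -ENODEV;         /* 0 means "no mapping found" */
        return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
                                dev_name(&pdev->dev), NULL);
}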
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 67e13aa5a478..9bdad43ad228 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,8 @@ enum regulator_status {
40}; 40};
41 41
42/** 42/**
43 * struct regulator_linear_range - specify linear voltage ranges
44 *
43 * Specify a range of voltages for regulator_map_linar_range() and 45 * Specify a range of voltages for regulator_map_linar_range() and
44 * regulator_list_linear_range(). 46 * regulator_list_linear_range().
45 * 47 *
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2ddb48d9312c..c2d89335f637 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -498,7 +498,7 @@ struct sk_buff {
498 * headers if needed 498 * headers if needed
499 */ 499 */
500 __u8 encapsulation:1; 500 __u8 encapsulation:1;
501 /* 7/9 bit hole (depending on ndisc_nodetype presence) */ 501 /* 6/8 bit hole (depending on ndisc_nodetype presence) */
502 kmemcheck_bitfield_end(flags2); 502 kmemcheck_bitfield_end(flags2);
503 503
504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL 504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfb7ca094b38..731f5237d5f4 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 155
156static inline void kick_all_cpus_sync(void) { } 156static inline void kick_all_cpus_sync(void) { }
157 157
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
158#endif /* !SMP */ 164#endif /* !SMP */
159 165
160/* 166/*
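The new !SMP stub lets code call __smp_call_function_single() unconditionally; on uniprocessor builds the callback simply runs locally via on_each_cpu(). A minimal sketch of a caller (the callback and csd are illustrative):

static void poke_cpu(void *info)
{
        /* runs on the target CPU, or locally on a UP kernel */
}

static struct call_single_data poke_csd = {
        .func   = poke_cpu,
};

static void kick_one_cpu(int cpu)
{
        __smp_call_function_single(cpu, &poke_csd, 0);  /* 0: don't wait */
}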
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b3726e61368e..dd3edd7dfc94 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
141extern void hardpps(const struct timespec *, const struct timespec *); 141extern void hardpps(const struct timespec *, const struct timespec *);
142 142
143int read_current_timer(unsigned long *timer_val); 143int read_current_timer(unsigned long *timer_val);
144void ntp_notify_cmos_timer(void);
144 145
145/* The clock frequency of the i8253/i8254 PIT */ 146/* The clock frequency of the i8253/i8254 PIT */
146#define PIT_TICK_RATE 1193182ul 147#define PIT_TICK_RATE 1193182ul
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9cb2fe8ca944..e303eef94dd5 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,6 +42,7 @@ struct usbnet {
42 struct usb_host_endpoint *status; 42 struct usb_host_endpoint *status;
43 unsigned maxpacket; 43 unsigned maxpacket;
44 struct timer_list delay; 44 struct timer_list delay;
45 const char *padding_pkt;
45 46
46 /* protocol/interface state */ 47 /* protocol/interface state */
47 struct net_device *net; 48 struct net_device *net;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index fb314de2b61b..86505bfa5d2c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); 67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
68#endif 68#endif
69 69
70bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
71 const unsigned int prefix_len,
72 struct net_device *dev);
73
70int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); 74int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
71 75
72struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, 76struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaeaf0938ec0..15f10841e2b5 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -104,6 +104,7 @@ enum {
104enum { 104enum {
105 HCI_SETUP, 105 HCI_SETUP,
106 HCI_AUTO_OFF, 106 HCI_AUTO_OFF,
107 HCI_RFKILLED,
107 HCI_MGMT, 108 HCI_MGMT,
108 HCI_PAIRABLE, 109 HCI_PAIRABLE,
109 HCI_SERVICE_CACHE, 110 HCI_SERVICE_CACHE,
diff --git a/include/net/ip.h b/include/net/ip.h
index 48f55979d842..5e5268807a1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -264,9 +264,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
264 264
265extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); 265extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
266 266
267static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk) 267static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
268{ 268{
269 if (iph->frag_off & htons(IP_DF)) { 269 struct iphdr *iph = ip_hdr(skb);
270
271 if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
270 /* This is only to work around buggy Windows95/2000 272 /* This is only to work around buggy Windows95/2000
271 * VJ compression implementations. If the ID field 273 * VJ compression implementations. If the ID field
272 * does not change, they drop every other packet in 274 * does not change, they drop every other packet in
@@ -278,9 +280,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
278 __ip_select_ident(iph, dst, 0); 280 __ip_select_ident(iph, dst, 0);
279} 281}
280 282
281static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more) 283static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
282{ 284{
283 if (iph->frag_off & htons(IP_DF)) { 285 struct iphdr *iph = ip_hdr(skb);
286
287 if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
284 if (sk && inet_sk(sk)->inet_daddr) { 288 if (sk && inet_sk(sk)->inet_daddr) {
285 iph->id = htons(inet_sk(sk)->inet_id); 289 iph->id = htons(inet_sk(sk)->inet_id);
286 inet_sk(sk)->inet_id += 1 + more; 290 inet_sk(sk)->inet_id += 1 + more;
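ip_select_ident() and ip_select_ident_more() now take the skb rather than a bare iphdr, so the helpers can consult skb->local_df themselves before deciding whether to leave the IP ID fixed. A sketch of an adjusted call site (the surrounding function is illustrative):

static void finish_ipv4_header(struct sk_buff *skb, struct rtable *rt,
                               struct sock *sk)
{
        /* the IPv4 header is assumed to be built at this point */
        ip_select_ident(skb, &rt->dst, sk);
}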
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index f0d70f066f3d..9c4d37ec45a1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
723 struct rcu_head rcu_head; 723 struct rcu_head rcu_head;
724}; 724};
725 725
726/* In grace period after removing */
727#define IP_VS_DEST_STATE_REMOVING 0x01
728/* 726/*
729 * The real server destination forwarding entry 727 * The real server destination forwarding entry
730 * with ip address, port number, and so on. 728 * with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
742 740
743 atomic_t refcnt; /* reference counter */ 741 atomic_t refcnt; /* reference counter */
744 struct ip_vs_stats stats; /* statistics */ 742 struct ip_vs_stats stats; /* statistics */
745 unsigned long state; /* state flags */ 743 unsigned long idle_start; /* start time, jiffies */
746 744
747 /* connection counters and thresholds */ 745 /* connection counters and thresholds */
748 atomic_t activeconns; /* active connections */ 746 atomic_t activeconns; /* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
756 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ 754 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
757 755
758 /* for virtual service */ 756 /* for virtual service */
759 struct ip_vs_service *svc; /* service it belongs to */ 757 struct ip_vs_service __rcu *svc; /* service it belongs to */
760 __u16 protocol; /* which protocol (TCP/UDP) */ 758 __u16 protocol; /* which protocol (TCP/UDP) */
761 __be16 vport; /* virtual port number */ 759 __be16 vport; /* virtual port number */
762 union nf_inet_addr vaddr; /* virtual IP address */ 760 union nf_inet_addr vaddr; /* virtual IP address */
763 __u32 vfwmark; /* firewall mark of service */ 761 __u32 vfwmark; /* firewall mark of service */
764 762
765 struct list_head t_list; /* in dest_trash */ 763 struct list_head t_list; /* in dest_trash */
766 struct rcu_head rcu_head;
767 unsigned int in_rs_table:1; /* we are in rs_table */ 764 unsigned int in_rs_table:1; /* we are in rs_table */
768}; 765};
769 766
@@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1649/* CONFIG_IP_VS_NFCT */ 1646/* CONFIG_IP_VS_NFCT */
1650#endif 1647#endif
1651 1648
1652static inline unsigned int 1649static inline int
1653ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1650ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1654{ 1651{
1655 /* 1652 /*
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 4fbf02aa2ec1..0f7558b638ae 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -112,6 +112,7 @@ struct mrp_applicant {
112 struct mrp_application *app; 112 struct mrp_application *app;
113 struct net_device *dev; 113 struct net_device *dev;
114 struct timer_list join_timer; 114 struct timer_list join_timer;
115 struct timer_list periodic_timer;
115 116
116 spinlock_t lock; 117 spinlock_t lock;
117 struct sk_buff_head queue; 118 struct sk_buff_head queue;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1313456a0994..9d22f08896c6 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -74,6 +74,7 @@ struct net {
74 struct hlist_head *dev_index_head; 74 struct hlist_head *dev_index_head;
75 unsigned int dev_base_seq; /* protected by rtnl_mutex */ 75 unsigned int dev_base_seq; /* protected by rtnl_mutex */
76 int ifindex; 76 int ifindex;
77 unsigned int dev_unreg_count;
77 78
78 /* core fib_rules */ 79 /* core fib_rules */
79 struct list_head rules_ops; 80 struct list_head rules_ops;
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index ff95434e50ca..88a1d4060d52 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -86,7 +86,7 @@ static inline void nf_ct_ext_destroy(struct nf_conn *ct)
86static inline void nf_ct_ext_free(struct nf_conn *ct) 86static inline void nf_ct_ext_free(struct nf_conn *ct)
87{ 87{
88 if (ct->ext) 88 if (ct->ext)
89 kfree(ct->ext); 89 kfree_rcu(ct->ext, rcu);
90} 90}
91 91
92/* Add this type, returns pointer to data or NULL. */ 92/* Add this type, returns pointer to data or NULL. */
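Switching from kfree() to kfree_rcu() defers release of the extension area to an RCU grace period; kfree_rcu() requires the freed structure to embed a struct rcu_head, here named 'rcu'. A generic sketch of that pattern (the types are illustrative):

struct foo {
        struct rcu_head rcu;    /* required by kfree_rcu(ptr, rcu) */
        int             payload;
};

static void drop_foo(struct foo *f)
{
        kfree_rcu(f, rcu);      /* memory is freed only after readers have left */
}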
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 806f54a290d6..f572f313d6f1 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -56,7 +56,7 @@ struct synproxy_options {
56 56
57struct tcphdr; 57struct tcphdr;
58struct xt_synproxy_info; 58struct xt_synproxy_info;
59extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 59extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
60 const struct tcphdr *th, 60 const struct tcphdr *th,
61 struct synproxy_options *opts); 61 struct synproxy_options *opts);
62extern unsigned int synproxy_options_size(const struct synproxy_options *opts); 62extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 6ca975bebd37..c2e542b27a5a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6extern void net_secret_init(void);
7extern __u32 secure_ip_id(__be32 daddr); 6extern __u32 secure_ip_id(__be32 daddr);
8extern __u32 secure_ipv6_id(const __be32 daddr[4]); 7extern __u32 secure_ipv6_id(const __be32 daddr[4]);
9extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 8extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 6ba2e7b0e2b1..1d37a8086bed 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -409,6 +409,11 @@ struct sock {
409 void (*sk_destruct)(struct sock *sk); 409 void (*sk_destruct)(struct sock *sk);
410}; 410};
411 411
412#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
413
414#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
415#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
416
412/* 417/*
413 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK 418 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
414 * or not whether his port will be reused by someone else. SK_FORCE_REUSE 419 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
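The new helpers wrap sk->sk_user_data with the usual RCU publish/consume primitives. A sketch of the intended usage; the context structure and functions are illustrative, and readers are expected to hold rcu_read_lock():

struct my_ctx {
        int id;
};

static void publish_ctx(struct sock *sk, struct my_ctx *ctx)
{
        rcu_assign_sk_user_data(sk, ctx);       /* pairs with the reader below */
}

static struct my_ctx *peek_ctx(struct sock *sk)
{
        return rcu_dereference_sk_user_data(sk); /* call under rcu_read_lock() */
}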
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 27a72d5d4b00..2037c45adfe6 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -286,6 +286,8 @@ struct device;
286 .info = snd_soc_info_volsw, \ 286 .info = snd_soc_info_volsw, \
287 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ 287 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
288 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) } 288 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
289#define SOC_DAPM_SINGLE_VIRT(xname, max) \
290 SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0)
289#define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \ 291#define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
290{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 292{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
291 .info = snd_soc_info_volsw, \ 293 .info = snd_soc_info_volsw, \
@@ -300,6 +302,8 @@ struct device;
300 .tlv.p = (tlv_array), \ 302 .tlv.p = (tlv_array), \
301 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ 303 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
302 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) } 304 .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
305#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
306 SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
303#define SOC_DAPM_ENUM(xname, xenum) \ 307#define SOC_DAPM_ENUM(xname, xenum) \
304{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 308{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
305 .info = snd_soc_info_enum_double, \ 309 .info = snd_soc_info_enum_double, \
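SOC_DAPM_SINGLE_VIRT declares a mixer control with no backing register (SND_SOC_NOPM). A sketch of a virtual switch in a mixer control array (the control and array names are illustrative):

static const struct snd_kcontrol_new left_mixer_controls[] = {
        SOC_DAPM_SINGLE_VIRT("DAC Switch", 1),  /* no hardware register behind it */
};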
diff --git a/include/sound/soc.h b/include/sound/soc.h
index d22cb0a06feb..b429dba57bf6 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1088,7 +1088,8 @@ struct snd_soc_pcm_runtime {
1088/* mixer control */ 1088/* mixer control */
1089struct soc_mixer_control { 1089struct soc_mixer_control {
1090 int min, max, platform_max; 1090 int min, max, platform_max;
1091 unsigned int reg, rreg, shift, rshift; 1091 int reg, rreg;
1092 unsigned int shift, rshift;
1092 unsigned int invert:1; 1093 unsigned int invert:1;
1093 unsigned int autodisable:1; 1094 unsigned int autodisable:1;
1094}; 1095};
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 60ae7c3db912..4c2301d2ef1a 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
618 __field( unsigned int, nr_sector ) 618 __field( unsigned int, nr_sector )
619 __field( dev_t, old_dev ) 619 __field( dev_t, old_dev )
620 __field( sector_t, old_sector ) 620 __field( sector_t, old_sector )
621 __field( unsigned int, nr_bios )
621 __array( char, rwbs, RWBS_LEN) 622 __array( char, rwbs, RWBS_LEN)
622 ), 623 ),
623 624
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
627 __entry->nr_sector = blk_rq_sectors(rq); 628 __entry->nr_sector = blk_rq_sectors(rq);
628 __entry->old_dev = dev; 629 __entry->old_dev = dev;
629 __entry->old_sector = from; 630 __entry->old_sector = from;
631 __entry->nr_bios = blk_rq_count_bios(rq);
630 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 632 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
631 ), 633 ),
632 634
633 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 635 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
634 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, 636 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
635 (unsigned long long)__entry->sector, 637 (unsigned long long)__entry->sector,
636 __entry->nr_sector, 638 __entry->nr_sector,
637 MAJOR(__entry->old_dev), MINOR(__entry->old_dev), 639 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
638 (unsigned long long)__entry->old_sector) 640 (unsigned long long)__entry->old_sector, __entry->nr_bios)
639); 641);
640 642
641#endif /* _TRACE_BLOCK_H */ 643#endif /* _TRACE_BLOCK_H */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 45702c3c3837..f18b3b76e01e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -42,6 +42,7 @@ struct extent_buffer;
42 { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \ 42 { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
43 { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \ 43 { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
44 { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ 44 { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
45 { BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \
45 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) 46 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
46 47
47#define show_root_type(obj) \ 48#define show_root_type(obj) \
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fa8b3adf9ffb..46d41e8b0dcc 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1009 1009
1010#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1011
1010#endif 1012#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index ca1d90bcb74d..009a655a5d35 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -324,7 +324,7 @@ struct perf_event_attr {
324#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) 324#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
325#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) 325#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
326#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) 326#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
327#define PERF_EVENT_IOC_ID _IOR('$', 7, u64 *) 327#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
328 328
329enum perf_event_ioc_flags { 329enum perf_event_ioc_flags {
330 PERF_IOC_FLAG_GROUP = 1U << 0, 330 PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
380 union { 380 union {
381 __u64 capabilities; 381 __u64 capabilities;
382 struct { 382 struct {
383 __u64 cap_usr_time : 1, 383 __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
384 cap_usr_rdpmc : 1, 384 cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
385 cap_usr_time_zero : 1, 385
386 cap_____res : 61; 386 cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
387 cap_user_time : 1, /* The time_* fields are used */
388 cap_user_time_zero : 1, /* The time_zero field is used */
389 cap_____res : 59;
387 }; 390 };
388 }; 391 };
389 392
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
442 * ((rem * time_mult) >> time_shift); 445 * ((rem * time_mult) >> time_shift);
443 */ 446 */
444 __u64 time_zero; 447 __u64 time_zero;
448 __u32 size; /* Header size up to __reserved[] fields. */
445 449
446 /* 450 /*
447 * Hole for extension of the self monitor capabilities 451 * Hole for extension of the self monitor capabilities
448 */ 452 */
449 453
450 __u64 __reserved[119]; /* align to 1k */ 454 __u8 __reserved[118*8+4]; /* align to 1k. */
451 455
452 /* 456 /*
453 * Control data for the mmap() data buffer. 457 * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
528 * u64 len; 532 * u64 len;
529 * u64 pgoff; 533 * u64 pgoff;
530 * char filename[]; 534 * char filename[];
535 * struct sample_id sample_id;
531 * }; 536 * };
532 */ 537 */
533 PERF_RECORD_MMAP = 1, 538 PERF_RECORD_MMAP = 1,
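Bit 0 of the capabilities word used to be ambiguous because the old cap_usr_* bitfields overlapped; the new layout keeps bit 0 permanently zero, sets cap_bit0_is_deprecated permanently to one, and moves the real flags up. A userspace sketch of how a reader can cope with both layouts (the helper is illustrative):

static int rdpmc_capable(const struct perf_event_mmap_page *pc)
{
        if (pc->cap_bit0_is_deprecated)
                return pc->cap_user_rdpmc;      /* new, unambiguous layout */
        /* old kernels: the overlapping bit 0 is the best information available */
        return pc->capabilities & 1;
}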
diff --git a/ipc/msg.c b/ipc/msg.c
index b0d541d42677..558aa91186b6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
165 ipc_rmid(&msg_ids(ns), &s->q_perm); 165 ipc_rmid(&msg_ids(ns), &s->q_perm);
166} 166}
167 167
168static void msg_rcu_free(struct rcu_head *head)
169{
170 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
171 struct msg_queue *msq = ipc_rcu_to_struct(p);
172
173 security_msg_queue_free(msq);
174 ipc_rcu_free(head);
175}
176
168/** 177/**
169 * newque - Create a new msg queue 178 * newque - Create a new msg queue
170 * @ns: namespace 179 * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
189 msq->q_perm.security = NULL; 198 msq->q_perm.security = NULL;
190 retval = security_msg_queue_alloc(msq); 199 retval = security_msg_queue_alloc(msq);
191 if (retval) { 200 if (retval) {
192 ipc_rcu_putref(msq); 201 ipc_rcu_putref(msq, ipc_rcu_free);
193 return retval; 202 return retval;
194 } 203 }
195 204
196 /* ipc_addid() locks msq upon success. */ 205 /* ipc_addid() locks msq upon success. */
197 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 206 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
198 if (id < 0) { 207 if (id < 0) {
199 security_msg_queue_free(msq); 208 ipc_rcu_putref(msq, msg_rcu_free);
200 ipc_rcu_putref(msq);
201 return id; 209 return id;
202 } 210 }
203 211
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
276 free_msg(msg); 284 free_msg(msg);
277 } 285 }
278 atomic_sub(msq->q_cbytes, &ns->msg_bytes); 286 atomic_sub(msq->q_cbytes, &ns->msg_bytes);
279 security_msg_queue_free(msq); 287 ipc_rcu_putref(msq, msg_rcu_free);
280 ipc_rcu_putref(msq);
281} 288}
282 289
283/* 290/*
@@ -688,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
688 if (ipcperms(ns, &msq->q_perm, S_IWUGO)) 695 if (ipcperms(ns, &msq->q_perm, S_IWUGO))
689 goto out_unlock0; 696 goto out_unlock0;
690 697
698 /* raced with RMID? */
699 if (msq->q_perm.deleted) {
700 err = -EIDRM;
701 goto out_unlock0;
702 }
703
691 err = security_msg_queue_msgsnd(msq, msg, msgflg); 704 err = security_msg_queue_msgsnd(msq, msg, msgflg);
692 if (err) 705 if (err)
693 goto out_unlock0; 706 goto out_unlock0;
@@ -717,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
717 rcu_read_lock(); 730 rcu_read_lock();
718 ipc_lock_object(&msq->q_perm); 731 ipc_lock_object(&msq->q_perm);
719 732
720 ipc_rcu_putref(msq); 733 ipc_rcu_putref(msq, ipc_rcu_free);
721 if (msq->q_perm.deleted) { 734 if (msq->q_perm.deleted) {
722 err = -EIDRM; 735 err = -EIDRM;
723 goto out_unlock0; 736 goto out_unlock0;
@@ -894,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
894 goto out_unlock1; 907 goto out_unlock1;
895 908
896 ipc_lock_object(&msq->q_perm); 909 ipc_lock_object(&msq->q_perm);
910
911 /* raced with RMID? */
912 if (msq->q_perm.deleted) {
913 msg = ERR_PTR(-EIDRM);
914 goto out_unlock0;
915 }
916
897 msg = find_msg(msq, &msgtyp, mode); 917 msg = find_msg(msq, &msgtyp, mode);
898 if (!IS_ERR(msg)) { 918 if (!IS_ERR(msg)) {
899 /* 919 /*
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..8c4f59b0204a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,71 +243,122 @@ static void merge_queues(struct sem_array *sma)
243 } 243 }
244} 244}
245 245
246static void sem_rcu_free(struct rcu_head *head)
247{
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253}
254
255/*
256 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check
259 * that sem_perm.lock is free.
260 * that a) sem_perm.lock is free and b) complex_count is 0.
261 */
262static void sem_wait_array(struct sem_array *sma)
263{
264 int i;
265 struct sem *sem;
266
267 if (sma->complex_count) {
268 /* The thread that increased sma->complex_count waited on
269 * all sem->lock locks. Thus we don't need to wait again.
270 */
271 return;
272 }
273
274 for (i = 0; i < sma->sem_nsems; i++) {
275 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock);
277 }
278}
279
246/* 280/*
247 * If the request contains only one semaphore operation, and there are 281 * If the request contains only one semaphore operation, and there are
248 * no complex transactions pending, lock only the semaphore involved. 282 * no complex transactions pending, lock only the semaphore involved.
249 * Otherwise, lock the entire semaphore array, since we either have 283 * Otherwise, lock the entire semaphore array, since we either have
250 * multiple semaphores in our own semops, or we need to look at 284 * multiple semaphores in our own semops, or we need to look at
251 * semaphores from other pending complex operations. 285 * semaphores from other pending complex operations.
252 *
253 * Carefully guard against sma->complex_count changing between zero
254 * and non-zero while we are spinning for the lock. The value of
255 * sma->complex_count cannot change while we are holding the lock,
256 * so sem_unlock should be fine.
257 *
258 * The global lock path checks that all the local locks have been released,
259 * checking each local lock once. This means that the local lock paths
260 * cannot start their critical sections while the global lock is held.
261 */ 286 */
262static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, 287static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
263 int nsops) 288 int nsops)
264{ 289{
265 int locknum; 290 struct sem *sem;
266 again:
267 if (nsops == 1 && !sma->complex_count) {
268 struct sem *sem = sma->sem_base + sops->sem_num;
269 291
270 /* Lock just the semaphore we are interested in. */ 292 if (nsops != 1) {
271 spin_lock(&sem->lock); 293 /* Complex operation - acquire a full lock */
294 ipc_lock_object(&sma->sem_perm);
272 295
273 /* 296 /* And wait until all simple ops that are processed
274 * If sma->complex_count was set while we were spinning, 297 * right now have dropped their locks.
275 * we may need to look at things we did not lock here.
276 */ 298 */
277 if (unlikely(sma->complex_count)) { 299 sem_wait_array(sma);
278 spin_unlock(&sem->lock); 300 return -1;
279 goto lock_array; 301 }
280 } 302
303 /*
304 * Only one semaphore affected - try to optimize locking.
305 * The rules are:
306 * - optimized locking is possible if no complex operation
307 * is either enqueued or processed right now.
308 * - The test for enqueued complex ops is simple:
309 * sma->complex_count != 0
310 * - Testing for complex ops that are processed right now is
311 * a bit more difficult. Complex ops acquire the full lock
312 * and first wait that the running simple ops have completed.
313 * (see above)
314 * Thus: If we own a simple lock and the global lock is free
315 * and complex_count is now 0, then it will stay 0 and
316 * thus just locking sem->lock is sufficient.
317 */
318 sem = sma->sem_base + sops->sem_num;
281 319
320 if (sma->complex_count == 0) {
282 /* 321 /*
283 * Another process is holding the global lock on the 322 * It appears that no complex operation is around.
284 * sem_array; we cannot enter our critical section, 323 * Acquire the per-semaphore lock.
285 * but have to wait for the global lock to be released.
286 */ 324 */
287 if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { 325 spin_lock(&sem->lock);
288 spin_unlock(&sem->lock); 326
289 spin_unlock_wait(&sma->sem_perm.lock); 327 /* Then check that the global lock is free */
290 goto again; 328 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* spin_is_locked() is not a memory barrier */
330 smp_mb();
331
332 /* Now repeat the test of complex_count:
333 * It can't change anymore until we drop sem->lock.
334 * Thus: if is now 0, then it will stay 0.
335 */
336 if (sma->complex_count == 0) {
337 /* fast path successful! */
338 return sops->sem_num;
339 }
291 } 340 }
341 spin_unlock(&sem->lock);
342 }
292 343
293 locknum = sops->sem_num; 344 /* slow path: acquire the full lock */
345 ipc_lock_object(&sma->sem_perm);
346
347 if (sma->complex_count == 0) {
348 /* False alarm:
349 * There is no complex operation, thus we can switch
350 * back to the fast path.
351 */
352 spin_lock(&sem->lock);
353 ipc_unlock_object(&sma->sem_perm);
354 return sops->sem_num;
294 } else { 355 } else {
295 int i; 356 /* Not a false alarm, thus complete the sequence for a
296 /* 357 * full lock.
297 * Lock the semaphore array, and wait for all of the
298 * individual semaphore locks to go away. The code
299 * above ensures no new single-lock holders will enter
300 * their critical section while the array lock is held.
301 */ 358 */
302 lock_array: 359 sem_wait_array(sma);
303 ipc_lock_object(&sma->sem_perm); 360 return -1;
304 for (i = 0; i < sma->sem_nsems; i++) {
305 struct sem *sem = sma->sem_base + i;
306 spin_unlock_wait(&sem->lock);
307 }
308 locknum = -1;
309 } 361 }
310 return locknum;
311} 362}
312 363
313static inline void sem_unlock(struct sem_array *sma, int locknum) 364static inline void sem_unlock(struct sem_array *sma, int locknum)
@@ -374,12 +425,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
374static inline void sem_lock_and_putref(struct sem_array *sma) 425static inline void sem_lock_and_putref(struct sem_array *sma)
375{ 426{
376 sem_lock(sma, NULL, -1); 427 sem_lock(sma, NULL, -1);
377 ipc_rcu_putref(sma); 428 ipc_rcu_putref(sma, ipc_rcu_free);
378}
379
380static inline void sem_putref(struct sem_array *sma)
381{
382 ipc_rcu_putref(sma);
383} 429}
384 430
385static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 431static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +504,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
458 sma->sem_perm.security = NULL; 504 sma->sem_perm.security = NULL;
459 retval = security_sem_alloc(sma); 505 retval = security_sem_alloc(sma);
460 if (retval) { 506 if (retval) {
461 ipc_rcu_putref(sma); 507 ipc_rcu_putref(sma, ipc_rcu_free);
462 return retval; 508 return retval;
463 } 509 }
464 510
465 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 511 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
466 if (id < 0) { 512 if (id < 0) {
467 security_sem_free(sma); 513 ipc_rcu_putref(sma, sem_rcu_free);
468 ipc_rcu_putref(sma);
469 return id; 514 return id;
470 } 515 }
471 ns->used_sems += nsems; 516 ns->used_sems += nsems;
@@ -873,6 +918,24 @@ again:
873} 918}
874 919
875/** 920/**
921 * set_semotime(sma, sops) - set sem_otime
922 * @sma: semaphore array
923 * @sops: operations that modified the array, may be NULL
924 *
925 * sem_otime is replicated to avoid cache line thrashing.
926 * This function sets one instance to the current time.
927 */
928static void set_semotime(struct sem_array *sma, struct sembuf *sops)
929{
930 if (sops == NULL) {
931 sma->sem_base[0].sem_otime = get_seconds();
932 } else {
933 sma->sem_base[sops[0].sem_num].sem_otime =
934 get_seconds();
935 }
936}
937
938/**
876 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue 939 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
877 * @sma: semaphore array 940 * @sma: semaphore array
878 * @sops: operations that were performed 941 * @sops: operations that were performed
@@ -922,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
922 } 985 }
923 } 986 }
924 } 987 }
925 if (otime) { 988 if (otime)
926 if (sops == NULL) { 989 set_semotime(sma, sops);
927 sma->sem_base[0].sem_otime = get_seconds();
928 } else {
929 sma->sem_base[sops[0].sem_num].sem_otime =
930 get_seconds();
931 }
932 }
933} 990}
934 991
935
936/* The following counts are associated to each semaphore: 992/* The following counts are associated to each semaphore:
937 * semncnt number of tasks waiting on semval being nonzero 993 * semncnt number of tasks waiting on semval being nonzero
938 * semzcnt number of tasks waiting on semval being zero 994 * semzcnt number of tasks waiting on semval being zero
@@ -1047,8 +1103,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1047 1103
1048 wake_up_sem_queue_do(&tasks); 1104 wake_up_sem_queue_do(&tasks);
1049 ns->used_sems -= sma->sem_nsems; 1105 ns->used_sems -= sma->sem_nsems;
1050 security_sem_free(sma); 1106 ipc_rcu_putref(sma, sem_rcu_free);
1051 ipc_rcu_putref(sma);
1052} 1107}
1053 1108
1054static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1109static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1347,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1292 rcu_read_unlock(); 1347 rcu_read_unlock();
1293 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1348 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1294 if(sem_io == NULL) { 1349 if(sem_io == NULL) {
1295 sem_putref(sma); 1350 ipc_rcu_putref(sma, ipc_rcu_free);
1296 return -ENOMEM; 1351 return -ENOMEM;
1297 } 1352 }
1298 1353
@@ -1328,20 +1383,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1328 if(nsems > SEMMSL_FAST) { 1383 if(nsems > SEMMSL_FAST) {
1329 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1384 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1330 if(sem_io == NULL) { 1385 if(sem_io == NULL) {
1331 sem_putref(sma); 1386 ipc_rcu_putref(sma, ipc_rcu_free);
1332 return -ENOMEM; 1387 return -ENOMEM;
1333 } 1388 }
1334 } 1389 }
1335 1390
1336 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { 1391 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1337 sem_putref(sma); 1392 ipc_rcu_putref(sma, ipc_rcu_free);
1338 err = -EFAULT; 1393 err = -EFAULT;
1339 goto out_free; 1394 goto out_free;
1340 } 1395 }
1341 1396
1342 for (i = 0; i < nsems; i++) { 1397 for (i = 0; i < nsems; i++) {
1343 if (sem_io[i] > SEMVMX) { 1398 if (sem_io[i] > SEMVMX) {
1344 sem_putref(sma); 1399 ipc_rcu_putref(sma, ipc_rcu_free);
1345 err = -ERANGE; 1400 err = -ERANGE;
1346 goto out_free; 1401 goto out_free;
1347 } 1402 }
@@ -1629,7 +1684,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1629 /* step 2: allocate new undo structure */ 1684 /* step 2: allocate new undo structure */
1630 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1685 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1631 if (!new) { 1686 if (!new) {
1632 sem_putref(sma); 1687 ipc_rcu_putref(sma, ipc_rcu_free);
1633 return ERR_PTR(-ENOMEM); 1688 return ERR_PTR(-ENOMEM);
1634 } 1689 }
1635 1690
@@ -1795,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1795 1850
1796 error = perform_atomic_semop(sma, sops, nsops, un, 1851 error = perform_atomic_semop(sma, sops, nsops, un,
1797 task_tgid_vnr(current)); 1852 task_tgid_vnr(current));
1798 if (error <= 0) { 1853 if (error == 0) {
1799 if (alter && error == 0) 1854 /* If the operation was successful, then do
1855 * the required updates.
1856 */
1857 if (alter)
1800 do_smart_update(sma, sops, nsops, 1, &tasks); 1858 do_smart_update(sma, sops, nsops, 1, &tasks);
1801 1859 else
1802 goto out_unlock_free; 1860 set_semotime(sma, sops);
1803 } 1861 }
1862 if (error <= 0)
1863 goto out_unlock_free;
1804 1864
1805 /* We need to sleep on this operation, so we put the current 1865 /* We need to sleep on this operation, so we put the current
1806 * task into the pending queue and go to sleep. 1866 * task into the pending queue and go to sleep.
@@ -2059,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2059 struct sem_array *sma = it; 2119 struct sem_array *sma = it;
2060 time_t sem_otime; 2120 time_t sem_otime;
2061 2121
2122 /*
2123 * The proc interface isn't aware of sem_lock(), it calls
2124 * ipc_lock_object() directly (in sysvipc_find_ipc).
2125 * In order to stay compatible with sem_lock(), we must wait until
2126 * all simple semop() calls have left their critical regions.
2127 */
2128 sem_wait_array(sma);
2129
2062 sem_otime = get_semotime(sma); 2130 sem_otime = get_semotime(sma);
2063 2131
2064 return seq_printf(s, 2132 return seq_printf(s,
diff --git a/ipc/shm.c b/ipc/shm.c
index 2821cdf93adb..d69739610fd4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
167 ipc_lock_object(&ipcp->shm_perm); 167 ipc_lock_object(&ipcp->shm_perm);
168} 168}
169 169
170static void shm_rcu_free(struct rcu_head *head)
171{
172 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
173 struct shmid_kernel *shp = ipc_rcu_to_struct(p);
174
175 security_shm_free(shp);
176 ipc_rcu_free(head);
177}
178
170static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) 179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
171{ 180{
172 ipc_rmid(&shm_ids(ns), &s->shm_perm); 181 ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
208 user_shm_unlock(file_inode(shp->shm_file)->i_size, 217 user_shm_unlock(file_inode(shp->shm_file)->i_size,
209 shp->mlock_user); 218 shp->mlock_user);
210 fput (shp->shm_file); 219 fput (shp->shm_file);
211 security_shm_free(shp); 220 ipc_rcu_putref(shp, shm_rcu_free);
212 ipc_rcu_putref(shp);
213} 221}
214 222
215/* 223/*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
497 shp->shm_perm.security = NULL; 505 shp->shm_perm.security = NULL;
498 error = security_shm_alloc(shp); 506 error = security_shm_alloc(shp);
499 if (error) { 507 if (error) {
500 ipc_rcu_putref(shp); 508 ipc_rcu_putref(shp, ipc_rcu_free);
501 return error; 509 return error;
502 } 510 }
503 511
@@ -566,8 +574,7 @@ no_id:
566 user_shm_unlock(size, shp->mlock_user); 574 user_shm_unlock(size, shp->mlock_user);
567 fput(file); 575 fput(file);
568no_file: 576no_file:
569 security_shm_free(shp); 577 ipc_rcu_putref(shp, shm_rcu_free);
570 ipc_rcu_putref(shp);
571 return error; 578 return error;
572} 579}
573 580
diff --git a/ipc/util.c b/ipc/util.c
index e829da9ed01f..fdb8ae740775 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
474 kfree(ptr); 474 kfree(ptr);
475} 475}
476 476
477struct ipc_rcu {
478 struct rcu_head rcu;
479 atomic_t refcount;
480} ____cacheline_aligned_in_smp;
481
482/** 477/**
483 * ipc_rcu_alloc - allocate ipc and rcu space 478 * ipc_rcu_alloc - allocate ipc and rcu space
484 * @size: size desired 479 * @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
505 return atomic_inc_not_zero(&p->refcount); 500 return atomic_inc_not_zero(&p->refcount);
506} 501}
507 502
508/** 503void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
509 * ipc_schedule_free - free ipc + rcu space
510 * @head: RCU callback structure for queued work
511 */
512static void ipc_schedule_free(struct rcu_head *head)
513{
514 vfree(container_of(head, struct ipc_rcu, rcu));
515}
516
517void ipc_rcu_putref(void *ptr)
518{ 504{
519 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; 505 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
520 506
521 if (!atomic_dec_and_test(&p->refcount)) 507 if (!atomic_dec_and_test(&p->refcount))
522 return; 508 return;
523 509
524 if (is_vmalloc_addr(ptr)) { 510 call_rcu(&p->rcu, func);
525 call_rcu(&p->rcu, ipc_schedule_free); 511}
526 } else { 512
527 kfree_rcu(p, rcu); 513void ipc_rcu_free(struct rcu_head *head)
528 } 514{
515 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
516
517 if (is_vmalloc_addr(p))
518 vfree(p);
519 else
520 kfree(p);
529} 521}
530 522
531/** 523/**
diff --git a/ipc/util.h b/ipc/util.h
index c5f3338ba1fa..f2f5036f2eed 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
47static inline void shm_exit_ns(struct ipc_namespace *ns) { } 47static inline void shm_exit_ns(struct ipc_namespace *ns) { }
48#endif 48#endif
49 49
50struct ipc_rcu {
51 struct rcu_head rcu;
52 atomic_t refcount;
53} ____cacheline_aligned_in_smp;
54
55#define ipc_rcu_to_struct(p) ((void *)(p+1))
56
50/* 57/*
51 * Structure that holds the parameters needed by the ipc operations 58 * Structure that holds the parameters needed by the ipc operations
52 * (see after) 59 * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
120 */ 127 */
121void* ipc_rcu_alloc(int size); 128void* ipc_rcu_alloc(int size);
122int ipc_rcu_getref(void *ptr); 129int ipc_rcu_getref(void *ptr);
123void ipc_rcu_putref(void *ptr); 130void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
131void ipc_rcu_free(struct rcu_head *head);
124 132
125struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 133struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
126struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 134struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
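ipc_rcu_putref() now takes the RCU callback that will free the object, and ipc_rcu_free() is the generic variant for callers with no per-type teardown. A sketch of the per-type callback pattern that the msg, sem and shm conversions above follow, using a hypothetical 'foo' object:

struct foo_ipc {                                /* hypothetical ipc object */
        struct kern_ipc_perm    perm;
        void                    *security;
};

static void foo_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct foo_ipc *foo = ipc_rcu_to_struct(p);

        /* release type-specific state (e.g. the LSM blob) before the memory */
        (void)foo;
        ipc_rcu_free(head);
}

static void put_foo(struct foo_ipc *foo)
{
        ipc_rcu_putref(foo, foo_rcu_free);      /* frees after an RCU grace period */
}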
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1117 1117
1118 sleep_time = timeout_start + audit_backlog_wait_time - 1118 sleep_time = timeout_start + audit_backlog_wait_time -
1119 jiffies; 1119 jiffies;
1120 if ((long)sleep_time > 0) 1120 if ((long)sleep_time > 0) {
1121 wait_for_auditd(sleep_time); 1121 wait_for_auditd(sleep_time);
1122 continue; 1122 continue;
1123 }
1123 } 1124 }
1124 if (audit_rate_check() && printk_ratelimit()) 1125 if (audit_rate_check() && printk_ratelimit())
1125 printk(KERN_WARNING 1126 printk(KERN_WARNING
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
51 unsigned long flags; 51 unsigned long flags;
52 52
53 /* 53 /*
54 * Repeat the user_enter() check here because some archs may be calling
55 * this from asm and if no CPU needs context tracking, they shouldn't
56 * go further. Repeat the check here until they support the static key
57 * check.
58 */
59 if (!static_key_false(&context_tracking_enabled))
60 return;
61
62 /*
54 * Some contexts may involve an exception occurring in an irq, 63 * Some contexts may involve an exception occurring in an irq,
55 * leading to that nesting: 64 * leading to that nesting:
56 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 65 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
151{ 160{
152 unsigned long flags; 161 unsigned long flags;
153 162
163 if (!static_key_false(&context_tracking_enabled))
164 return;
165
154 if (in_interrupt()) 166 if (in_interrupt())
155 return; 167 return;
156 168
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..cb4238e85b38 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
3660 *running = ctx_time - event->tstamp_running; 3660 *running = ctx_time - event->tstamp_running;
3661} 3661}
3662 3662
3663static void perf_event_init_userpage(struct perf_event *event)
3664{
3665 struct perf_event_mmap_page *userpg;
3666 struct ring_buffer *rb;
3667
3668 rcu_read_lock();
3669 rb = rcu_dereference(event->rb);
3670 if (!rb)
3671 goto unlock;
3672
3673 userpg = rb->user_page;
3674
3675 /* Allow new userspace to detect that bit 0 is deprecated */
3676 userpg->cap_bit0_is_deprecated = 1;
3677 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3678
3679unlock:
3680 rcu_read_unlock();
3681}
3682
3663void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3683void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3664{ 3684{
3665} 3685}
@@ -4044,6 +4064,7 @@ again:
4044 ring_buffer_attach(event, rb); 4064 ring_buffer_attach(event, rb);
4045 rcu_assign_pointer(event->rb, rb); 4065 rcu_assign_pointer(event->rb, rb);
4046 4066
4067 perf_event_init_userpage(event);
4047 perf_event_update_userpage(event); 4068 perf_event_update_userpage(event);
4048 4069
4049unlock: 4070unlock:
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
571 DECLARE_COMPLETION_ONSTACK(done); 571 DECLARE_COMPLETION_ONSTACK(done);
572 int retval = 0; 572 int retval = 0;
573 573
574 if (!sub_info->path) {
575 call_usermodehelper_freeinfo(sub_info);
576 return -EINVAL;
577 }
574 helper_lock(); 578 helper_lock();
575 if (!khelper_wq || usermodehelper_disabled) { 579 if (!khelper_wq || usermodehelper_disabled) {
576 retval = -EBUSY; 580 retval = -EBUSY;
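
The kmod hunk rejects a NULL path before taking any locks and releases the caller-supplied descriptor on the way out. A small sketch of that validate-then-free pattern; the struct and helper names here are invented, not the kernel's.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct helper_info {
        char *path;             /* program to execute, may be NULL */
};

static void helper_free(struct helper_info *info)
{
        free(info);
}

static int helper_exec(struct helper_info *info)
{
        /* Reject a missing path up front and drop the descriptor,
         * mirroring the early -EINVAL return added above. */
        if (!info->path) {
                helper_free(info);
                return -EINVAL;
        }
        printf("would execute %s\n", info->path);
        helper_free(info);
        return 0;
}

int main(void)
{
        struct helper_info *info = calloc(1, sizeof(*info));

        if (!info)
                return 1;
        return helper_exec(info) == -EINVAL ? 0 : 1;
}
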
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
254 254
255 255
256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul); 256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul); 257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul); 258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul); 259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul); 260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul); 261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul); 262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
263 263
264int param_set_charp(const char *val, const struct kernel_param *kp) 264int param_set_charp(const char *val, const struct kernel_param *kp)
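
The params.c change matters because the unsigned parser cannot accept a leading minus sign, so signed module parameters (short, int, long) could not be set to negative values until they were routed through kstrtol(). A rough user-space reconstruction of that failure mode; the parse helpers below are stand-ins, not the kernel's kstrto* implementations.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for kstrtoul()/kstrtol(): the unsigned one rejects a sign. */
static int parse_ulong(const char *s, unsigned long *out)
{
        char *end;

        if (*s == '-')
                return -EINVAL;
        errno = 0;
        *out = strtoul(s, &end, 0);
        return (errno || *end != '\0') ? -EINVAL : 0;
}

static int parse_long(const char *s, long *out)
{
        char *end;

        errno = 0;
        *out = strtol(s, &end, 0);
        return (errno || *end != '\0') ? -EINVAL : 0;
}

int main(void)
{
        unsigned long u;
        long l;

        /* Routing a signed parameter through the unsigned parser fails. */
        printf("unsigned parse of \"-1\": %d\n", parse_ulong("-1", &u));
        /* The signed parser, as in the fix, handles it. */
        printf("signed parse of \"-1\": %d (value %ld)\n",
               parse_long("-1", &l), l);
        return 0;
}
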
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
273 */ 273 */
274 wake_up_process(ns->child_reaper); 274 wake_up_process(ns->child_reaper);
275 break; 275 break;
276 case PIDNS_HASH_ADDING:
277 /* Handle a fork failure of the first process */
278 WARN_ON(ns->child_reaper);
279 ns->nr_hashed = 0;
280 /* fall through */
276 case 0: 281 case 0:
277 schedule_work(&ns->proc_work); 282 schedule_work(&ns->proc_work);
278 break; 283 break;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
743 struct memory_bitmap *bm1, *bm2; 743 struct memory_bitmap *bm1, *bm2;
744 int error = 0; 744 int error = 0;
745 745
746 BUG_ON(forbidden_pages_map || free_pages_map); 746 if (forbidden_pages_map && free_pages_map)
747 return 0;
748 else
749 BUG_ON(forbidden_pages_map || free_pages_map);
747 750
748 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 751 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
749 if (!bm1) 752 if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
39 char frozen; 39 char frozen;
40 char ready; 40 char ready;
41 char platform_support; 41 char platform_support;
42 bool free_bitmaps;
42} snapshot_state; 43} snapshot_state;
43 44
44atomic_t snapshot_device_available = ATOMIC_INIT(1); 45atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
82 data->swap = -1; 83 data->swap = -1;
83 data->mode = O_WRONLY; 84 data->mode = O_WRONLY;
84 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 85 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
86 if (!error) {
87 error = create_basic_memory_bitmaps();
88 data->free_bitmaps = !error;
89 }
85 if (error) 90 if (error)
86 pm_notifier_call_chain(PM_POST_RESTORE); 91 pm_notifier_call_chain(PM_POST_RESTORE);
87 } 92 }
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
111 pm_restore_gfp_mask(); 116 pm_restore_gfp_mask();
112 free_basic_memory_bitmaps(); 117 free_basic_memory_bitmaps();
113 thaw_processes(); 118 thaw_processes();
119 } else if (data->free_bitmaps) {
120 free_basic_memory_bitmaps();
114 } 121 }
115 pm_notifier_call_chain(data->mode == O_RDONLY ? 122 pm_notifier_call_chain(data->mode == O_RDONLY ?
116 PM_POST_HIBERNATION : PM_POST_RESTORE); 123 PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
231 break; 238 break;
232 pm_restore_gfp_mask(); 239 pm_restore_gfp_mask();
233 free_basic_memory_bitmaps(); 240 free_basic_memory_bitmaps();
241 data->free_bitmaps = false;
234 thaw_processes(); 242 thaw_processes();
235 data->frozen = 0; 243 data->frozen = 0;
236 break; 244 break;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
32#endif 32#endif
33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; 33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
34 34
35int reboot_default; 35/*
36 * This variable is used privately to keep track of whether or not
37 * reboot_type is still set to its default value (i.e., reboot= hasn't
38 * been set on the command line). This is needed so that we can
39 * suppress DMI scanning for reboot quirks. Without it, it's
40 * impossible to override a faulty reboot quirk without recompiling.
41 */
42int reboot_default = 1;
36int reboot_cpu; 43int reboot_cpu;
37enum reboot_type reboot_type = BOOT_ACPI; 44enum reboot_type reboot_type = BOOT_ACPI;
38int reboot_force; 45int reboot_force;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
124 SEQ_printf(m, " "); 124 SEQ_printf(m, " ");
125 125
126 SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", 126 SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
127 p->comm, p->pid, 127 p->comm, task_pid_nr(p),
128 SPLIT_NS(p->se.vruntime), 128 SPLIT_NS(p->se.vruntime),
129 (long long)(p->nvcsw + p->nivcsw), 129 (long long)(p->nvcsw + p->nivcsw),
130 p->prio); 130 p->prio);
@@ -289,7 +289,7 @@ do { \
289 P(nr_load_updates); 289 P(nr_load_updates);
290 P(nr_uninterruptible); 290 P(nr_uninterruptible);
291 PN(next_balance); 291 PN(next_balance);
292 P(curr->pid); 292 SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
293 PN(clock); 293 PN(clock);
294 P(cpu_load[0]); 294 P(cpu_load[0]);
295 P(cpu_load[1]); 295 P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
492{ 492{
493 unsigned long nr_switches; 493 unsigned long nr_switches;
494 494
495 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, 495 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
496 get_nr_threads(p)); 496 get_nr_threads(p));
497 SEQ_printf(m, 497 SEQ_printf(m,
498 "---------------------------------------------------------" 498 "---------------------------------------------------------"
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9b3fe1cd8f40..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
4242 } 4242 }
4243 4243
4244 if (!se) { 4244 if (!se) {
4245 cfs_rq->h_load = rq->avg.load_avg_contrib; 4245 cfs_rq->h_load = cfs_rq->runnable_load_avg;
4246 cfs_rq->last_h_load_update = now; 4246 cfs_rq->last_h_load_update = now;
4247 } 4247 }
4248 4248
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4823 (busiest->load_per_task * SCHED_POWER_SCALE) / 4823 (busiest->load_per_task * SCHED_POWER_SCALE) /
4824 busiest->group_power; 4824 busiest->group_power;
4825 4825
4826 if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >= 4826 if (busiest->avg_load + scaled_busy_load_per_task >=
4827 (scaled_busy_load_per_task * imbn)) { 4827 local->avg_load + (scaled_busy_load_per_task * imbn)) {
4828 env->imbalance = busiest->load_per_task; 4828 env->imbalance = busiest->load_per_task;
4829 return; 4829 return;
4830 } 4830 }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4896 * max load less than avg load(as we skip the groups at or below 4896 * max load less than avg load(as we skip the groups at or below
4897 * its cpu_power, while calculating max_load..) 4897 * its cpu_power, while calculating max_load..)
4898 */ 4898 */
4899 if (busiest->avg_load < sds->avg_load) { 4899 if (busiest->avg_load <= sds->avg_load ||
4900 local->avg_load >= sds->avg_load) {
4900 env->imbalance = 0; 4901 env->imbalance = 0;
4901 return fix_small_imbalance(env, sds); 4902 return fix_small_imbalance(env, sds);
4902 } 4903 }
@@ -5928,11 +5929,15 @@ static void task_fork_fair(struct task_struct *p)
5928 cfs_rq = task_cfs_rq(current); 5929 cfs_rq = task_cfs_rq(current);
5929 curr = cfs_rq->curr; 5930 curr = cfs_rq->curr;
5930 5931
5931 if (unlikely(task_cpu(p) != this_cpu)) { 5932 /*
5932 rcu_read_lock(); 5933 * Not only the cpu but also the task_group of the parent might have
5933 __set_task_cpu(p, this_cpu); 5934 * been changed after parent->se.parent,cfs_rq were copied to
5934 rcu_read_unlock(); 5935 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
5935 } 5936 * of child point to valid ones.
5937 */
5938 rcu_read_lock();
5939 __set_task_cpu(p, this_cpu);
5940 rcu_read_unlock();
5936 5941
5937 update_curr(cfs_rq); 5942 update_curr(cfs_rq);
5938 5943
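
Two of the fair.c hunks above rearrange comparisons so that unsigned load values are never subtracted below zero. A tiny sketch of why that matters; the values are arbitrary, and the kernel fields involved are unsigned long.

#include <stdio.h>

/* Buggy form: "busiest - local" wraps when busiest < local (unsigned). */
static int imbalanced_old(unsigned long busiest, unsigned long local,
                          unsigned long per_task, unsigned long imbn)
{
        return busiest - local + per_task >= per_task * imbn;
}

/* Fixed form from the hunk: move the subtraction to the other side. */
static int imbalanced_new(unsigned long busiest, unsigned long local,
                          unsigned long per_task, unsigned long imbn)
{
        return busiest + per_task >= local + per_task * imbn;
}

int main(void)
{
        unsigned long busiest = 100, local = 500, per_task = 50, imbn = 2;

        /* Old form wrongly reports an imbalance because of the wraparound. */
        printf("old: %d, new: %d\n",
               imbalanced_old(busiest, local, per_task, imbn),
               imbalanced_new(busiest, local, per_task, imbn));
        return 0;
}
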
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
104} 104}
105 105
106/* 106/*
107 * Called when a process ceases being the active-running process, either 107 * Called when a process ceases being the active-running process involuntarily
108 * voluntarily or involuntarily. Now we can calculate how long we ran. 108 * due, typically, to expiring its time slice (this may also be called when
109 * switching to the idle task). Now we can calculate how long we ran.
109 * Also, if the process is still in the TASK_RUNNING state, call 110 * Also, if the process is still in the TASK_RUNNING state, call
110 * sched_info_queued() to mark that it has now again started waiting on 111 * sched_info_queued() to mark that it has now again started waiting on
111 * the runqueue. 112 * the runqueue.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b8..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
328 328
329static inline void invoke_softirq(void) 329static inline void invoke_softirq(void)
330{ 330{
331 if (!force_irqthreads) 331 if (!force_irqthreads) {
332 __do_softirq(); 332 /*
333 else 333 * We can safely execute softirq on the current stack if
334 * it is the irq stack, because it should be near empty
335 * at this stage. But we have no way to know if the arch
336 * calls irq_exit() on the irq stack. So call softirq
 337 * in its own stack to prevent any overrun on top
338 * of a potentially deep task stack.
339 */
340 do_softirq();
341 } else {
334 wakeup_softirqd(); 342 wakeup_softirqd();
343 }
335} 344}
336 345
337static inline void tick_irq_exit(void) 346static inline void tick_irq_exit(void)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b98577b..bb2215174f05 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
516 schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); 516 schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
517} 517}
518 518
519static void notify_cmos_timer(void) 519void ntp_notify_cmos_timer(void)
520{ 520{
521 schedule_delayed_work(&sync_cmos_work, 0); 521 schedule_delayed_work(&sync_cmos_work, 0);
522} 522}
523 523
524#else 524#else
525static inline void notify_cmos_timer(void) { } 525void ntp_notify_cmos_timer(void) { }
526#endif 526#endif
527 527
528 528
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
687 if (!(time_status & STA_NANO)) 687 if (!(time_status & STA_NANO))
688 txc->time.tv_usec /= NSEC_PER_USEC; 688 txc->time.tv_usec /= NSEC_PER_USEC;
689 689
690 notify_cmos_timer();
691
692 return result; 690 return result;
693} 691}
694 692
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 48b9fffabdc2..947ba25a95a0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
1703 write_seqcount_end(&timekeeper_seq); 1703 write_seqcount_end(&timekeeper_seq);
1704 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1704 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1705 1705
1706 ntp_notify_cmos_timer();
1707
1706 return ret; 1708 return ret;
1707} 1709}
1708 1710
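
The ntp/timekeeping pair above moves the CMOS-sync kick out of the seqcount- and spinlock-protected path and into do_adjtimex() after the locks are dropped. Below is a sketch of that "update under the lock, notify after unlocking" ordering, using a pthread mutex as a stand-in; the names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timekeeper_lock_sketch = PTHREAD_MUTEX_INITIALIZER;
static long clock_offset;

/* Stand-in for ntp_notify_cmos_timer(), which queues delayed work. */
static void notify_cmos_timer_sketch(void)
{
        printf("CMOS sync scheduled\n");
}

static int do_adjtime_sketch(long delta)
{
        pthread_mutex_lock(&timekeeper_lock_sketch);
        clock_offset += delta;          /* update timekeeping state */
        pthread_mutex_unlock(&timekeeper_lock_sketch);

        /* Only after every lock is dropped, kick off the notification
         * that may itself schedule or sleep. */
        notify_cmos_timer_sketch();
        return 0;
}

int main(void)
{
        return do_adjtime_sketch(42);
}
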
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
486 .unpark = watchdog_enable, 486 .unpark = watchdog_enable,
487}; 487};
488 488
489static int watchdog_enable_all_cpus(void) 489static void restart_watchdog_hrtimer(void *info)
490{
491 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
492 int ret;
493
494 /*
495 * No need to cancel and restart hrtimer if it is currently executing
496 * because it will reprogram itself with the new period now.
497 * We should never see it unqueued here because we are running per-cpu
498 * with interrupts disabled.
499 */
500 ret = hrtimer_try_to_cancel(hrtimer);
501 if (ret == 1)
502 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
503 HRTIMER_MODE_REL_PINNED);
504}
505
506static void update_timers(int cpu)
507{
508 struct call_single_data data = {.func = restart_watchdog_hrtimer};
509 /*
 510 * Make sure that the perf event counter will adapt to a new
511 * sampling period. Updating the sampling period directly would
512 * be much nicer but we do not have an API for that now so
513 * let's use a big hammer.
514 * Hrtimer will adopt the new period on the next tick but this
515 * might be late already so we have to restart the timer as well.
516 */
517 watchdog_nmi_disable(cpu);
518 __smp_call_function_single(cpu, &data, 1);
519 watchdog_nmi_enable(cpu);
520}
521
522static void update_timers_all_cpus(void)
523{
524 int cpu;
525
526 get_online_cpus();
527 preempt_disable();
528 for_each_online_cpu(cpu)
529 update_timers(cpu);
530 preempt_enable();
531 put_online_cpus();
532}
533
534static int watchdog_enable_all_cpus(bool sample_period_changed)
490{ 535{
491 int err = 0; 536 int err = 0;
492 537
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
496 pr_err("Failed to create watchdog threads, disabled\n"); 541 pr_err("Failed to create watchdog threads, disabled\n");
497 else 542 else
498 watchdog_running = 1; 543 watchdog_running = 1;
544 } else if (sample_period_changed) {
545 update_timers_all_cpus();
499 } 546 }
500 547
501 return err; 548 return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
520 void __user *buffer, size_t *lenp, loff_t *ppos) 567 void __user *buffer, size_t *lenp, loff_t *ppos)
521{ 568{
522 int err, old_thresh, old_enabled; 569 int err, old_thresh, old_enabled;
570 static DEFINE_MUTEX(watchdog_proc_mutex);
523 571
572 mutex_lock(&watchdog_proc_mutex);
524 old_thresh = ACCESS_ONCE(watchdog_thresh); 573 old_thresh = ACCESS_ONCE(watchdog_thresh);
525 old_enabled = ACCESS_ONCE(watchdog_user_enabled); 574 old_enabled = ACCESS_ONCE(watchdog_user_enabled);
526 575
527 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 576 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528 if (err || !write) 577 if (err || !write)
529 return err; 578 goto out;
530 579
531 set_sample_period(); 580 set_sample_period();
532 /* 581 /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
535 * watchdog_*_all_cpus() function takes care of this. 584 * watchdog_*_all_cpus() function takes care of this.
536 */ 585 */
537 if (watchdog_user_enabled && watchdog_thresh) 586 if (watchdog_user_enabled && watchdog_thresh)
538 err = watchdog_enable_all_cpus(); 587 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
539 else 588 else
540 watchdog_disable_all_cpus(); 589 watchdog_disable_all_cpus();
541 590
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
544 watchdog_thresh = old_thresh; 593 watchdog_thresh = old_thresh;
545 watchdog_user_enabled = old_enabled; 594 watchdog_user_enabled = old_enabled;
546 } 595 }
547 596out:
597 mutex_unlock(&watchdog_proc_mutex);
548 return err; 598 return err;
549} 599}
550#endif /* CONFIG_SYSCTL */ 600#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
554 set_sample_period(); 604 set_sample_period();
555 605
556 if (watchdog_user_enabled) 606 if (watchdog_user_enabled)
557 watchdog_enable_all_cpus(); 607 watchdog_enable_all_cpus(false);
558} 608}
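
Beyond the per-CPU timer restart helpers, the watchdog change serializes the whole proc handler behind a local mutex and funnels every return through one unlock label. A compact sketch of that pattern follows; the threshold semantics are simplified and the names are not kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int watchdog_thresh_sketch = 10;

static int set_thresh(int new_thresh)
{
        int old, err = 0;

        /* Serialize concurrent writers for the whole read-modify-write. */
        pthread_mutex_lock(&proc_mutex);
        old = watchdog_thresh_sketch;

        if (new_thresh <= 0) {
                err = -EINVAL;
                goto out;
        }

        watchdog_thresh_sketch = new_thresh;
        if (old != new_thresh)
                printf("threshold %d -> %d: timers would be restarted\n",
                       old, new_thresh);
out:
        pthread_mutex_unlock(&proc_mutex);
        return err;
}

int main(void)
{
        set_thresh(20);
        return set_thresh(-1) == -EINVAL ? 0 : 1;
}
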
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
14 14
15const char hex_asc[] = "0123456789abcdef"; 15const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 16EXPORT_SYMBOL(hex_asc);
17const char hex_asc_upper[] = "0123456789ABCDEF";
18EXPORT_SYMBOL(hex_asc_upper);
17 19
18/** 20/**
19 * hex_to_bin - convert a hex digit to its real value 21 * hex_to_bin - convert a hex digit to its real value
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..669bf190d4fb 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
933 933
934bool kobj_ns_current_may_mount(enum kobj_ns_type type) 934bool kobj_ns_current_may_mount(enum kobj_ns_type type)
935{ 935{
936 bool may_mount = false; 936 bool may_mount = true;
937
938 if (type == KOBJ_NS_TYPE_NONE)
939 return true;
940 937
941 spin_lock(&kobj_ns_type_lock); 938 spin_lock(&kobj_ns_type_lock);
942 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 939 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
4#ifdef CONFIG_CMPXCHG_LOCKREF 4#ifdef CONFIG_CMPXCHG_LOCKREF
5 5
6/* 6/*
7 * Allow weakly-ordered memory architectures to provide barrier-less
8 * cmpxchg semantics for lockref updates.
9 */
10#ifndef cmpxchg64_relaxed
11# define cmpxchg64_relaxed cmpxchg64
12#endif
13
14/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
7 * Note that the "cmpxchg()" reloads the "old" value for the 23 * Note that the "cmpxchg()" reloads the "old" value for the
8 * failure case. 24 * failure case.
9 */ 25 */
@@ -14,12 +30,13 @@
14 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 30 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
15 struct lockref new = old, prev = old; \ 31 struct lockref new = old, prev = old; \
16 CODE \ 32 CODE \
17 old.lock_count = cmpxchg(&lockref->lock_count, \ 33 old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
18 old.lock_count, new.lock_count); \ 34 old.lock_count, \
35 new.lock_count); \
19 if (likely(old.lock_count == prev.lock_count)) { \ 36 if (likely(old.lock_count == prev.lock_count)) { \
20 SUCCESS; \ 37 SUCCESS; \
21 } \ 38 } \
22 cpu_relax(); \ 39 arch_mutex_cpu_relax(); \
23 } \ 40 } \
24} while (0) 41} while (0)
25 42
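
The lockref hunks provide default definitions for cmpxchg64_relaxed() and arch_mutex_cpu_relax() and use them inside the CMPXCHG_LOOP() retry. Below is a user-space sketch of the same retry shape with C11 atomics; it only models the loop, not lockref's packing of a spinlock and reference count into one 64-bit word.

#include <stdatomic.h>
#include <stdio.h>

/* Default fallback, in the spirit of the arch_mutex_cpu_relax() hunk. */
#ifndef cpu_relax_sketch
# define cpu_relax_sketch() ((void)0)
#endif

static _Atomic unsigned long lock_count;

/* Same shape as CMPXCHG_LOOP(): copy, modify, publish with a relaxed
 * compare-and-swap, and spin politely until it sticks. */
static void lockref_get_sketch(void)
{
        unsigned long old = atomic_load_explicit(&lock_count,
                                                 memory_order_relaxed);

        for (;;) {
                unsigned long new = old + 1;

                if (atomic_compare_exchange_weak_explicit(&lock_count,
                                &old, new,
                                memory_order_relaxed,
                                memory_order_relaxed))
                        break;
                cpu_relax_sketch();     /* 'old' now holds the fresh value */
        }
}

int main(void)
{
        lockref_get_sketch();
        lockref_get_sketch();
        printf("count = %lu\n", atomic_load(&lock_count));
        return 0;
}
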
diff --git a/mm/Kconfig b/mm/Kconfig
index 026771a9b097..394838f489eb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
183config MEMORY_HOTREMOVE 183config MEMORY_HOTREMOVE
184 bool "Allow for memory hot remove" 184 bool "Allow for memory hot remove"
185 select MEMORY_ISOLATION 185 select MEMORY_ISOLATION
186 select HAVE_BOOTMEM_INFO_NODE if X86_64 186 select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE 187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
188 depends on MIGRATION 188 depends on MIGRATION
189 189
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a4339a7d..5a7d58fb883b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
204 struct bio_vec *to, *from; 204 struct bio_vec *to, *from;
205 unsigned i; 205 unsigned i;
206 206
207 if (force)
208 goto bounce;
207 bio_for_each_segment(from, *bio_orig, i) 209 bio_for_each_segment(from, *bio_orig, i)
208 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) 210 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
209 goto bounce; 211 goto bounce;
diff --git a/mm/compaction.c b/mm/compaction.c
index c43789388cd8..b5326b141a25 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
677 pfn -= pageblock_nr_pages) { 677 pfn -= pageblock_nr_pages) {
678 unsigned long isolated; 678 unsigned long isolated;
679 679
680 /*
681 * This can iterate a massively long zone without finding any
682 * suitable migration targets, so periodically check if we need
683 * to schedule.
684 */
685 cond_resched();
686
680 if (!pfn_valid(pfn)) 687 if (!pfn_valid(pfn))
681 continue; 688 continue;
682 689
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index afc2daa91c60..4c84678371eb 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
20 if (!capable(CAP_SYS_ADMIN)) 20 if (!capable(CAP_SYS_ADMIN))
21 return -EPERM; 21 return -EPERM;
22 22
23 if (!hwpoison_filter_enable)
24 goto inject;
25 if (!pfn_valid(pfn)) 23 if (!pfn_valid(pfn))
26 return -ENXIO; 24 return -ENXIO;
27 25
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
33 if (!get_page_unless_zero(hpage)) 31 if (!get_page_unless_zero(hpage))
34 return 0; 32 return 0;
35 33
34 if (!hwpoison_filter_enable)
35 goto inject;
36
36 if (!PageLRU(p) && !PageHuge(p)) 37 if (!PageLRU(p) && !PageHuge(p))
37 shake_page(p, 0); 38 shake_page(p, 0);
38 /* 39 /*
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
343 */ 343 */
344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) 344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
345{ 345{
346 struct page *p;
346 if (!capable(CAP_SYS_ADMIN)) 347 if (!capable(CAP_SYS_ADMIN))
347 return -EPERM; 348 return -EPERM;
348 for (; start < end; start += PAGE_SIZE) { 349 for (; start < end; start += PAGE_SIZE <<
349 struct page *p; 350 compound_order(compound_head(p))) {
350 int ret; 351 int ret;
351 352
352 ret = get_user_pages_fast(start, 1, 0, &p); 353 ret = get_user_pages_fast(start, 1, 0, &p);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5ff3ce13029..1c52ddbc839b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
39#include <linux/limits.h> 39#include <linux/limits.h>
40#include <linux/export.h> 40#include <linux/export.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/rbtree.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/swap.h> 44#include <linux/swap.h>
44#include <linux/swapops.h> 45#include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
160 161
161 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 162 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
162 163
164 struct rb_node tree_node; /* RB tree node */
165 unsigned long long usage_in_excess;/* Set to the value by which */
166 /* the soft limit is exceeded*/
167 bool on_tree;
163 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 168 struct mem_cgroup *memcg; /* Back pointer, we cannot */
164 /* use container_of */ 169 /* use container_of */
165}; 170};
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
168 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 173 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
169}; 174};
170 175
176/*
177 * Cgroups above their limits are maintained in a RB-Tree, independent of
178 * their hierarchy representation
179 */
180
181struct mem_cgroup_tree_per_zone {
182 struct rb_root rb_root;
183 spinlock_t lock;
184};
185
186struct mem_cgroup_tree_per_node {
187 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
188};
189
190struct mem_cgroup_tree {
191 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
192};
193
194static struct mem_cgroup_tree soft_limit_tree __read_mostly;
195
171struct mem_cgroup_threshold { 196struct mem_cgroup_threshold {
172 struct eventfd_ctx *eventfd; 197 struct eventfd_ctx *eventfd;
173 u64 threshold; 198 u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
303 atomic_t numainfo_events; 328 atomic_t numainfo_events;
304 atomic_t numainfo_updating; 329 atomic_t numainfo_updating;
305#endif 330#endif
306 /*
307 * Protects soft_contributed transitions.
308 * See mem_cgroup_update_soft_limit
309 */
310 spinlock_t soft_lock;
311
312 /*
313 * If true then this group has increased parents' children_in_excess
314 * when it got over the soft limit.
 315 * When a group falls below the soft limit, parents' children_in_excess
316 * is decreased and soft_contributed changed to false.
317 */
318 bool soft_contributed;
319
320 /* Number of children that are in soft limit excess */
321 atomic_t children_in_excess;
322 331
323 struct mem_cgroup_per_node *nodeinfo[0]; 332 struct mem_cgroup_per_node *nodeinfo[0];
324 /* WARNING: nodeinfo must be the last member here */ 333 /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
422 * limit reclaim to prevent infinite loops, if they ever occur. 431 * limit reclaim to prevent infinite loops, if they ever occur.
423 */ 432 */
424#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 433#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
434#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
425 435
426enum charge_type { 436enum charge_type {
427 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 437 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
648 return mem_cgroup_zoneinfo(memcg, nid, zid); 658 return mem_cgroup_zoneinfo(memcg, nid, zid);
649} 659}
650 660
661static struct mem_cgroup_tree_per_zone *
662soft_limit_tree_node_zone(int nid, int zid)
663{
664 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
665}
666
667static struct mem_cgroup_tree_per_zone *
668soft_limit_tree_from_page(struct page *page)
669{
670 int nid = page_to_nid(page);
671 int zid = page_zonenum(page);
672
673 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
674}
675
676static void
677__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
678 struct mem_cgroup_per_zone *mz,
679 struct mem_cgroup_tree_per_zone *mctz,
680 unsigned long long new_usage_in_excess)
681{
682 struct rb_node **p = &mctz->rb_root.rb_node;
683 struct rb_node *parent = NULL;
684 struct mem_cgroup_per_zone *mz_node;
685
686 if (mz->on_tree)
687 return;
688
689 mz->usage_in_excess = new_usage_in_excess;
690 if (!mz->usage_in_excess)
691 return;
692 while (*p) {
693 parent = *p;
694 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
695 tree_node);
696 if (mz->usage_in_excess < mz_node->usage_in_excess)
697 p = &(*p)->rb_left;
698 /*
699 * We can't avoid mem cgroups that are over their soft
700 * limit by the same amount
701 */
702 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
703 p = &(*p)->rb_right;
704 }
705 rb_link_node(&mz->tree_node, parent, p);
706 rb_insert_color(&mz->tree_node, &mctz->rb_root);
707 mz->on_tree = true;
708}
709
710static void
711__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
712 struct mem_cgroup_per_zone *mz,
713 struct mem_cgroup_tree_per_zone *mctz)
714{
715 if (!mz->on_tree)
716 return;
717 rb_erase(&mz->tree_node, &mctz->rb_root);
718 mz->on_tree = false;
719}
720
721static void
722mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
723 struct mem_cgroup_per_zone *mz,
724 struct mem_cgroup_tree_per_zone *mctz)
725{
726 spin_lock(&mctz->lock);
727 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
728 spin_unlock(&mctz->lock);
729}
730
731
732static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
733{
734 unsigned long long excess;
735 struct mem_cgroup_per_zone *mz;
736 struct mem_cgroup_tree_per_zone *mctz;
737 int nid = page_to_nid(page);
738 int zid = page_zonenum(page);
739 mctz = soft_limit_tree_from_page(page);
740
741 /*
 742 * Necessary to update all ancestors when hierarchy is used,
743 * because their event counter is not touched.
744 */
745 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
746 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
747 excess = res_counter_soft_limit_excess(&memcg->res);
748 /*
749 * We have to update the tree if mz is on RB-tree or
750 * mem is over its softlimit.
751 */
752 if (excess || mz->on_tree) {
753 spin_lock(&mctz->lock);
754 /* if on-tree, remove it */
755 if (mz->on_tree)
756 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
757 /*
758 * Insert again. mz->usage_in_excess will be updated.
759 * If excess is 0, no tree ops.
760 */
761 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
762 spin_unlock(&mctz->lock);
763 }
764 }
765}
766
767static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
768{
769 int node, zone;
770 struct mem_cgroup_per_zone *mz;
771 struct mem_cgroup_tree_per_zone *mctz;
772
773 for_each_node(node) {
774 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
775 mz = mem_cgroup_zoneinfo(memcg, node, zone);
776 mctz = soft_limit_tree_node_zone(node, zone);
777 mem_cgroup_remove_exceeded(memcg, mz, mctz);
778 }
779 }
780}
781
782static struct mem_cgroup_per_zone *
783__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
784{
785 struct rb_node *rightmost = NULL;
786 struct mem_cgroup_per_zone *mz;
787
788retry:
789 mz = NULL;
790 rightmost = rb_last(&mctz->rb_root);
791 if (!rightmost)
792 goto done; /* Nothing to reclaim from */
793
794 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
795 /*
796 * Remove the node now but someone else can add it back,
 797 * we will add it back at the end of reclaim to its correct
798 * position in the tree.
799 */
800 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
801 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
802 !css_tryget(&mz->memcg->css))
803 goto retry;
804done:
805 return mz;
806}
807
808static struct mem_cgroup_per_zone *
809mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
810{
811 struct mem_cgroup_per_zone *mz;
812
813 spin_lock(&mctz->lock);
814 mz = __mem_cgroup_largest_soft_limit_node(mctz);
815 spin_unlock(&mctz->lock);
816 return mz;
817}
818
651/* 819/*
652 * Implementation Note: reading percpu statistics for memcg. 820 * Implementation Note: reading percpu statistics for memcg.
653 * 821 *
@@ -822,48 +990,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
822} 990}
823 991
824/* 992/*
825 * Called from rate-limited memcg_check_events when enough
826 * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
827 * that all the parents up the hierarchy will be notified that this group
828 * is in excess or that it is not in excess anymore. mmecg->soft_contributed
829 * makes the transition a single action whenever the state flips from one to
830 * the other.
831 */
832static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
833{
834 unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
835 struct mem_cgroup *parent = memcg;
836 int delta = 0;
837
838 spin_lock(&memcg->soft_lock);
839 if (excess) {
840 if (!memcg->soft_contributed) {
841 delta = 1;
842 memcg->soft_contributed = true;
843 }
844 } else {
845 if (memcg->soft_contributed) {
846 delta = -1;
847 memcg->soft_contributed = false;
848 }
849 }
850
851 /*
852 * Necessary to update all ancestors when hierarchy is used
853 * because their event counter is not touched.
854 * We track children even outside the hierarchy for the root
855 * cgroup because tree walk starting at root should visit
856 * all cgroups and we want to prevent from pointless tree
857 * walk if no children is below the limit.
858 */
859 while (delta && (parent = parent_mem_cgroup(parent)))
860 atomic_add(delta, &parent->children_in_excess);
861 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
862 atomic_add(delta, &root_mem_cgroup->children_in_excess);
863 spin_unlock(&memcg->soft_lock);
864}
865
866/*
867 * Check events in order. 993 * Check events in order.
868 * 994 *
869 */ 995 */
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
886 1012
887 mem_cgroup_threshold(memcg); 1013 mem_cgroup_threshold(memcg);
888 if (unlikely(do_softlimit)) 1014 if (unlikely(do_softlimit))
889 mem_cgroup_update_soft_limit(memcg); 1015 mem_cgroup_update_tree(memcg, page);
890#if MAX_NUMNODES > 1 1016#if MAX_NUMNODES > 1
891 if (unlikely(do_numainfo)) 1017 if (unlikely(do_numainfo))
892 atomic_inc(&memcg->numainfo_events); 1018 atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
929 return memcg; 1055 return memcg;
930} 1056}
931 1057
932static enum mem_cgroup_filter_t
933mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
934 mem_cgroup_iter_filter cond)
935{
936 if (!cond)
937 return VISIT;
938 return cond(memcg, root);
939}
940
941/* 1058/*
942 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1059 * Returns a next (in a pre-order walk) alive memcg (with elevated css
943 * ref. count) or NULL if the whole root's subtree has been visited. 1060 * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
945 * helper function to be used by mem_cgroup_iter 1062 * helper function to be used by mem_cgroup_iter
946 */ 1063 */
947static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1064static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
948 struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond) 1065 struct mem_cgroup *last_visited)
949{ 1066{
950 struct cgroup_subsys_state *prev_css, *next_css; 1067 struct cgroup_subsys_state *prev_css, *next_css;
951 1068
@@ -963,31 +1080,11 @@ skip_node:
963 if (next_css) { 1080 if (next_css) {
964 struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1081 struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
965 1082
966 switch (mem_cgroup_filter(mem, root, cond)) { 1083 if (css_tryget(&mem->css))
967 case SKIP: 1084 return mem;
1085 else {
968 prev_css = next_css; 1086 prev_css = next_css;
969 goto skip_node; 1087 goto skip_node;
970 case SKIP_TREE:
971 if (mem == root)
972 return NULL;
973 /*
974 * css_rightmost_descendant is not an optimal way to
975 * skip through a subtree (especially for imbalanced
976 * trees leaning to right) but that's what we have right
977 * now. More effective solution would be traversing
978 * right-up for first non-NULL without calling
979 * css_next_descendant_pre afterwards.
980 */
981 prev_css = css_rightmost_descendant(next_css);
982 goto skip_node;
983 case VISIT:
984 if (css_tryget(&mem->css))
985 return mem;
986 else {
987 prev_css = next_css;
988 goto skip_node;
989 }
990 break;
991 } 1088 }
992 } 1089 }
993 1090
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1051 * @root: hierarchy root 1148 * @root: hierarchy root
1052 * @prev: previously returned memcg, NULL on first invocation 1149 * @prev: previously returned memcg, NULL on first invocation
1053 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1150 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1054 * @cond: filter for visited nodes, NULL for no filter
1055 * 1151 *
1056 * Returns references to children of the hierarchy below @root, or 1152 * Returns references to children of the hierarchy below @root, or
1057 * @root itself, or %NULL after a full round-trip. 1153 * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1064 * divide up the memcgs in the hierarchy among all concurrent 1160 * divide up the memcgs in the hierarchy among all concurrent
1065 * reclaimers operating on the same zone and priority. 1161 * reclaimers operating on the same zone and priority.
1066 */ 1162 */
1067struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 1163struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1068 struct mem_cgroup *prev, 1164 struct mem_cgroup *prev,
1069 struct mem_cgroup_reclaim_cookie *reclaim, 1165 struct mem_cgroup_reclaim_cookie *reclaim)
1070 mem_cgroup_iter_filter cond)
1071{ 1166{
1072 struct mem_cgroup *memcg = NULL; 1167 struct mem_cgroup *memcg = NULL;
1073 struct mem_cgroup *last_visited = NULL; 1168 struct mem_cgroup *last_visited = NULL;
1074 1169
1075 if (mem_cgroup_disabled()) { 1170 if (mem_cgroup_disabled())
1076 /* first call must return non-NULL, second return NULL */ 1171 return NULL;
1077 return (struct mem_cgroup *)(unsigned long)!prev;
1078 }
1079 1172
1080 if (!root) 1173 if (!root)
1081 root = root_mem_cgroup; 1174 root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1086 if (!root->use_hierarchy && root != root_mem_cgroup) { 1179 if (!root->use_hierarchy && root != root_mem_cgroup) {
1087 if (prev) 1180 if (prev)
1088 goto out_css_put; 1181 goto out_css_put;
1089 if (mem_cgroup_filter(root, root, cond) == VISIT) 1182 return root;
1090 return root;
1091 return NULL;
1092 } 1183 }
1093 1184
1094 rcu_read_lock(); 1185 rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1111 last_visited = mem_cgroup_iter_load(iter, root, &seq); 1202 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1112 } 1203 }
1113 1204
1114 memcg = __mem_cgroup_iter_next(root, last_visited, cond); 1205 memcg = __mem_cgroup_iter_next(root, last_visited);
1115 1206
1116 if (reclaim) { 1207 if (reclaim) {
1117 mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1208 mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1122 reclaim->generation = iter->generation; 1213 reclaim->generation = iter->generation;
1123 } 1214 }
1124 1215
1125 /* 1216 if (prev && !memcg)
1126 * We have finished the whole tree walk or no group has been
1127 * visited because filter told us to skip the root node.
1128 */
1129 if (!memcg && (prev || (cond && !last_visited)))
1130 goto out_unlock; 1217 goto out_unlock;
1131 } 1218 }
1132out_unlock: 1219out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1767 return total; 1854 return total;
1768} 1855}
1769 1856
1770#if MAX_NUMNODES > 1
1771/** 1857/**
1772 * test_mem_cgroup_node_reclaimable 1858 * test_mem_cgroup_node_reclaimable
1773 * @memcg: the target memcg 1859 * @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1790 return false; 1876 return false;
1791 1877
1792} 1878}
1879#if MAX_NUMNODES > 1
1793 1880
1794/* 1881/*
1795 * Always updating the nodemask is not very good - even if we have an empty 1882 * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1857 return node; 1944 return node;
1858} 1945}
1859 1946
1947/*
1948 * Check all nodes whether it contains reclaimable pages or not.
1949 * For quick scan, we make use of scan_nodes. This will allow us to skip
 1950 * unused nodes. But scan_nodes is lazily updated and may not contain
 1951 * enough new information. We need to double-check.
1952 */
1953static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1954{
1955 int nid;
1956
1957 /*
1958 * quick check...making use of scan_node.
1959 * We can skip unused nodes.
1960 */
1961 if (!nodes_empty(memcg->scan_nodes)) {
1962 for (nid = first_node(memcg->scan_nodes);
1963 nid < MAX_NUMNODES;
1964 nid = next_node(nid, memcg->scan_nodes)) {
1965
1966 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1967 return true;
1968 }
1969 }
1970 /*
1971 * Check rest of nodes.
1972 */
1973 for_each_node_state(nid, N_MEMORY) {
1974 if (node_isset(nid, memcg->scan_nodes))
1975 continue;
1976 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1977 return true;
1978 }
1979 return false;
1980}
1981
1860#else 1982#else
1861int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1983int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1862{ 1984{
1863 return 0; 1985 return 0;
1864} 1986}
1865 1987
1866#endif 1988static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1867
1868/*
1869 * A group is eligible for the soft limit reclaim under the given root
1870 * hierarchy if
1871 * a) it is over its soft limit
1872 * b) any parent up the hierarchy is over its soft limit
1873 *
1874 * If the given group doesn't have any children over the limit then it
1875 * doesn't make any sense to iterate its subtree.
1876 */
1877enum mem_cgroup_filter_t
1878mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
1879 struct mem_cgroup *root)
1880{ 1989{
1881 struct mem_cgroup *parent; 1990 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1882 1991}
1883 if (!memcg) 1992#endif
1884 memcg = root_mem_cgroup;
1885 parent = memcg;
1886
1887 if (res_counter_soft_limit_excess(&memcg->res))
1888 return VISIT;
1889 1993
1890 /* 1994static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1891 * If any parent up to the root in the hierarchy is over its soft limit 1995 struct zone *zone,
1892 * then we have to obey and reclaim from this group as well. 1996 gfp_t gfp_mask,
1893 */ 1997 unsigned long *total_scanned)
1894 while ((parent = parent_mem_cgroup(parent))) { 1998{
1895 if (res_counter_soft_limit_excess(&parent->res)) 1999 struct mem_cgroup *victim = NULL;
1896 return VISIT; 2000 int total = 0;
1897 if (parent == root) 2001 int loop = 0;
2002 unsigned long excess;
2003 unsigned long nr_scanned;
2004 struct mem_cgroup_reclaim_cookie reclaim = {
2005 .zone = zone,
2006 .priority = 0,
2007 };
2008
2009 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2010
2011 while (1) {
2012 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2013 if (!victim) {
2014 loop++;
2015 if (loop >= 2) {
2016 /*
2017 * If we have not been able to reclaim
 2018 * anything, it might be because there are
2019 * no reclaimable pages under this hierarchy
2020 */
2021 if (!total)
2022 break;
2023 /*
2024 * We want to do more targeted reclaim.
 2025 * excess >> 2 is not too excessive so as to
 2026 * reclaim too much, nor too little that we keep
2027 * coming back to reclaim from this cgroup
2028 */
2029 if (total >= (excess >> 2) ||
2030 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2031 break;
2032 }
2033 continue;
2034 }
2035 if (!mem_cgroup_reclaimable(victim, false))
2036 continue;
2037 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2038 zone, &nr_scanned);
2039 *total_scanned += nr_scanned;
2040 if (!res_counter_soft_limit_excess(&root_memcg->res))
1898 break; 2041 break;
1899 } 2042 }
1900 2043 mem_cgroup_iter_break(root_memcg, victim);
1901 if (!atomic_read(&memcg->children_in_excess)) 2044 return total;
1902 return SKIP_TREE;
1903 return SKIP;
1904} 2045}
1905 2046
1906static DEFINE_SPINLOCK(memcg_oom_lock); 2047static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2812 unlock_page_cgroup(pc); 2953 unlock_page_cgroup(pc);
2813 2954
2814 /* 2955 /*
2815 * "charge_statistics" updated event counter. 2956 * "charge_statistics" updated event counter. Then, check it.
2957 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2958 * if they exceeds softlimit.
2816 */ 2959 */
2817 memcg_check_events(memcg, page); 2960 memcg_check_events(memcg, page);
2818} 2961}
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4647 return ret; 4790 return ret;
4648} 4791}
4649 4792
4793unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4794 gfp_t gfp_mask,
4795 unsigned long *total_scanned)
4796{
4797 unsigned long nr_reclaimed = 0;
4798 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4799 unsigned long reclaimed;
4800 int loop = 0;
4801 struct mem_cgroup_tree_per_zone *mctz;
4802 unsigned long long excess;
4803 unsigned long nr_scanned;
4804
4805 if (order > 0)
4806 return 0;
4807
4808 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4809 /*
4810 * This loop can run a while, specially if mem_cgroup's continuously
4811 * keep exceeding their soft limit and putting the system under
4812 * pressure
4813 */
4814 do {
4815 if (next_mz)
4816 mz = next_mz;
4817 else
4818 mz = mem_cgroup_largest_soft_limit_node(mctz);
4819 if (!mz)
4820 break;
4821
4822 nr_scanned = 0;
4823 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4824 gfp_mask, &nr_scanned);
4825 nr_reclaimed += reclaimed;
4826 *total_scanned += nr_scanned;
4827 spin_lock(&mctz->lock);
4828
4829 /*
4830 * If we failed to reclaim anything from this memory cgroup
4831 * it is time to move on to the next cgroup
4832 */
4833 next_mz = NULL;
4834 if (!reclaimed) {
4835 do {
4836 /*
4837 * Loop until we find yet another one.
4838 *
4839 * By the time we get the soft_limit lock
 4840 * again, someone might have added the
4841 * group back on the RB tree. Iterate to
4842 * make sure we get a different mem.
4843 * mem_cgroup_largest_soft_limit_node returns
4844 * NULL if no other cgroup is present on
4845 * the tree
4846 */
4847 next_mz =
4848 __mem_cgroup_largest_soft_limit_node(mctz);
4849 if (next_mz == mz)
4850 css_put(&next_mz->memcg->css);
4851 else /* next_mz == NULL or other memcg */
4852 break;
4853 } while (1);
4854 }
4855 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4856 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4857 /*
4858 * One school of thought says that we should not add
4859 * back the node to the tree if reclaim returns 0.
4860 * But our reclaim could return 0, simply because due
4861 * to priority we are exposing a smaller subset of
4862 * memory to reclaim from. Consider this as a longer
4863 * term TODO.
4864 */
4865 /* If excess == 0, no tree ops */
4866 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4867 spin_unlock(&mctz->lock);
4868 css_put(&mz->memcg->css);
4869 loop++;
4870 /*
4871 * Could not reclaim anything and there are no more
4872 * mem cgroups to try or we seem to be looping without
4873 * reclaiming anything.
4874 */
4875 if (!nr_reclaimed &&
4876 (next_mz == NULL ||
4877 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4878 break;
4879 } while (!nr_reclaimed);
4880 if (next_mz)
4881 css_put(&next_mz->memcg->css);
4882 return nr_reclaimed;
4883}
4884
4650/** 4885/**
4651 * mem_cgroup_force_empty_list - clears LRU of a group 4886 * mem_cgroup_force_empty_list - clears LRU of a group
4652 * @memcg: group to clear 4887 * @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5911 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6146 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5912 mz = &pn->zoneinfo[zone]; 6147 mz = &pn->zoneinfo[zone];
5913 lruvec_init(&mz->lruvec); 6148 lruvec_init(&mz->lruvec);
6149 mz->usage_in_excess = 0;
6150 mz->on_tree = false;
5914 mz->memcg = memcg; 6151 mz->memcg = memcg;
5915 } 6152 }
5916 memcg->nodeinfo[node] = pn; 6153 memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5966 int node; 6203 int node;
5967 size_t size = memcg_size(); 6204 size_t size = memcg_size();
5968 6205
6206 mem_cgroup_remove_from_trees(memcg);
5969 free_css_id(&mem_cgroup_subsys, &memcg->css); 6207 free_css_id(&mem_cgroup_subsys, &memcg->css);
5970 6208
5971 for_each_node(node) 6209 for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6002} 6240}
6003EXPORT_SYMBOL(parent_mem_cgroup); 6241EXPORT_SYMBOL(parent_mem_cgroup);
6004 6242
6243static void __init mem_cgroup_soft_limit_tree_init(void)
6244{
6245 struct mem_cgroup_tree_per_node *rtpn;
6246 struct mem_cgroup_tree_per_zone *rtpz;
6247 int tmp, node, zone;
6248
6249 for_each_node(node) {
6250 tmp = node;
6251 if (!node_state(node, N_NORMAL_MEMORY))
6252 tmp = -1;
6253 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6254 BUG_ON(!rtpn);
6255
6256 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6257
6258 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6259 rtpz = &rtpn->rb_tree_per_zone[zone];
6260 rtpz->rb_root = RB_ROOT;
6261 spin_lock_init(&rtpz->lock);
6262 }
6263 }
6264}
6265
6005static struct cgroup_subsys_state * __ref 6266static struct cgroup_subsys_state * __ref
6006mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6267mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6007{ 6268{
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6031 mutex_init(&memcg->thresholds_lock); 6292 mutex_init(&memcg->thresholds_lock);
6032 spin_lock_init(&memcg->move_lock); 6293 spin_lock_init(&memcg->move_lock);
6033 vmpressure_init(&memcg->vmpressure); 6294 vmpressure_init(&memcg->vmpressure);
6034 spin_lock_init(&memcg->soft_lock);
6035 6295
6036 return &memcg->css; 6296 return &memcg->css;
6037 6297
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6109 6369
6110 mem_cgroup_invalidate_reclaim_iterators(memcg); 6370 mem_cgroup_invalidate_reclaim_iterators(memcg);
6111 mem_cgroup_reparent_charges(memcg); 6371 mem_cgroup_reparent_charges(memcg);
6112 if (memcg->soft_contributed) {
6113 while ((memcg = parent_mem_cgroup(memcg)))
6114 atomic_dec(&memcg->children_in_excess);
6115
6116 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
6117 atomic_dec(&root_mem_cgroup->children_in_excess);
6118 }
6119 mem_cgroup_destroy_all_caches(memcg); 6372 mem_cgroup_destroy_all_caches(memcg);
6120 vmpressure_cleanup(&memcg->vmpressure); 6373 vmpressure_cleanup(&memcg->vmpressure);
6121} 6374}
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
6790{ 7043{
6791 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7044 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6792 enable_swap_cgroup(); 7045 enable_swap_cgroup();
7046 mem_cgroup_soft_limit_tree_init();
6793 memcg_stock_init(); 7047 memcg_stock_init();
6794 return 0; 7048 return 0;
6795} 7049}
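
The bulk of the memcontrol diff reinstates the per-zone tree of cgroups ordered by how far they exceed their soft limit, with reclaim always picking the rightmost (largest-excess) entry. A hedged sketch of that ordering follows, using a plain unbalanced BST in place of the kernel rb-tree; only the ordering idea is modelled, not locking or res_counter accounting.

#include <stdio.h>
#include <stdlib.h>

/* A plain unbalanced BST stands in for the kernel rb-tree. */
struct excess_node {
        unsigned long long usage_in_excess;
        struct excess_node *left, *right;
};

static struct excess_node *insert_excess(struct excess_node *root,
                                         struct excess_node *nd)
{
        if (!root)
                return nd;
        if (nd->usage_in_excess < root->usage_in_excess)
                root->left = insert_excess(root->left, nd);
        else    /* ties go right, as in __mem_cgroup_insert_exceeded() */
                root->right = insert_excess(root->right, nd);
        return root;
}

/* Rightmost node == largest excess == first reclaim victim. */
static struct excess_node *largest_excess(struct excess_node *root)
{
        while (root && root->right)
                root = root->right;
        return root;
}

int main(void)
{
        unsigned long long excesses[] = { 4096, 65536, 128 };
        struct excess_node *root = NULL;

        for (int i = 0; i < 3; i++) {
                struct excess_node *nd = calloc(1, sizeof(*nd));

                if (!nd)
                        return 1;
                nd->usage_in_excess = excesses[i];
                root = insert_excess(root, nd);
        }
        printf("reclaim first from the group %llu bytes over its limit\n",
               largest_excess(root)->usage_in_excess);
        return 0;
}
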
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 947ed5413279..bf3351b5115e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1114 * shake_page could have turned it free. 1114 * shake_page could have turned it free.
1115 */ 1115 */
1116 if (is_free_buddy_page(p)) { 1116 if (is_free_buddy_page(p)) {
1117 action_result(pfn, "free buddy, 2nd try", 1117 if (flags & MF_COUNT_INCREASED)
1118 DELAYED); 1118 action_result(pfn, "free buddy", DELAYED);
1119 else
1120 action_result(pfn, "free buddy, 2nd try", DELAYED);
1119 return 0; 1121 return 0;
1120 } 1122 }
1121 action_result(pfn, "non LRU", IGNORED); 1123 action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
1349 * worked by memory_failure() and the page lock is not held yet. 1351 * worked by memory_failure() and the page lock is not held yet.
1350 * In such case, we yield to memory_failure() and make unpoison fail. 1352 * In such case, we yield to memory_failure() and make unpoison fail.
1351 */ 1353 */
1352 if (PageTransHuge(page)) { 1354 if (!PageHuge(page) && PageTransHuge(page)) {
1353 pr_info("MCE: Memory failure is now running on %#lx\n", pfn); 1355 pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
1354 return 0; 1356 return 0;
1355 } 1357 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 9c8d5f59d30b..a26bccd44ccb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
107 list_del(&page->lru); 107 list_del(&page->lru);
108 dec_zone_page_state(page, NR_ISOLATED_ANON + 108 dec_zone_page_state(page, NR_ISOLATED_ANON +
109 page_is_file_cache(page)); 109 page_is_file_cache(page));
110 if (unlikely(balloon_page_movable(page))) 110 if (unlikely(isolated_balloon_page(page)))
111 balloon_page_putback(page); 111 balloon_page_putback(page);
112 else 112 else
113 putback_lru_page(page); 113 putback_lru_page(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..d480cd6fc475 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
379 379
380 /* 380 /*
381 * Initialize pte walk starting at the already pinned page where we 381 * Initialize pte walk starting at the already pinned page where we
382 * are sure that there is a pte. 382 * are sure that there is a pte, as it was pinned under the same
383 * mmap_sem write op.
383 */ 384 */
384 pte = get_locked_pte(vma->vm_mm, start, &ptl); 385 pte = get_locked_pte(vma->vm_mm, start, &ptl);
385 end = min(end, pmd_addr_end(start, end)); 386 /* Make sure we do not cross the page table boundary */
387 end = pgd_addr_end(start, end);
388 end = pud_addr_end(start, end);
389 end = pmd_addr_end(start, end);
386 390
387 /* The page next to the pinned page is the first we will try to get */ 391 /* The page next to the pinned page is the first we will try to get */
388 start += PAGE_SIZE; 392 start += PAGE_SIZE;
@@ -736,6 +740,7 @@ static int do_mlockall(int flags)
736 740
737 /* Ignore errors */ 741 /* Ignore errors */
738 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 742 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
743 cond_resched();
739 } 744 }
740out: 745out:
741 return 0; 746 return 0;
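
The mlock fix above clamps the pte walk so it cannot run past the page-table page that maps the starting address. Below is a sketch of the pmd_addr_end()-style clamp; the shift and size constants are illustrative (x86-64-like), not taken from the patch.

#include <stdio.h>

/* Illustrative (x86-64-like) constants, not taken from the patch. */
#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Clamp 'end' so [start, end) never crosses the next PMD boundary,
 * mirroring what pmd_addr_end() does for the pte walk above. */
static unsigned long pmd_addr_end_sketch(unsigned long start, unsigned long end)
{
        unsigned long boundary = (start + PMD_SIZE) & PMD_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
        unsigned long start = 0x1ff000;         /* just below a 2 MB boundary */
        unsigned long end   = 0x400000;

        printf("walk clamped to %#lx\n", pmd_addr_end_sketch(start, end));
        return 0;
}
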
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ee638f76ebe..dd886fac451a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6366 list_del(&page->lru); 6366 list_del(&page->lru);
6367 rmv_page_order(page); 6367 rmv_page_order(page);
6368 zone->free_area[order].nr_free--; 6368 zone->free_area[order].nr_free--;
6369#ifdef CONFIG_HIGHMEM
6370 if (PageHighMem(page))
6371 totalhigh_pages -= 1 << order;
6372#endif
6373 for (i = 0; i < (1 << order); i++) 6369 for (i = 0; i < (1 << order); i++)
6374 SetPageReserved((page+i)); 6370 SetPageReserved((page+i));
6375 pfn += (1 << order); 6371 pfn += (1 << order);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..53f2f82f83ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
48#include <asm/div64.h> 48#include <asm/div64.h>
49 49
50#include <linux/swapops.h> 50#include <linux/swapops.h>
51#include <linux/balloon_compaction.h>
51 52
52#include "internal.h" 53#include "internal.h"
53 54
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
139{ 140{
140 return !sc->target_mem_cgroup; 141 return !sc->target_mem_cgroup;
141} 142}
142
143static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
144{
145 struct mem_cgroup *root = sc->target_mem_cgroup;
146 return !mem_cgroup_disabled() &&
147 mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
148}
149#else 143#else
150static bool global_reclaim(struct scan_control *sc) 144static bool global_reclaim(struct scan_control *sc)
151{ 145{
152 return true; 146 return true;
153} 147}
154
155static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
156{
157 return false;
158}
159#endif 148#endif
160 149
161unsigned long zone_reclaimable_pages(struct zone *zone) 150unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -1125,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1125 LIST_HEAD(clean_pages); 1114 LIST_HEAD(clean_pages);
1126 1115
1127 list_for_each_entry_safe(page, next, page_list, lru) { 1116 list_for_each_entry_safe(page, next, page_list, lru) {
1128 if (page_is_file_cache(page) && !PageDirty(page)) { 1117 if (page_is_file_cache(page) && !PageDirty(page) &&
1118 !isolated_balloon_page(page)) {
1129 ClearPageActive(page); 1119 ClearPageActive(page);
1130 list_move(&page->lru, &clean_pages); 1120 list_move(&page->lru, &clean_pages);
1131 } 1121 }
@@ -2176,11 +2166,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
2176 } 2166 }
2177} 2167}
2178 2168
2179static int 2169static void shrink_zone(struct zone *zone, struct scan_control *sc)
2180__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2181{ 2170{
2182 unsigned long nr_reclaimed, nr_scanned; 2171 unsigned long nr_reclaimed, nr_scanned;
2183 int groups_scanned = 0;
2184 2172
2185 do { 2173 do {
2186 struct mem_cgroup *root = sc->target_mem_cgroup; 2174 struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2176,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2188 .zone = zone, 2176 .zone = zone,
2189 .priority = sc->priority, 2177 .priority = sc->priority,
2190 }; 2178 };
2191 struct mem_cgroup *memcg = NULL; 2179 struct mem_cgroup *memcg;
2192 mem_cgroup_iter_filter filter = (soft_reclaim) ?
2193 mem_cgroup_soft_reclaim_eligible : NULL;
2194 2180
2195 nr_reclaimed = sc->nr_reclaimed; 2181 nr_reclaimed = sc->nr_reclaimed;
2196 nr_scanned = sc->nr_scanned; 2182 nr_scanned = sc->nr_scanned;
2197 2183
2198 while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) { 2184 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2185 do {
2199 struct lruvec *lruvec; 2186 struct lruvec *lruvec;
2200 2187
2201 groups_scanned++;
2202 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 2188 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2203 2189
2204 shrink_lruvec(lruvec, sc); 2190 shrink_lruvec(lruvec, sc);
@@ -2218,7 +2204,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2218 mem_cgroup_iter_break(root, memcg); 2204 mem_cgroup_iter_break(root, memcg);
2219 break; 2205 break;
2220 } 2206 }
2221 } 2207 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2208 } while (memcg);
2222 2209
2223 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, 2210 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2224 sc->nr_scanned - nr_scanned, 2211 sc->nr_scanned - nr_scanned,
@@ -2226,37 +2213,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2226 2213
2227 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, 2214 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2228 sc->nr_scanned - nr_scanned, sc)); 2215 sc->nr_scanned - nr_scanned, sc));
2229
2230 return groups_scanned;
2231}
2232
2233
2234static void shrink_zone(struct zone *zone, struct scan_control *sc)
2235{
2236 bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
2237 unsigned long nr_scanned = sc->nr_scanned;
2238 int scanned_groups;
2239
2240 scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
2241 /*
2242 * memcg iterator might race with other reclaimer or start from
2243 * a incomplete tree walk so the tree walk in __shrink_zone
2244 * might have missed groups that are above the soft limit. Try
2245 * another loop to catch up with others. Do it just once to
2246 * prevent from reclaim latencies when other reclaimers always
2247 * preempt this one.
2248 */
2249 if (do_soft_reclaim && !scanned_groups)
2250 __shrink_zone(zone, sc, do_soft_reclaim);
2251
2252 /*
2253 * No group is over the soft limit or those that are do not have
2254 * pages in the zone we are reclaiming so we have to reclaim everybody
2255 */
2256 if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
2257 __shrink_zone(zone, sc, false);
2258 return;
2259 }
2260} 2216}
2261 2217
2262/* Returns true if compaction should go ahead for a high-order request */ 2218/* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2276,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2320{ 2276{
2321 struct zoneref *z; 2277 struct zoneref *z;
2322 struct zone *zone; 2278 struct zone *zone;
2279 unsigned long nr_soft_reclaimed;
2280 unsigned long nr_soft_scanned;
2323 bool aborted_reclaim = false; 2281 bool aborted_reclaim = false;
2324 2282
2325 /* 2283 /*
@@ -2359,6 +2317,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2359 continue; 2317 continue;
2360 } 2318 }
2361 } 2319 }
2320 /*
2321 * This steals pages from memory cgroups over softlimit
2322 * and returns the number of reclaimed pages and
2323 * scanned pages. This works for global memory pressure
2324 * and balancing, not for a memcg's limit.
2325 */
2326 nr_soft_scanned = 0;
2327 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2328 sc->order, sc->gfp_mask,
2329 &nr_soft_scanned);
2330 sc->nr_reclaimed += nr_soft_reclaimed;
2331 sc->nr_scanned += nr_soft_scanned;
2362 /* need some check for avoid more shrink_zone() */ 2332 /* need some check for avoid more shrink_zone() */
2363 } 2333 }
2364 2334
@@ -2952,6 +2922,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2952{ 2922{
2953 int i; 2923 int i;
2954 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2924 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2925 unsigned long nr_soft_reclaimed;
2926 unsigned long nr_soft_scanned;
2955 struct scan_control sc = { 2927 struct scan_control sc = {
2956 .gfp_mask = GFP_KERNEL, 2928 .gfp_mask = GFP_KERNEL,
2957 .priority = DEF_PRIORITY, 2929 .priority = DEF_PRIORITY,
@@ -3066,6 +3038,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
3066 3038
3067 sc.nr_scanned = 0; 3039 sc.nr_scanned = 0;
3068 3040
3041 nr_soft_scanned = 0;
3042 /*
3043 * Call soft limit reclaim before calling shrink_zone.
3044 */
3045 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3046 order, sc.gfp_mask,
3047 &nr_soft_scanned);
3048 sc.nr_reclaimed += nr_soft_reclaimed;
3049
3069 /* 3050 /*
3070 * There should be no need to raise the scanning 3051 * There should be no need to raise the scanning
3071 * priority if enough pages are already being scanned 3052 * priority if enough pages are already being scanned
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 1eb05d80b07b..3ed616215870 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -24,6 +24,11 @@
24static unsigned int mrp_join_time __read_mostly = 200; 24static unsigned int mrp_join_time __read_mostly = 200;
25module_param(mrp_join_time, uint, 0644); 25module_param(mrp_join_time, uint, 0644);
26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)"); 26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
27
28static unsigned int mrp_periodic_time __read_mostly = 1000;
29module_param(mrp_periodic_time, uint, 0644);
30MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
31
27MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
28 33
29static const u8 34static const u8
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data)
595 mrp_join_timer_arm(app); 600 mrp_join_timer_arm(app);
596} 601}
597 602
603static void mrp_periodic_timer_arm(struct mrp_applicant *app)
604{
605 mod_timer(&app->periodic_timer,
606 jiffies + msecs_to_jiffies(mrp_periodic_time));
607}
608
609static void mrp_periodic_timer(unsigned long data)
610{
611 struct mrp_applicant *app = (struct mrp_applicant *)data;
612
613 spin_lock(&app->lock);
614 mrp_mad_event(app, MRP_EVENT_PERIODIC);
615 mrp_pdu_queue(app);
616 spin_unlock(&app->lock);
617
618 mrp_periodic_timer_arm(app);
619}
620
598static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) 621static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
599{ 622{
600 __be16 endmark; 623 __be16 endmark;
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
845 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); 868 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
846 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app); 869 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
847 mrp_join_timer_arm(app); 870 mrp_join_timer_arm(app);
871 setup_timer(&app->periodic_timer, mrp_periodic_timer,
872 (unsigned long)app);
873 mrp_periodic_timer_arm(app);
848 return 0; 874 return 0;
849 875
850err3: 876err3:
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
870 * all pending messages before the applicant is gone. 896 * all pending messages before the applicant is gone.
871 */ 897 */
872 del_timer_sync(&app->join_timer); 898 del_timer_sync(&app->join_timer);
899 del_timer_sync(&app->periodic_timer);
873 900
874 spin_lock_bh(&app->lock); 901 spin_lock_bh(&app->lock);
875 mrp_mad_event(app, MRP_EVENT_TX); 902 mrp_mad_event(app, MRP_EVENT_TX);
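
The mrp change above adds a periodic transmission event: a timer that fires every mrp_periodic_time milliseconds, delivers MRP_EVENT_PERIODIC under the applicant lock, queues the resulting PDUs and then re-arms itself. A rough userspace sketch of that self-re-arming pattern, using a pthread and nanosleep() in place of the kernel timer API; the names and the do_periodic_event() body are invented for illustration:

        #include <pthread.h>
        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>

        static unsigned int periodic_time_ms = 1000;            /* default 1 s, as above */
        static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;

        static void do_periodic_event(void)
        {
                printf("PERIODIC event emitted\n");
        }

        static void *periodic_timer(void *arg)
        {
                (void)arg;
                for (;;) {
                        struct timespec ts = {
                                .tv_sec  = periodic_time_ms / 1000,
                                .tv_nsec = (periodic_time_ms % 1000) * 1000000L,
                        };
                        nanosleep(&ts, NULL);                   /* "mod_timer" interval */

                        pthread_mutex_lock(&app_lock);          /* spin_lock(&app->lock) */
                        do_periodic_event();                    /* mrp_mad_event(PERIODIC) + queue */
                        pthread_mutex_unlock(&app_lock);
                }
                return NULL;
        }

        int main(void)
        {
                pthread_t tid;

                pthread_create(&tid, NULL, periodic_timer, NULL);
                sleep(3);                                       /* let it fire a few times */
                return 0;
        }
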
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 4493913f0d5c..813db4e64602 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -168,6 +168,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
168 case ETH_P_8021Q: 168 case ETH_P_8021Q:
169 vhdr = (struct vlan_ethhdr *)skb->data; 169 vhdr = (struct vlan_ethhdr *)skb->data;
170 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 170 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
171 vid |= BATADV_VLAN_HAS_TAG;
171 172
172 if (vhdr->h_vlan_encapsulated_proto != ethertype) 173 if (vhdr->h_vlan_encapsulated_proto != ethertype)
173 break; 174 break;
@@ -331,6 +332,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
331 case ETH_P_8021Q: 332 case ETH_P_8021Q:
332 vhdr = (struct vlan_ethhdr *)skb->data; 333 vhdr = (struct vlan_ethhdr *)skb->data;
333 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 334 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
335 vid |= BATADV_VLAN_HAS_TAG;
334 336
335 if (vhdr->h_vlan_encapsulated_proto != ethertype) 337 if (vhdr->h_vlan_encapsulated_proto != ethertype)
336 break; 338 break;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 634debab4d54..fb7356fcfe51 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1146,7 +1146,11 @@ int hci_dev_open(__u16 dev)
1146 goto done; 1146 goto done;
1147 } 1147 }
1148 1148
1149 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 1149 /* Check for rfkill but allow the HCI setup stage to proceed
1150 * (which in itself doesn't cause any RF activity).
1151 */
1152 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1153 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1150 ret = -ERFKILL; 1154 ret = -ERFKILL;
1151 goto done; 1155 goto done;
1152 } 1156 }
@@ -1566,10 +1570,13 @@ static int hci_rfkill_set_block(void *data, bool blocked)
1566 1570
1567 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 1571 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1568 1572
1569 if (!blocked) 1573 if (blocked) {
1570 return 0; 1574 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1571 1575 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1572 hci_dev_do_close(hdev); 1576 hci_dev_do_close(hdev);
1577 } else {
1578 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1579 }
1573 1580
1574 return 0; 1581 return 0;
1575} 1582}
@@ -1591,9 +1598,13 @@ static void hci_power_on(struct work_struct *work)
1591 return; 1598 return;
1592 } 1599 }
1593 1600
1594 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1601 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1602 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1603 hci_dev_do_close(hdev);
1604 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1595 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 1605 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1596 HCI_AUTO_OFF_TIMEOUT); 1606 HCI_AUTO_OFF_TIMEOUT);
1607 }
1597 1608
1598 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1609 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1599 mgmt_index_added(hdev); 1610 mgmt_index_added(hdev);
@@ -2209,6 +2220,9 @@ int hci_register_dev(struct hci_dev *hdev)
2209 } 2220 }
2210 } 2221 }
2211 2222
2223 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2225
2212 set_bit(HCI_SETUP, &hdev->dev_flags); 2226 set_bit(HCI_SETUP, &hdev->dev_flags);
2213 2227
2214 if (hdev->dev_type != HCI_AMP) 2228 if (hdev->dev_type != HCI_AMP)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94aab73f89d4..8db3e89fae35 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3557,7 +3557,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3557 cp.handle = cpu_to_le16(conn->handle); 3557 cp.handle = cpu_to_le16(conn->handle);
3558 3558
3559 if (ltk->authenticated) 3559 if (ltk->authenticated)
3560 conn->sec_level = BT_SECURITY_HIGH; 3560 conn->pending_sec_level = BT_SECURITY_HIGH;
3561 else
3562 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3563
3564 conn->enc_key_size = ltk->enc_size;
3561 3565
3562 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3566 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3563 3567
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bb7bca8e60..63fa11109a1c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3755,6 +3755,13 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3755 3755
3756 sk = chan->sk; 3756 sk = chan->sk;
3757 3757
3758 /* For certain devices (ex: HID mouse), support for authentication,
3759 * pairing and bonding is optional. For such devices, inorder to avoid
3760 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3761 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3762 */
3763 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3764
3758 bacpy(&bt_sk(sk)->src, conn->src); 3765 bacpy(&bt_sk(sk)->src, conn->src);
3759 bacpy(&bt_sk(sk)->dst, conn->dst); 3766 bacpy(&bt_sk(sk)->dst, conn->dst);
3760 chan->psm = psm; 3767 chan->psm = psm;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d126faf145f..84fcf9fff3ea 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -569,7 +569,6 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) 569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
570{ 570{
571 struct rfcomm_dev *dev = dlc->owner; 571 struct rfcomm_dev *dev = dlc->owner;
572 struct tty_struct *tty;
573 if (!dev) 572 if (!dev)
574 return; 573 return;
575 574
@@ -581,38 +580,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
581 DPM_ORDER_DEV_AFTER_PARENT); 580 DPM_ORDER_DEV_AFTER_PARENT);
582 581
583 wake_up_interruptible(&dev->port.open_wait); 582 wake_up_interruptible(&dev->port.open_wait);
584 } else if (dlc->state == BT_CLOSED) { 583 } else if (dlc->state == BT_CLOSED)
585 tty = tty_port_tty_get(&dev->port); 584 tty_port_tty_hangup(&dev->port, false);
586 if (!tty) {
587 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
588 /* Drop DLC lock here to avoid deadlock
589 * 1. rfcomm_dev_get will take rfcomm_dev_lock
590 * but in rfcomm_dev_add there's lock order:
591 * rfcomm_dev_lock -> dlc lock
592 * 2. tty_port_put will deadlock if it's
593 * the last reference
594 *
595 * FIXME: when we release the lock anything
596 * could happen to dev, even its destruction
597 */
598 rfcomm_dlc_unlock(dlc);
599 if (rfcomm_dev_get(dev->id) == NULL) {
600 rfcomm_dlc_lock(dlc);
601 return;
602 }
603
604 if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
605 &dev->flags))
606 tty_port_put(&dev->port);
607
608 tty_port_put(&dev->port);
609 rfcomm_dlc_lock(dlc);
610 }
611 } else {
612 tty_hangup(tty);
613 tty_kref_put(tty);
614 }
615 }
616} 585}
617 586
618static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) 587static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index b9259efa636e..e74ddc1c29a8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -207,7 +207,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
207 struct net_device *dev, u32 filter_mask) 207 struct net_device *dev, u32 filter_mask)
208{ 208{
209 int err = 0; 209 int err = 0;
210 struct net_bridge_port *port = br_port_get_rcu(dev); 210 struct net_bridge_port *port = br_port_get_rtnl(dev);
211 211
212 /* not a bridge port and */ 212 /* not a bridge port and */
213 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN)) 213 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
@@ -451,7 +451,7 @@ static size_t br_get_link_af_size(const struct net_device *dev)
451 struct net_port_vlans *pv; 451 struct net_port_vlans *pv;
452 452
453 if (br_port_exists(dev)) 453 if (br_port_exists(dev))
454 pv = nbp_get_vlan_info(br_port_get_rcu(dev)); 454 pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
455 else if (dev->priv_flags & IFF_EBRIDGE) 455 else if (dev->priv_flags & IFF_EBRIDGE)
456 pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); 456 pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
457 else 457 else
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 598cb0b333c6..efb57d911569 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -202,13 +202,10 @@ struct net_bridge_port
202 202
203static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) 203static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
204{ 204{
205 struct net_bridge_port *port = 205 return rcu_dereference(dev->rx_handler_data);
206 rcu_dereference_rtnl(dev->rx_handler_data);
207
208 return br_port_exists(dev) ? port : NULL;
209} 206}
210 207
211static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev) 208static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev)
212{ 209{
213 return br_port_exists(dev) ? 210 return br_port_exists(dev) ?
214 rtnl_dereference(dev->rx_handler_data) : NULL; 211 rtnl_dereference(dev->rx_handler_data) : NULL;
@@ -746,6 +743,7 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
746extern void br_init_port(struct net_bridge_port *p); 743extern void br_init_port(struct net_bridge_port *p);
747extern void br_become_designated_port(struct net_bridge_port *p); 744extern void br_become_designated_port(struct net_bridge_port *p);
748 745
746extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
749extern int br_set_forward_delay(struct net_bridge *br, unsigned long x); 747extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
750extern int br_set_hello_time(struct net_bridge *br, unsigned long x); 748extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
751extern int br_set_max_age(struct net_bridge *br, unsigned long x); 749extern int br_set_max_age(struct net_bridge *br, unsigned long x);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 1c0a50f13229..3c86f0538cbb 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,7 +209,7 @@ static void br_record_config_information(struct net_bridge_port *p,
209 p->designated_age = jiffies - bpdu->message_age; 209 p->designated_age = jiffies - bpdu->message_age;
210 210
211 mod_timer(&p->message_age_timer, jiffies 211 mod_timer(&p->message_age_timer, jiffies
212 + (p->br->max_age - bpdu->message_age)); 212 + (bpdu->max_age - bpdu->message_age));
213} 213}
214 214
215/* called under bridge lock */ 215/* called under bridge lock */
@@ -544,18 +544,27 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
544 544
545} 545}
546 546
547void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
548{
549 br->bridge_forward_delay = t;
550 if (br_is_root_bridge(br))
551 br->forward_delay = br->bridge_forward_delay;
552}
553
547int br_set_forward_delay(struct net_bridge *br, unsigned long val) 554int br_set_forward_delay(struct net_bridge *br, unsigned long val)
548{ 555{
549 unsigned long t = clock_t_to_jiffies(val); 556 unsigned long t = clock_t_to_jiffies(val);
557 int err = -ERANGE;
550 558
559 spin_lock_bh(&br->lock);
551 if (br->stp_enabled != BR_NO_STP && 560 if (br->stp_enabled != BR_NO_STP &&
552 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) 561 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
553 return -ERANGE; 562 goto unlock;
554 563
555 spin_lock_bh(&br->lock); 564 __br_set_forward_delay(br, t);
556 br->bridge_forward_delay = t; 565 err = 0;
557 if (br_is_root_bridge(br)) 566
558 br->forward_delay = br->bridge_forward_delay; 567unlock:
559 spin_unlock_bh(&br->lock); 568 spin_unlock_bh(&br->lock);
560 return 0; 569 return err;
561} 570}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d45e760141bb..108084a04671 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -129,6 +129,14 @@ static void br_stp_start(struct net_bridge *br)
129 char *envp[] = { NULL }; 129 char *envp[] = { NULL };
130 130
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132
133 spin_lock_bh(&br->lock);
134
135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
137 else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
139
132 if (r == 0) { 140 if (r == 0) {
133 br->stp_enabled = BR_USER_STP; 141 br->stp_enabled = BR_USER_STP;
134 br_debug(br, "userspace STP started\n"); 142 br_debug(br, "userspace STP started\n");
@@ -137,10 +145,10 @@ static void br_stp_start(struct net_bridge *br)
137 br_debug(br, "using kernel STP\n"); 145 br_debug(br, "using kernel STP\n");
138 146
139 /* To start timers on any ports left in blocking */ 147 /* To start timers on any ports left in blocking */
140 spin_lock_bh(&br->lock);
141 br_port_state_selection(br); 148 br_port_state_selection(br);
142 spin_unlock_bh(&br->lock);
143 } 149 }
150
151 spin_unlock_bh(&br->lock);
144} 152}
145 153
146static void br_stp_stop(struct net_bridge *br) 154static void br_stp_stop(struct net_bridge *br)
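
The two bridge hunks above move the forward-delay update into a shared helper and, when STP is switched on, pull bridge_forward_delay back toward the [BR_MIN_FORWARD_DELAY, BR_MAX_FORWARD_DELAY] window that br_set_forward_delay() enforces while STP is running. A small sketch of such a clamp, assuming the usual 802.1D bounds of 2 and 30 seconds at HZ=100; note that clamping the upper end relies on a '>' comparison, so only values above the maximum are reduced:

        #include <stdio.h>

        static unsigned long clamp_forward_delay(unsigned long delay,
                                                 unsigned long min, unsigned long max)
        {
                if (delay < min)
                        return min;
                if (delay > max)
                        return max;
                return delay;
        }

        int main(void)
        {
                unsigned long min = 2 * 100, max = 30 * 100;    /* 2 s, 30 s in "jiffies" */

                printf("%lu -> %lu\n", 0UL,    clamp_forward_delay(0,    min, max));
                printf("%lu -> %lu\n", 1500UL, clamp_forward_delay(1500, min, max));
                printf("%lu -> %lu\n", 9000UL, clamp_forward_delay(9000, min, max));
                return 0;
        }
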
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1606f740d6ae..2b4b32aaa893 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2216,6 +2216,17 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
2216EXPORT_SYMBOL(ceph_osdc_sync); 2216EXPORT_SYMBOL(ceph_osdc_sync);
2217 2217
2218/* 2218/*
2219 * Call all pending notify callbacks - for use after a watch is
2220 * unregistered, to make sure no more callbacks for it will be invoked
2221 */
2222extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
2223{
2224 flush_workqueue(osdc->notify_wq);
2225}
2226EXPORT_SYMBOL(ceph_osdc_flush_notifies);
2227
2228
2229/*
2219 * init, shutdown 2230 * init, shutdown
2220 */ 2231 */
2221int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) 2232int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
diff --git a/net/core/dev.c b/net/core/dev.c
index 5c713f2239cc..65f829cfd928 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5247,10 +5247,12 @@ static int dev_new_index(struct net *net)
5247 5247
5248/* Delayed registration/unregisteration */ 5248/* Delayed registration/unregisteration */
5249static LIST_HEAD(net_todo_list); 5249static LIST_HEAD(net_todo_list);
5250static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5250 5251
5251static void net_set_todo(struct net_device *dev) 5252static void net_set_todo(struct net_device *dev)
5252{ 5253{
5253 list_add_tail(&dev->todo_list, &net_todo_list); 5254 list_add_tail(&dev->todo_list, &net_todo_list);
5255 dev_net(dev)->dev_unreg_count++;
5254} 5256}
5255 5257
5256static void rollback_registered_many(struct list_head *head) 5258static void rollback_registered_many(struct list_head *head)
@@ -5918,6 +5920,12 @@ void netdev_run_todo(void)
5918 if (dev->destructor) 5920 if (dev->destructor)
5919 dev->destructor(dev); 5921 dev->destructor(dev);
5920 5922
5923 /* Report a network device has been unregistered */
5924 rtnl_lock();
5925 dev_net(dev)->dev_unreg_count--;
5926 __rtnl_unlock();
5927 wake_up(&netdev_unregistering_wq);
5928
5921 /* Free network device */ 5929 /* Free network device */
5922 kobject_put(&dev->dev.kobj); 5930 kobject_put(&dev->dev.kobj);
5923 } 5931 }
@@ -6603,6 +6611,34 @@ static void __net_exit default_device_exit(struct net *net)
6603 rtnl_unlock(); 6611 rtnl_unlock();
6604} 6612}
6605 6613
6614static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6615{
6616 /* Return with the rtnl_lock held when there are no network
6617 * devices unregistering in any network namespace in net_list.
6618 */
6619 struct net *net;
6620 bool unregistering;
6621 DEFINE_WAIT(wait);
6622
6623 for (;;) {
6624 prepare_to_wait(&netdev_unregistering_wq, &wait,
6625 TASK_UNINTERRUPTIBLE);
6626 unregistering = false;
6627 rtnl_lock();
6628 list_for_each_entry(net, net_list, exit_list) {
6629 if (net->dev_unreg_count > 0) {
6630 unregistering = true;
6631 break;
6632 }
6633 }
6634 if (!unregistering)
6635 break;
6636 __rtnl_unlock();
6637 schedule();
6638 }
6639 finish_wait(&netdev_unregistering_wq, &wait);
6640}
6641
6606static void __net_exit default_device_exit_batch(struct list_head *net_list) 6642static void __net_exit default_device_exit_batch(struct list_head *net_list)
6607{ 6643{
6608 /* At exit all network devices most be removed from a network 6644 /* At exit all network devices most be removed from a network
@@ -6614,7 +6650,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
6614 struct net *net; 6650 struct net *net;
6615 LIST_HEAD(dev_kill_list); 6651 LIST_HEAD(dev_kill_list);
6616 6652
6617 rtnl_lock(); 6653 /* To prevent network device cleanup code from dereferencing
6654 * loopback devices or network devices that have been freed
6655 * wait here for all pending unregistrations to complete,
6656 * before unregistring the loopback device and allowing the
6657 * network namespace be freed.
6658 *
6659 * The netdev todo list containing all network devices
6660 * unregistrations that happen in default_device_exit_batch
6661 * will run in the rtnl_unlock() at the end of
6662 * default_device_exit_batch.
6663 */
6664 rtnl_lock_unregistering(net_list);
6618 list_for_each_entry(net, net_list, exit_list) { 6665 list_for_each_entry(net, net_list, exit_list) {
6619 for_each_netdev_reverse(net, dev) { 6666 for_each_netdev_reverse(net, dev) {
6620 if (dev->rtnl_link_ops) 6667 if (dev->rtnl_link_ops)
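
rtnl_lock_unregistering() above is a wait-until-quiescent loop: take the lock, scan every namespace on the list for devices still unregistering, and if any are found drop the lock and sleep on netdev_unregistering_wq until netdev_run_todo() wakes it. A loose pthread analogue of the same shape, offered only as a sketch; the mutex/condvar pair stands in for rtnl_lock() plus the wait queue, and struct ns is invented:

        #include <pthread.h>

        struct ns {
                int dev_unreg_count;
                struct ns *next;
        };

        static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;   /* "rtnl_lock" */
        static pthread_cond_t unreg_done = PTHREAD_COND_INITIALIZER;   /* wait queue */

        /* return holding big_lock only once no namespace is unregistering */
        static void lock_unregistering(struct ns *list)
        {
                pthread_mutex_lock(&big_lock);
                for (;;) {
                        int unregistering = 0;
                        struct ns *ns;

                        for (ns = list; ns; ns = ns->next)
                                if (ns->dev_unreg_count > 0)
                                        unregistering = 1;
                        if (!unregistering)
                                return;                 /* caller now owns big_lock */
                        /* drop the lock and sleep until a device finishes */
                        pthread_cond_wait(&unreg_done, &big_lock);
                }
        }

        /* the unregister path decrements the counter and wakes the waiter */
        static void device_unregistered(struct ns *ns)
        {
                pthread_mutex_lock(&big_lock);
                ns->dev_unreg_count--;
                pthread_cond_broadcast(&unreg_done);
                pthread_mutex_unlock(&big_lock);
        }

        int main(void)
        {
                struct ns a = { .dev_unreg_count = 0, .next = 0 };

                (void)device_unregistered;
                lock_unregistering(&a);                 /* returns at once, lock held */
                pthread_mutex_unlock(&big_lock);
                return 0;
        }
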
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1929af87b260..8d7d0dd72db2 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -154,8 +154,8 @@ ipv6:
154 if (poff >= 0) { 154 if (poff >= 0) {
155 __be32 *ports, _ports; 155 __be32 *ports, _ports;
156 156
157 nhoff += poff; 157 ports = skb_header_pointer(skb, nhoff + poff,
158 ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); 158 sizeof(_ports), &_ports);
159 if (ports) 159 if (ports)
160 flow->ports = *ports; 160 flow->ports = *ports;
161 } 161 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2c637e9a0b27..fc75c9e461b8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -550,7 +550,7 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
550 return; 550 return;
551 551
552 proto = ntohs(eth_hdr(skb)->h_proto); 552 proto = ntohs(eth_hdr(skb)->h_proto);
553 if (proto == ETH_P_IP) { 553 if (proto == ETH_P_ARP) {
554 struct arphdr *arp; 554 struct arphdr *arp;
555 unsigned char *arp_ptr; 555 unsigned char *arp_ptr;
556 /* No arp on this interface */ 556 /* No arp on this interface */
@@ -1284,15 +1284,14 @@ EXPORT_SYMBOL_GPL(__netpoll_free_async);
1284 1284
1285void netpoll_cleanup(struct netpoll *np) 1285void netpoll_cleanup(struct netpoll *np)
1286{ 1286{
1287 if (!np->dev)
1288 return;
1289
1290 rtnl_lock(); 1287 rtnl_lock();
1288 if (!np->dev)
1289 goto out;
1291 __netpoll_cleanup(np); 1290 __netpoll_cleanup(np);
1292 rtnl_unlock();
1293
1294 dev_put(np->dev); 1291 dev_put(np->dev);
1295 np->dev = NULL; 1292 np->dev = NULL;
1293out:
1294 rtnl_unlock();
1296} 1295}
1297EXPORT_SYMBOL(netpoll_cleanup); 1296EXPORT_SYMBOL(netpoll_cleanup);
1298 1297
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6a2f13cee86a..3f1ec1586ae1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,11 +10,24 @@
10 10
11#include <net/secure_seq.h> 11#include <net/secure_seq.h>
12 12
13static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 13#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
14 14
15void net_secret_init(void) 15static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
16
17static void net_secret_init(void)
16{ 18{
17 get_random_bytes(net_secret, sizeof(net_secret)); 19 u32 tmp;
20 int i;
21
22 if (likely(net_secret[0]))
23 return;
24
25 for (i = NET_SECRET_SIZE; i > 0;) {
26 do {
27 get_random_bytes(&tmp, sizeof(tmp));
28 } while (!tmp);
29 cmpxchg(&net_secret[--i], 0, tmp);
30 }
18} 31}
19 32
20#ifdef CONFIG_INET 33#ifdef CONFIG_INET
@@ -42,6 +55,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
42 u32 hash[MD5_DIGEST_WORDS]; 55 u32 hash[MD5_DIGEST_WORDS];
43 u32 i; 56 u32 i;
44 57
58 net_secret_init();
45 memcpy(hash, saddr, 16); 59 memcpy(hash, saddr, 16);
46 for (i = 0; i < 4; i++) 60 for (i = 0; i < 4; i++)
47 secret[i] = net_secret[i] + (__force u32)daddr[i]; 61 secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +77,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
63 u32 hash[MD5_DIGEST_WORDS]; 77 u32 hash[MD5_DIGEST_WORDS];
64 u32 i; 78 u32 i;
65 79
80 net_secret_init();
66 memcpy(hash, saddr, 16); 81 memcpy(hash, saddr, 16);
67 for (i = 0; i < 4; i++) 82 for (i = 0; i < 4; i++)
68 secret[i] = net_secret[i] + (__force u32) daddr[i]; 83 secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +97,7 @@ __u32 secure_ip_id(__be32 daddr)
82{ 97{
83 u32 hash[MD5_DIGEST_WORDS]; 98 u32 hash[MD5_DIGEST_WORDS];
84 99
100 net_secret_init();
85 hash[0] = (__force __u32) daddr; 101 hash[0] = (__force __u32) daddr;
86 hash[1] = net_secret[13]; 102 hash[1] = net_secret[13];
87 hash[2] = net_secret[14]; 103 hash[2] = net_secret[14];
@@ -96,6 +112,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
96{ 112{
97 __u32 hash[4]; 113 __u32 hash[4];
98 114
115 net_secret_init();
99 memcpy(hash, daddr, 16); 116 memcpy(hash, daddr, 16);
100 md5_transform(hash, net_secret); 117 md5_transform(hash, net_secret);
101 118
@@ -107,6 +124,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
107{ 124{
108 u32 hash[MD5_DIGEST_WORDS]; 125 u32 hash[MD5_DIGEST_WORDS];
109 126
127 net_secret_init();
110 hash[0] = (__force u32)saddr; 128 hash[0] = (__force u32)saddr;
111 hash[1] = (__force u32)daddr; 129 hash[1] = (__force u32)daddr;
112 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 130 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +139,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
121{ 139{
122 u32 hash[MD5_DIGEST_WORDS]; 140 u32 hash[MD5_DIGEST_WORDS];
123 141
142 net_secret_init();
124 hash[0] = (__force u32)saddr; 143 hash[0] = (__force u32)saddr;
125 hash[1] = (__force u32)daddr; 144 hash[1] = (__force u32)daddr;
126 hash[2] = (__force u32)dport ^ net_secret[14]; 145 hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +159,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
140 u32 hash[MD5_DIGEST_WORDS]; 159 u32 hash[MD5_DIGEST_WORDS];
141 u64 seq; 160 u64 seq;
142 161
162 net_secret_init();
143 hash[0] = (__force u32)saddr; 163 hash[0] = (__force u32)saddr;
144 hash[1] = (__force u32)daddr; 164 hash[1] = (__force u32)daddr;
145 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 165 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +184,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
164 u64 seq; 184 u64 seq;
165 u32 i; 185 u32 i;
166 186
187 net_secret_init();
167 memcpy(hash, saddr, 16); 188 memcpy(hash, saddr, 16);
168 for (i = 0; i < 4; i++) 189 for (i = 0; i < 4; i++)
169 secret[i] = net_secret[i] + daddr[i]; 190 secret[i] = net_secret[i] + daddr[i];
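
The secure_seq change above makes the per-boot secret lazily initialised on first use: each caller runs net_secret_init(), which bails out once word 0 is non-zero, and concurrent first callers cannot clobber each other because every word is published with cmpxchg() and is never allowed to be zero. A userspace sketch of that pattern, with __sync_val_compare_and_swap() and rand() standing in for cmpxchg() and get_random_bytes(); the names and sizes are illustrative:

        #include <stdio.h>
        #include <stdlib.h>

        #define SECRET_WORDS 16

        static unsigned int secret[SECRET_WORDS];

        static void secret_init(void)
        {
                unsigned int tmp;
                int i;

                /* word 0 doubles as the "already initialised" flag, so it is
                 * filled last and never allowed to end up zero */
                if (secret[0])
                        return;

                for (i = SECRET_WORDS; i > 0;) {
                        do {
                                tmp = (unsigned int)rand();
                        } while (!tmp);
                        /* only the first writer of each word wins */
                        __sync_val_compare_and_swap(&secret[--i], 0, tmp);
                }
        }

        int main(void)
        {
                secret_init();
                secret_init();          /* second call is a cheap no-op */
                printf("secret[0] = %#x\n", secret[0]);
                return 0;
        }
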
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c61f9c02fdb..6cf9f7782ad4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -135,6 +135,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
135 135
136 if (dst) 136 if (dst)
137 dst->ops->redirect(dst, sk, skb); 137 dst->ops->redirect(dst, sk, skb);
138 goto out;
138 } 139 }
139 140
140 if (type == ICMPV6_PKT_TOOBIG) { 141 if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7a1874b7b8fd..cfeb85cff4f0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -263,10 +263,8 @@ void build_ehash_secret(void)
263 get_random_bytes(&rnd, sizeof(rnd)); 263 get_random_bytes(&rnd, sizeof(rnd));
264 } while (rnd == 0); 264 } while (rnd == 0);
265 265
266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) { 266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); 267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
268 net_secret_init();
269 }
270} 268}
271EXPORT_SYMBOL(build_ehash_secret); 269EXPORT_SYMBOL(build_ehash_secret);
272 270
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d6c0e64ec97f..7defdc9ba167 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
369 pip->saddr = fl4.saddr; 369 pip->saddr = fl4.saddr;
370 pip->protocol = IPPROTO_IGMP; 370 pip->protocol = IPPROTO_IGMP;
371 pip->tot_len = 0; /* filled in later */ 371 pip->tot_len = 0; /* filled in later */
372 ip_select_ident(pip, &rt->dst, NULL); 372 ip_select_ident(skb, &rt->dst, NULL);
373 ((u8 *)&pip[1])[0] = IPOPT_RA; 373 ((u8 *)&pip[1])[0] = IPOPT_RA;
374 ((u8 *)&pip[1])[1] = 4; 374 ((u8 *)&pip[1])[1] = 4;
375 ((u8 *)&pip[1])[2] = 0; 375 ((u8 *)&pip[1])[2] = 0;
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
714 iph->daddr = dst; 714 iph->daddr = dst;
715 iph->saddr = fl4.saddr; 715 iph->saddr = fl4.saddr;
716 iph->protocol = IPPROTO_IGMP; 716 iph->protocol = IPPROTO_IGMP;
717 ip_select_ident(iph, &rt->dst, NULL); 717 ip_select_ident(skb, &rt->dst, NULL);
718 ((u8 *)&iph[1])[0] = IPOPT_RA; 718 ((u8 *)&iph[1])[0] = IPOPT_RA;
719 ((u8 *)&iph[1])[1] = 4; 719 ((u8 *)&iph[1])[1] = 4;
720 ((u8 *)&iph[1])[2] = 0; 720 ((u8 *)&iph[1])[2] = 0;
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data)
736 736
737 in_dev->mr_gq_running = 0; 737 in_dev->mr_gq_running = 0;
738 igmpv3_send_report(in_dev, NULL); 738 igmpv3_send_report(in_dev, NULL);
739 __in_dev_put(in_dev); 739 in_dev_put(in_dev);
740} 740}
741 741
742static void igmp_ifc_timer_expire(unsigned long data) 742static void igmp_ifc_timer_expire(unsigned long data)
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
749 igmp_ifc_start_timer(in_dev, 749 igmp_ifc_start_timer(in_dev,
750 unsolicited_report_interval(in_dev)); 750 unsolicited_report_interval(in_dev));
751 } 751 }
752 __in_dev_put(in_dev); 752 in_dev_put(in_dev);
753} 753}
754 754
755static void igmp_ifc_event(struct in_device *in_dev) 755static void igmp_ifc_event(struct in_device *in_dev)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 000e3d239d64..33d5537881ed 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -32,8 +32,8 @@
32 * At the moment of writing this notes identifier of IP packets is generated 32 * At the moment of writing this notes identifier of IP packets is generated
33 * to be unpredictable using this code only for packets subjected 33 * to be unpredictable using this code only for packets subjected
34 * (actually or potentially) to defragmentation. I.e. DF packets less than 34 * (actually or potentially) to defragmentation. I.e. DF packets less than
35 * PMTU in size uses a constant ID and do not use this code (see 35 * PMTU in size when local fragmentation is disabled use a constant ID and do
36 * ip_select_ident() in include/net/ip.h). 36 * not use this code (see ip_select_ident() in include/net/ip.h).
37 * 37 *
38 * Route cache entries hold references to our nodes. 38 * Route cache entries hold references to our nodes.
39 * New cache entries get references via lookup by destination IP address in 39 * New cache entries get references via lookup by destination IP address in
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9ee17e3d11c3..a04d872c54f9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
148 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); 148 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
149 iph->saddr = saddr; 149 iph->saddr = saddr;
150 iph->protocol = sk->sk_protocol; 150 iph->protocol = sk->sk_protocol;
151 ip_select_ident(iph, &rt->dst, sk); 151 ip_select_ident(skb, &rt->dst, sk);
152 152
153 if (opt && opt->opt.optlen) { 153 if (opt && opt->opt.optlen) {
154 iph->ihl += opt->opt.optlen>>2; 154 iph->ihl += opt->opt.optlen>>2;
@@ -386,7 +386,7 @@ packet_routed:
386 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); 386 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
387 } 387 }
388 388
389 ip_select_ident_more(iph, &rt->dst, sk, 389 ip_select_ident_more(skb, &rt->dst, sk,
390 (skb_shinfo(skb)->gso_segs ?: 1) - 1); 390 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
391 391
392 skb->priority = sk->sk_priority; 392 skb->priority = sk->sk_priority;
@@ -1316,7 +1316,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
1316 else 1316 else
1317 ttl = ip_select_ttl(inet, &rt->dst); 1317 ttl = ip_select_ttl(inet, &rt->dst);
1318 1318
1319 iph = (struct iphdr *)skb->data; 1319 iph = ip_hdr(skb);
1320 iph->version = 4; 1320 iph->version = 4;
1321 iph->ihl = 5; 1321 iph->ihl = 5;
1322 iph->tos = inet->tos; 1322 iph->tos = inet->tos;
@@ -1324,7 +1324,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
1324 iph->ttl = ttl; 1324 iph->ttl = ttl;
1325 iph->protocol = sk->sk_protocol; 1325 iph->protocol = sk->sk_protocol;
1326 ip_copy_addrs(iph, fl4); 1326 ip_copy_addrs(iph, fl4);
1327 ip_select_ident(iph, &rt->dst, sk); 1327 ip_select_ident(skb, &rt->dst, sk);
1328 1328
1329 if (opt) { 1329 if (opt) {
1330 iph->ihl += opt->optlen>>2; 1330 iph->ihl += opt->optlen>>2;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ac9fabe0300f..63a6d6d6b875 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
623 tunnel->err_count = 0; 623 tunnel->err_count = 0;
624 } 624 }
625 625
626 tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
626 ttl = tnl_params->ttl; 627 ttl = tnl_params->ttl;
627 if (ttl == 0) { 628 if (ttl == 0) {
628 if (skb->protocol == htons(ETH_P_IP)) 629 if (skb->protocol == htons(ETH_P_IP))
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
641 642
642 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) 643 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
643 + rt->dst.header_len; 644 + rt->dst.header_len;
644 if (max_headroom > dev->needed_headroom) { 645 if (max_headroom > dev->needed_headroom)
645 dev->needed_headroom = max_headroom; 646 dev->needed_headroom = max_headroom;
646 if (skb_cow_head(skb, dev->needed_headroom)) { 647
647 dev->stats.tx_dropped++; 648 if (skb_cow_head(skb, dev->needed_headroom)) {
648 dev_kfree_skb(skb); 649 dev->stats.tx_dropped++;
649 return; 650 dev_kfree_skb(skb);
650 } 651 return;
651 } 652 }
652 653
653 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, 654 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
654 ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df, 655 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
655 !net_eq(tunnel->net, dev_net(dev)));
656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
657 657
658 return; 658 return;
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
853 /* FB netdevice is special: we have one, and only one per netns. 853 /* FB netdevice is special: we have one, and only one per netns.
854 * Allowing to move it to another netns is clearly unsafe. 854 * Allowing to move it to another netns is clearly unsafe.
855 */ 855 */
856 if (!IS_ERR(itn->fb_tunnel_dev)) 856 if (!IS_ERR(itn->fb_tunnel_dev)) {
857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
858 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
859 }
858 rtnl_unlock(); 860 rtnl_unlock();
859 861
860 return PTR_RET(itn->fb_tunnel_dev); 862 return PTR_RET(itn->fb_tunnel_dev);
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
884 if (!net_eq(dev_net(t->dev), net)) 886 if (!net_eq(dev_net(t->dev), net))
885 unregister_netdevice_queue(t->dev, head); 887 unregister_netdevice_queue(t->dev, head);
886 } 888 }
887 if (itn->fb_tunnel_dev)
888 unregister_netdevice_queue(itn->fb_tunnel_dev, head);
889} 889}
890 890
891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) 891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index d6c856b17fd4..c31e3ad98ef2 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
61 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 61 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
62 62
63 /* Push down and install the IP header. */ 63 /* Push down and install the IP header. */
64 __skb_push(skb, sizeof(struct iphdr)); 64 skb_push(skb, sizeof(struct iphdr));
65 skb_reset_network_header(skb); 65 skb_reset_network_header(skb);
66 66
67 iph = ip_hdr(skb); 67 iph = ip_hdr(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9ae54b09254f..62212c772a4b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1658,7 +1658,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1658 iph->protocol = IPPROTO_IPIP; 1658 iph->protocol = IPPROTO_IPIP;
1659 iph->ihl = 5; 1659 iph->ihl = 5;
1660 iph->tot_len = htons(skb->len); 1660 iph->tot_len = htons(skb->len);
1661 ip_select_ident(iph, skb_dst(skb), NULL); 1661 ip_select_ident(skb, skb_dst(skb), NULL);
1662 ip_send_check(iph); 1662 ip_send_check(iph);
1663 1663
1664 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1664 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 67e17dcda65e..b6346bf2fde3 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
267 if (th == NULL) 267 if (th == NULL)
268 return NF_DROP; 268 return NF_DROP;
269 269
270 synproxy_parse_options(skb, par->thoff, th, &opts); 270 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
271 return NF_DROP;
271 272
272 if (th->syn && !(th->ack || th->fin || th->rst)) { 273 if (th->syn && !(th->ack || th->fin || th->rst)) {
273 /* Initial SYN from client */ 274 /* Initial SYN from client */
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
350 351
351 /* fall through */ 352 /* fall through */
352 case TCP_CONNTRACK_SYN_SENT: 353 case TCP_CONNTRACK_SYN_SENT:
353 synproxy_parse_options(skb, thoff, th, &opts); 354 if (!synproxy_parse_options(skb, thoff, th, &opts))
355 return NF_DROP;
354 356
355 if (!th->syn && th->ack && 357 if (!th->syn && th->ack &&
356 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 358 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
373 if (!th->syn || !th->ack) 375 if (!th->syn || !th->ack)
374 break; 376 break;
375 377
376 synproxy_parse_options(skb, thoff, th, &opts); 378 if (!synproxy_parse_options(skb, thoff, th, &opts))
379 return NF_DROP;
380
377 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 381 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
378 synproxy->tsoff = opts.tsval - synproxy->its; 382 synproxy->tsoff = opts.tsval - synproxy->its;
379 383
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a86c7ae71881..193db03540ad 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
218 218
219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
220 ipv4_sk_update_pmtu(skb, sk, info); 220 ipv4_sk_update_pmtu(skb, sk, info);
221 else if (type == ICMP_REDIRECT) 221 else if (type == ICMP_REDIRECT) {
222 ipv4_sk_redirect(skb, sk); 222 ipv4_sk_redirect(skb, sk);
223 return;
224 }
223 225
224 /* Report error on raw socket, if: 226 /* Report error on raw socket, if:
225 1. User requested ip_recverr. 227 1. User requested ip_recverr.
@@ -387,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
387 iph->check = 0; 389 iph->check = 0;
388 iph->tot_len = htons(length); 390 iph->tot_len = htons(length);
389 if (!iph->id) 391 if (!iph->id)
390 ip_select_ident(iph, &rt->dst, NULL); 392 ip_select_ident(skb, &rt->dst, NULL);
391 393
392 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 394 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
393 } 395 }
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4a22f3e715df..52f3c6b971d2 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -502,7 +502,9 @@ reset:
502 * ACKs, wait for troubles. 502 * ACKs, wait for troubles.
503 */ 503 */
504 if (crtt > tp->srtt) { 504 if (crtt > tp->srtt) {
505 inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk)); 505 /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
506 crtt >>= 3;
507 inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
506 } else if (tp->srtt == 0) { 508 } else if (tp->srtt == 0) {
507 /* RFC6298: 5.7 We've failed to get a valid RTT sample from 509 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
508 * 3WHS. This is most likely due to retransmission, 510 * 3WHS. This is most likely due to retransmission,
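
The tcp_metrics hunk above seeds the retransmission timer from the cached RTT the way tcp_rtt_estimator() would from a first sample: crtt is stored in the same <<3 fixed-point form as srtt, so it is scaled back down and the RTO becomes crtt + max(2*crtt, rto_min). A worked example of that arithmetic, assuming HZ=1000 and a 200 ms minimum RTO:

        #include <stdio.h>

        int main(void)
        {
                unsigned long rto_min = 200;            /* jiffies, ~TCP_RTO_MIN at HZ=1000 */
                unsigned long crtt = 48 << 3;           /* cached RTT of 48 ms, stored <<3 */
                unsigned long rto;

                crtt >>= 3;                             /* back to jiffies: 48 */
                rto = crtt + (2 * crtt > rto_min ? 2 * crtt : rto_min);

                printf("initial RTO = %lu ms\n", rto);  /* 48 + max(96, 200) = 248 */
                return 0;
        }
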
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7c83cb8bf137..e6bb8256e59f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -895,8 +895,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
895 895
896 skb_orphan(skb); 896 skb_orphan(skb);
897 skb->sk = sk; 897 skb->sk = sk;
898 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? 898 skb->destructor = tcp_wfree;
899 tcp_wfree : sock_wfree;
900 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 899 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
901 900
902 /* Build TCP header and checksum it. */ 901 /* Build TCP header and checksum it. */
@@ -1840,7 +1839,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1840 while ((skb = tcp_send_head(sk))) { 1839 while ((skb = tcp_send_head(sk))) {
1841 unsigned int limit; 1840 unsigned int limit;
1842 1841
1843
1844 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1842 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1845 BUG_ON(!tso_segs); 1843 BUG_ON(!tso_segs);
1846 1844
@@ -1869,13 +1867,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1869 break; 1867 break;
1870 } 1868 }
1871 1869
1872 /* TSQ : sk_wmem_alloc accounts skb truesize, 1870 /* TCP Small Queues :
1873 * including skb overhead. But thats OK. 1871 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1872 * This allows for :
1873 * - better RTT estimation and ACK scheduling
1874 * - faster recovery
1875 * - high rates
1874 */ 1876 */
1875 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { 1877 limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
1878
1879 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1876 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1880 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1877 break; 1881 break;
1878 } 1882 }
1883
1879 limit = mss_now; 1884 limit = mss_now;
1880 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1885 if (tso_segs > 1 && !tcp_urg_mode(tp))
1881 limit = tcp_mss_split_point(sk, skb, mss_now, 1886 limit = tcp_mss_split_point(sk, skb, mss_now,
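
The TCP Small Queues hunk above replaces the single global byte budget with a per-socket limit derived from the pacing rate: sk_pacing_rate is in bytes per second, so shifting it right by 10 gives roughly one millisecond worth of data, and the max() with skb->truesize guarantees that at least one full-sized packet can always be queued. A worked example of that limit; the rates and the 66000-byte truesize are made-up illustration values:

        #include <stdio.h>

        static unsigned long tsq_limit(unsigned long truesize, unsigned long pacing_rate)
        {
                unsigned long limit = pacing_rate >> 10;        /* ~1 ms of payload */

                return truesize > limit ? truesize : limit;     /* max(truesize, limit) */
        }

        int main(void)
        {
                /* 10 Gbit/s ~ 1.25e9 B/s: ~1.2 MB may sit in the qdisc/NIC queues */
                printf("10G flow: %lu bytes\n", tsq_limit(66000, 1250000000UL));
                /* 1 Mbit/s ~ 125 kB/s: the limit falls back to one large TSO skb */
                printf("1M flow : %lu bytes\n", tsq_limit(66000, 125000UL));
                return 0;
        }
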
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 74d2c95db57f..0ca44df51ee9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -658,7 +658,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
658 break; 658 break;
659 case ICMP_REDIRECT: 659 case ICMP_REDIRECT:
660 ipv4_sk_redirect(skb, sk); 660 ipv4_sk_redirect(skb, sk);
661 break; 661 goto out;
662 } 662 }
663 663
664 /* 664 /*
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index eb1dd4d643f2..b5663c37f089 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -117,7 +117,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
117 117
118 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 118 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
119 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); 119 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
120 ip_select_ident(top_iph, dst->child, NULL); 120 ip_select_ident(skb, dst->child, NULL);
121 121
122 top_iph->ttl = ip4_dst_hoplimit(dst->child); 122 top_iph->ttl = ip4_dst_hoplimit(dst->child);
123 123
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d6ff12617f36..cd3fb301da38 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1499 return false; 1499 return false;
1500} 1500}
1501 1501
1502/* Compares an address/prefix_len with addresses on device @dev.
1503 * If one is found it returns true.
1504 */
1505bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1506 const unsigned int prefix_len, struct net_device *dev)
1507{
1508 struct inet6_dev *idev;
1509 struct inet6_ifaddr *ifa;
1510 bool ret = false;
1511
1512 rcu_read_lock();
1513 idev = __in6_dev_get(dev);
1514 if (idev) {
1515 read_lock_bh(&idev->lock);
1516 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1517 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1518 if (ret)
1519 break;
1520 }
1521 read_unlock_bh(&idev->lock);
1522 }
1523 rcu_read_unlock();
1524
1525 return ret;
1526}
1527EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1528
1502int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) 1529int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1503{ 1530{
1504 struct inet6_dev *idev; 1531 struct inet6_dev *idev;
@@ -2193,43 +2220,21 @@ ok:
2193 else 2220 else
2194 stored_lft = 0; 2221 stored_lft = 0;
2195 if (!update_lft && !create && stored_lft) { 2222 if (!update_lft && !create && stored_lft) {
2196 if (valid_lft > MIN_VALID_LIFETIME || 2223 const u32 minimum_lft = min(
2197 valid_lft > stored_lft) 2224 stored_lft, (u32)MIN_VALID_LIFETIME);
2198 update_lft = 1; 2225 valid_lft = max(valid_lft, minimum_lft);
2199 else if (stored_lft <= MIN_VALID_LIFETIME) { 2226
2200 /* valid_lft <= stored_lft is always true */ 2227 /* RFC4862 Section 5.5.3e:
2201 /* 2228 * "Note that the preferred lifetime of the
2202 * RFC 4862 Section 5.5.3e: 2229 * corresponding address is always reset to
2203 * "Note that the preferred lifetime of 2230 * the Preferred Lifetime in the received
2204 * the corresponding address is always 2231 * Prefix Information option, regardless of
2205 * reset to the Preferred Lifetime in 2232 * whether the valid lifetime is also reset or
2206 * the received Prefix Information 2233 * ignored."
2207 * option, regardless of whether the 2234 *
2208 * valid lifetime is also reset or 2235 * So we should always update prefered_lft here.
2209 * ignored." 2236 */
2210 * 2237 update_lft = 1;
2211 * So if the preferred lifetime in
2212 * this advertisement is different
2213 * than what we have stored, but the
2214 * valid lifetime is invalid, just
2215 * reset prefered_lft.
2216 *
2217 * We must set the valid lifetime
2218 * to the stored lifetime since we'll
2219 * be updating the timestamp below,
2220 * else we'll set it back to the
2221 * minimum.
2222 */
2223 if (prefered_lft != ifp->prefered_lft) {
2224 valid_lft = stored_lft;
2225 update_lft = 1;
2226 }
2227 } else {
2228 valid_lft = MIN_VALID_LIFETIME;
2229 if (valid_lft < prefered_lft)
2230 prefered_lft = valid_lft;
2231 update_lft = 1;
2232 }
2233 } 2238 }
2234 2239
2235 if (update_lft) { 2240 if (update_lft) {
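
The addrconf rewrite above collapses the RFC 4862 section 5.5.3(e) handling into one rule for this path: the advertised valid lifetime is raised to min(stored_lft, MIN_VALID_LIFETIME) before it is applied, so a small advertised value cannot push an existing address below the two-hour floor, and the preferred lifetime is always refreshed. A worked example of that clamp, assuming the kernel's two-hour MIN_VALID_LIFETIME:

        #include <stdio.h>

        #define MIN_VALID_LIFETIME (2 * 3600)   /* seconds */

        static unsigned int apply_valid_lft(unsigned int advertised, unsigned int stored)
        {
                unsigned int floor = stored < MIN_VALID_LIFETIME ? stored : MIN_VALID_LIFETIME;

                return advertised > floor ? advertised : floor;
        }

        int main(void)
        {
                /* a 10 s advertisement against a stored 86400 s lifetime: clamped to 7200 */
                printf("%u\n", apply_valid_lft(10, 86400));
                /* a stored lifetime already below two hours (300 s) is simply kept */
                printf("%u\n", apply_valid_lft(10, 300));
                /* larger advertised lifetimes pass through unchanged */
                printf("%u\n", apply_valid_lft(14400, 86400));
                return 0;
        }
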
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6b26e9feafb9..7bb5446b9d73 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
618 struct ip6_tnl *tunnel = netdev_priv(dev); 618 struct ip6_tnl *tunnel = netdev_priv(dev);
619 struct net_device *tdev; /* Device to other host */ 619 struct net_device *tdev; /* Device to other host */
620 struct ipv6hdr *ipv6h; /* Our new IP header */ 620 struct ipv6hdr *ipv6h; /* Our new IP header */
621 unsigned int max_headroom; /* The extra header space needed */ 621 unsigned int max_headroom = 0; /* The extra header space needed */
622 int gre_hlen; 622 int gre_hlen;
623 struct ipv6_tel_txoption opt; 623 struct ipv6_tel_txoption opt;
624 int mtu; 624 int mtu;
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
693 693
694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); 694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
695 695
696 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; 696 max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
697 697
698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) || 698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3a692d529163..a54c45ce4a48 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1015,6 +1015,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1015 * udp datagram 1015 * udp datagram
1016 */ 1016 */
1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { 1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1018 struct frag_hdr fhdr;
1019
1018 skb = sock_alloc_send_skb(sk, 1020 skb = sock_alloc_send_skb(sk,
1019 hh_len + fragheaderlen + transhdrlen + 20, 1021 hh_len + fragheaderlen + transhdrlen + 20,
1020 (flags & MSG_DONTWAIT), &err); 1022 (flags & MSG_DONTWAIT), &err);
@@ -1036,12 +1038,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1036 skb->protocol = htons(ETH_P_IPV6); 1038 skb->protocol = htons(ETH_P_IPV6);
1037 skb->ip_summed = CHECKSUM_PARTIAL; 1039 skb->ip_summed = CHECKSUM_PARTIAL;
1038 skb->csum = 0; 1040 skb->csum = 0;
1039 }
1040
1041 err = skb_append_datato_frags(sk,skb, getfrag, from,
1042 (length - transhdrlen));
1043 if (!err) {
1044 struct frag_hdr fhdr;
1045 1041
1046 /* Specify the length of each IPv6 datagram fragment. 1042 /* Specify the length of each IPv6 datagram fragment.
1047 * It has to be a multiple of 8. 1043 * It has to be a multiple of 8.
@@ -1052,15 +1048,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1052 ipv6_select_ident(&fhdr, rt); 1048 ipv6_select_ident(&fhdr, rt);
1053 skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1049 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1054 __skb_queue_tail(&sk->sk_write_queue, skb); 1050 __skb_queue_tail(&sk->sk_write_queue, skb);
1055
1056 return 0;
1057 } 1051 }
1058 /* There is not enough support do UPD LSO,
1059 * so follow normal path
1060 */
1061 kfree_skb(skb);
1062 1052
1063 return err; 1053 return skb_append_datato_frags(sk, skb, getfrag, from,
1054 (length - transhdrlen));
1064} 1055}
1065 1056
1066static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, 1057static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1227,27 +1218,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1227 * --yoshfuji 1218 * --yoshfuji
1228 */ 1219 */
1229 1220
1230 cork->length += length; 1221 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1231 if (length > mtu) { 1222 sk->sk_protocol == IPPROTO_RAW)) {
1232 int proto = sk->sk_protocol; 1223 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1233 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ 1224 return -EMSGSIZE;
1234 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); 1225 }
1235 return -EMSGSIZE;
1236 }
1237
1238 if (proto == IPPROTO_UDP &&
1239 (rt->dst.dev->features & NETIF_F_UFO)) {
1240 1226
1241 err = ip6_ufo_append_data(sk, getfrag, from, length, 1227 skb = skb_peek_tail(&sk->sk_write_queue);
1242 hh_len, fragheaderlen, 1228 cork->length += length;
1243 transhdrlen, mtu, flags, rt); 1229 if (((length > mtu) ||
1244 if (err) 1230 (skb && skb_is_gso(skb))) &&
1245 goto error; 1231 (sk->sk_protocol == IPPROTO_UDP) &&
1246 return 0; 1232 (rt->dst.dev->features & NETIF_F_UFO)) {
1247 } 1233 err = ip6_ufo_append_data(sk, getfrag, from, length,
1234 hh_len, fragheaderlen,
1235 transhdrlen, mtu, flags, rt);
1236 if (err)
1237 goto error;
1238 return 0;
1248 } 1239 }
1249 1240
1250 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 1241 if (!skb)
1251 goto alloc_new_skb; 1242 goto alloc_new_skb;
1252 1243
1253 while (length > 0) { 1244 while (length > 0) {
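The restructured ip6_output.c hunks above make the UFO branch fire not only when the datagram exceeds the MTU but also when a GSO skb is already queued on the socket, and only for UDP on a device advertising UFO. A rough standalone sketch of that trigger condition only; should_take_ufo_path and its parameters are invented names, and the real append logic is omitted.

#include <stdbool.h>
#include <stdio.h>

#define IPPROTO_UDP 17

/* Mirror of the new trigger: large payload OR an already-GSO tail skb,
 * restricted to UDP sockets on UFO-capable devices. */
static bool should_take_ufo_path(size_t length, size_t mtu,
                                 bool tail_skb_is_gso,
                                 int sk_protocol, bool dev_has_ufo)
{
        return (length > mtu || tail_skb_is_gso) &&
               sk_protocol == IPPROTO_UDP &&
               dev_has_ufo;
}

int main(void)
{
        /* A small follow-up datagram still joins the GSO skb queued before it. */
        printf("%d\n", should_take_ufo_path(512, 1500, true, IPPROTO_UDP, true));
        /* Non-UDP traffic never takes the path. */
        printf("%d\n", should_take_ufo_path(4000, 1500, false, 6, true));
        return 0;
}
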
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 61355f7f4da5..a791552e0422 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1656,9 +1656,9 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
1656 1656
1657 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || 1657 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
1658 nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr), 1658 nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr),
1659 &parm->raddr) ||
1660 nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
1661 &parm->laddr) || 1659 &parm->laddr) ||
1660 nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr),
1661 &parm->raddr) ||
1662 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) || 1662 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
1663 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || 1663 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
1664 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || 1664 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
@@ -1731,8 +1731,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1731 } 1731 }
1732 } 1732 }
1733 1733
1734 t = rtnl_dereference(ip6n->tnls_wc[0]);
1735 unregister_netdevice_queue(t->dev, &list);
1736 unregister_netdevice_many(&list); 1734 unregister_netdevice_many(&list);
1737} 1735}
1738 1736
@@ -1752,6 +1750,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1752 if (!ip6n->fb_tnl_dev) 1750 if (!ip6n->fb_tnl_dev)
1753 goto err_alloc_dev; 1751 goto err_alloc_dev;
1754 dev_net_set(ip6n->fb_tnl_dev, net); 1752 dev_net_set(ip6n->fb_tnl_dev, net);
1753 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
1755 /* FB netdevice is special: we have one, and only one per netns. 1754 /* FB netdevice is special: we have one, and only one per netns.
1756 * Allowing to move it to another netns is clearly unsafe. 1755 * Allowing to move it to another netns is clearly unsafe.
1757 */ 1756 */
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 096cd67b737c..d18f9f903db6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data)
2034 if (idev->mc_dad_count) 2034 if (idev->mc_dad_count)
2035 mld_dad_start_timer(idev, idev->mc_maxdelay); 2035 mld_dad_start_timer(idev, idev->mc_maxdelay);
2036 } 2036 }
2037 __in6_dev_put(idev); 2037 in6_dev_put(idev);
2038} 2038}
2039 2039
2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data)
2379 2379
2380 idev->mc_gq_running = 0; 2380 idev->mc_gq_running = 0;
2381 mld_send_report(idev, NULL); 2381 mld_send_report(idev, NULL);
2382 __in6_dev_put(idev); 2382 in6_dev_put(idev);
2383} 2383}
2384 2384
2385static void mld_ifc_timer_expire(unsigned long data) 2385static void mld_ifc_timer_expire(unsigned long data)
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data)
2392 if (idev->mc_ifc_count) 2392 if (idev->mc_ifc_count)
2393 mld_ifc_start_timer(idev, idev->mc_maxdelay); 2393 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2394 } 2394 }
2395 __in6_dev_put(idev); 2395 in6_dev_put(idev);
2396} 2396}
2397 2397
2398static void mld_ifc_event(struct inet6_dev *idev) 2398static void mld_ifc_event(struct inet6_dev *idev)
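The three mcast.c hunks above swap __in6_dev_put() (a bare refcount decrement) for in6_dev_put() (decrement and free when the count reaches zero) in timer-expiry paths, so dropping the last reference from a timer cannot leak the device. A small userspace refcount sketch of the difference; the object and function names here are invented.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcnt;
};

/* Bare decrement: caller must know somebody else will free the object. */
static void __obj_put(struct obj *o)
{
        o->refcnt--;
}

/* Decrement and release: safe when this may be the last reference,
 * which is exactly the situation in a timer-expiry handler. */
static void obj_put(struct obj *o)
{
        if (--o->refcnt == 0) {
                printf("freeing object\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        o->refcnt = 1;
        obj_put(o);          /* last reference: object is freed, no leak */
        (void)__obj_put;     /* the bare variant would have leaked it    */
        return 0;
}
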
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 19cfea8dbcaa..2748b042da72 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
282 if (th == NULL) 282 if (th == NULL)
283 return NF_DROP; 283 return NF_DROP;
284 284
285 synproxy_parse_options(skb, par->thoff, th, &opts); 285 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
286 return NF_DROP;
286 287
287 if (th->syn && !(th->ack || th->fin || th->rst)) { 288 if (th->syn && !(th->ack || th->fin || th->rst)) {
288 /* Initial SYN from client */ 289 /* Initial SYN from client */
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
372 373
373 /* fall through */ 374 /* fall through */
374 case TCP_CONNTRACK_SYN_SENT: 375 case TCP_CONNTRACK_SYN_SENT:
375 synproxy_parse_options(skb, thoff, th, &opts); 376 if (!synproxy_parse_options(skb, thoff, th, &opts))
377 return NF_DROP;
376 378
377 if (!th->syn && th->ack && 379 if (!th->syn && th->ack &&
378 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 380 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
395 if (!th->syn || !th->ack) 397 if (!th->syn || !th->ack)
396 break; 398 break;
397 399
398 synproxy_parse_options(skb, thoff, th, &opts); 400 if (!synproxy_parse_options(skb, thoff, th, &opts))
401 return NF_DROP;
402
399 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 403 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
400 synproxy->tsoff = opts.tsval - synproxy->its; 404 synproxy->tsoff = opts.tsval - synproxy->its;
401 405
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 61aaf70f376e..2205e8eeeacf 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -69,8 +69,8 @@ icmpv6_manip_pkt(struct sk_buff *skb,
69 hdr = (struct icmp6hdr *)(skb->data + hdroff); 69 hdr = (struct icmp6hdr *)(skb->data + hdroff);
70 l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum, 70 l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
71 tuple, maniptype); 71 tuple, maniptype);
72 if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST || 72 if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
73 hdr->icmp6_code == ICMPV6_ECHO_REPLY) { 73 hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
74 inet_proto_csum_replace2(&hdr->icmp6_cksum, skb, 74 inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
75 hdr->icmp6_identifier, 75 hdr->icmp6_identifier,
76 tuple->src.u.icmp.id, 0); 76 tuple->src.u.icmp.id, 0);
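The nf_nat_proto_icmpv6.c hunk above fixes a field mix-up: ICMPv6 echo request/reply are identified by the type field, not the code field (the code is 0 for both). A tiny sketch of the corrected check with a hand-rolled header struct; the struct and function names are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ICMPV6_ECHO_REQUEST 128
#define ICMPV6_ECHO_REPLY   129

struct icmp6_hdr_min {
        uint8_t icmp6_type;   /* message type: this is what selects echo */
        uint8_t icmp6_code;   /* sub-code: 0 for both echo messages      */
};

static bool is_icmpv6_echo(const struct icmp6_hdr_min *hdr)
{
        /* Compare the type, not the code, against the echo constants. */
        return hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
               hdr->icmp6_type == ICMPV6_ECHO_REPLY;
}

int main(void)
{
        struct icmp6_hdr_min ping = { .icmp6_type = 128, .icmp6_code = 0 };

        printf("%d\n", is_icmpv6_echo(&ping));   /* prints 1 */
        return 0;
}
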
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 58916bbb1728..a4ed2416399e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -335,8 +335,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
335 ip6_sk_update_pmtu(skb, sk, info); 335 ip6_sk_update_pmtu(skb, sk, info);
336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); 336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
337 } 337 }
338 if (type == NDISC_REDIRECT) 338 if (type == NDISC_REDIRECT) {
339 ip6_sk_redirect(skb, sk); 339 ip6_sk_redirect(skb, sk);
340 return;
341 }
340 if (np->recverr) { 342 if (np->recverr) {
341 u8 *payload = skb->data; 343 u8 *payload = skb->data;
342 if (!inet->hdrincl) 344 if (!inet->hdrincl)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 7ee5cb96db34..19269453a8ea 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
566 return false; 566 return false;
567} 567}
568 568
569/* Checks if an address matches an address on the tunnel interface.
570 * Used to detect the NAT of proto 41 packets and let them pass spoofing test.
571 * Long story:
572 * This function is called after we considered the packet as spoofed
573 * in is_spoofed_6rd.
574 * We may have a router that is doing NAT for proto 41 packets
575 * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
576 * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd
577 * function will return true, dropping the packet.
578 * But, we can still check if is spoofed against the IP
579 * addresses associated with the interface.
580 */
581static bool only_dnatted(const struct ip_tunnel *tunnel,
582 const struct in6_addr *v6dst)
583{
584 int prefix_len;
585
586#ifdef CONFIG_IPV6_SIT_6RD
587 prefix_len = tunnel->ip6rd.prefixlen + 32
588 - tunnel->ip6rd.relay_prefixlen;
589#else
590 prefix_len = 48;
591#endif
592 return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
593}
594
595/* Returns true if a packet is spoofed */
596static bool packet_is_spoofed(struct sk_buff *skb,
597 const struct iphdr *iph,
598 struct ip_tunnel *tunnel)
599{
600 const struct ipv6hdr *ipv6h;
601
602 if (tunnel->dev->priv_flags & IFF_ISATAP) {
603 if (!isatap_chksrc(skb, iph, tunnel))
604 return true;
605
606 return false;
607 }
608
609 if (tunnel->dev->flags & IFF_POINTOPOINT)
610 return false;
611
612 ipv6h = ipv6_hdr(skb);
613
614 if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
615 net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
616 &iph->saddr, &ipv6h->saddr,
617 &iph->daddr, &ipv6h->daddr);
618 return true;
619 }
620
621 if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
622 return false;
623
624 if (only_dnatted(tunnel, &ipv6h->daddr))
625 return false;
626
627 net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
628 &iph->saddr, &ipv6h->saddr,
629 &iph->daddr, &ipv6h->daddr);
630 return true;
631}
632
569static int ipip6_rcv(struct sk_buff *skb) 633static int ipip6_rcv(struct sk_buff *skb)
570{ 634{
571 const struct iphdr *iph = ip_hdr(skb); 635 const struct iphdr *iph = ip_hdr(skb);
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb)
586 IPCB(skb)->flags = 0; 650 IPCB(skb)->flags = 0;
587 skb->protocol = htons(ETH_P_IPV6); 651 skb->protocol = htons(ETH_P_IPV6);
588 652
589 if (tunnel->dev->priv_flags & IFF_ISATAP) { 653 if (packet_is_spoofed(skb, iph, tunnel)) {
590 if (!isatap_chksrc(skb, iph, tunnel)) { 654 tunnel->dev->stats.rx_errors++;
591 tunnel->dev->stats.rx_errors++; 655 goto out;
592 goto out;
593 }
594 } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
595 if (is_spoofed_6rd(tunnel, iph->saddr,
596 &ipv6_hdr(skb)->saddr) ||
597 is_spoofed_6rd(tunnel, iph->daddr,
598 &ipv6_hdr(skb)->daddr)) {
599 tunnel->dev->stats.rx_errors++;
600 goto out;
601 }
602 } 656 }
603 657
604 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); 658 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
748 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 802 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
749 803
750 if (neigh == NULL) { 804 if (neigh == NULL) {
751 net_dbg_ratelimited("sit: nexthop == NULL\n"); 805 net_dbg_ratelimited("nexthop == NULL\n");
752 goto tx_error; 806 goto tx_error;
753 } 807 }
754 808
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
777 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 831 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
778 832
779 if (neigh == NULL) { 833 if (neigh == NULL) {
780 net_dbg_ratelimited("sit: nexthop == NULL\n"); 834 net_dbg_ratelimited("nexthop == NULL\n");
781 goto tx_error; 835 goto tx_error;
782 } 836 }
783 837
@@ -1612,6 +1666,7 @@ static int __net_init sit_init_net(struct net *net)
1612 goto err_alloc_dev; 1666 goto err_alloc_dev;
1613 } 1667 }
1614 dev_net_set(sitn->fb_tunnel_dev, net); 1668 dev_net_set(sitn->fb_tunnel_dev, net);
1669 sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
1615 /* FB netdevice is special: we have one, and only one per netns. 1670 /* FB netdevice is special: we have one, and only one per netns.
1616 * Allowing to move it to another netns is clearly unsafe. 1671 * Allowing to move it to another netns is clearly unsafe.
1617 */ 1672 */
@@ -1646,7 +1701,6 @@ static void __net_exit sit_exit_net(struct net *net)
1646 1701
1647 rtnl_lock(); 1702 rtnl_lock();
1648 sit_destroy_tunnels(sitn, &list); 1703 sit_destroy_tunnels(sitn, &list);
1649 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1650 unregister_netdevice_many(&list); 1704 unregister_netdevice_many(&list);
1651 rtnl_unlock(); 1705 rtnl_unlock();
1652} 1706}
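In the sit.c changes above, only_dnatted() derives how many bits of the IPv6 destination must match an interface address: the configured 6rd prefix length plus the 32 IPv4 bits minus the relay prefix length, or 48 when 6rd support is compiled out. A standalone sketch of just that arithmetic; the names are invented and the address lookup itself is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Prefix length used for the "DNAT only" spoof exception.
 * with_6rd mirrors CONFIG_IPV6_SIT_6RD being enabled. */
static int dnat_check_prefix_len(bool with_6rd,
                                 int ip6rd_prefixlen, int relay_prefixlen)
{
        if (with_6rd)
                return ip6rd_prefixlen + 32 - relay_prefixlen;
        return 48;   /* default sit prefix when 6rd is not built in */
}

int main(void)
{
        /* Typical 6rd setup: a /32 delegated prefix, a /0 relay prefix. */
        printf("%d\n", dnat_check_prefix_len(true, 32, 0));   /* prints 64 */
        printf("%d\n", dnat_check_prefix_len(false, 0, 0));   /* prints 48 */
        return 0;
}
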
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f4058150262b..72b7eaaf3ca0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,8 +525,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
525 525
526 if (type == ICMPV6_PKT_TOOBIG) 526 if (type == ICMPV6_PKT_TOOBIG)
527 ip6_sk_update_pmtu(skb, sk, info); 527 ip6_sk_update_pmtu(skb, sk, info);
528 if (type == NDISC_REDIRECT) 528 if (type == NDISC_REDIRECT) {
529 ip6_sk_redirect(skb, sk); 529 ip6_sk_redirect(skb, sk);
530 goto out;
531 }
530 532
531 np = inet6_sk(sk); 533 np = inet6_sk(sk);
532 534
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 54563ad8aeb1..355cc3b6fa4d 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param)
154 } else { 154 } else {
155 lapb->n2count++; 155 lapb->n2count++;
156 lapb_requeue_frames(lapb); 156 lapb_requeue_frames(lapb);
157 lapb_kick(lapb);
157 } 158 }
158 break; 159 break;
159 160
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f77139007983..f2e30fb31e78 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1052,7 +1052,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
1052 * Not an artificial restriction anymore, as we must prevent 1052 * Not an artificial restriction anymore, as we must prevent
1053 * possible loops created by swapping in setlist type of sets. */ 1053 * possible loops created by swapping in setlist type of sets. */
1054 if (!(from->type->features == to->type->features && 1054 if (!(from->type->features == to->type->features &&
1055 from->type->family == to->type->family)) 1055 from->family == to->family))
1056 return -IPSET_ERR_TYPE_MISMATCH; 1056 return -IPSET_ERR_TYPE_MISMATCH;
1057 1057
1058 strncpy(from_name, from->name, IPSET_MAXNAMELEN); 1058 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
@@ -1489,8 +1489,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
1489 if (ret == -EAGAIN) 1489 if (ret == -EAGAIN)
1490 ret = 1; 1490 ret = 1;
1491 1491
1492 return (ret < 0 && ret != -ENOTEMPTY) ? ret : 1492 return ret > 0 ? 0 : -IPSET_ERR_EXIST;
1493 ret > 0 ? 0 : -IPSET_ERR_EXIST;
1494} 1493}
1495 1494
1496/* Get headed data of a set */ 1495/* Get headed data of a set */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 6fdf88ae2353..dac156f819ac 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -116,12 +116,12 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
116{ 116{
117 int protoff; 117 int protoff;
118 u8 nexthdr; 118 u8 nexthdr;
119 __be16 frag_off; 119 __be16 frag_off = 0;
120 120
121 nexthdr = ipv6_hdr(skb)->nexthdr; 121 nexthdr = ipv6_hdr(skb)->nexthdr;
122 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, 122 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
123 &frag_off); 123 &frag_off);
124 if (protoff < 0) 124 if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
125 return false; 125 return false;
126 126
127 return get_port(skb, nexthdr, protoff, src, port, proto); 127 return get_port(skb, nexthdr, protoff, src, port, proto);
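The ip_set_getport.c hunk above initializes frag_off and additionally rejects packets whose fragment offset is non-zero, since only the first fragment carries the transport header with the ports; htons(~0x7) masks off the three low flag bits and keeps the 13-bit offset. A small sketch of that test; the function name is invented.

#include <arpa/inet.h>   /* htons */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* frag_off is the 16-bit field of the IPv6 fragment header in network
 * byte order: 13 bits of offset plus 3 low flag bits.  Only a packet
 * with offset 0 (the first fragment) still contains the L4 ports. */
static bool ports_available(int protoff, uint16_t frag_off)
{
        if (protoff < 0)
                return false;                         /* malformed headers  */
        if ((frag_off & htons((uint16_t)~0x7)) != 0)
                return false;                         /* non-first fragment */
        return true;
}

int main(void)
{
        printf("%d\n", ports_available(40, 0));             /* 1: not fragmented  */
        printf("%d\n", ports_available(40, htons(8 | 1)));  /* 0: offset 1, M set */
        return 0;
}
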
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 57beb1762b2d..707bc520d629 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -325,18 +325,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
325static void 325static void
326mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length) 326mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
327{ 327{
328 u8 i, j; 328 u8 i, j, net_end = nets_length - 1;
329 329
330 for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++) 330 for (i = 0; i < nets_length; i++) {
331 ; 331 if (h->nets[i].cidr != cidr)
332 h->nets[i].nets--; 332 continue;
333 333 if (h->nets[i].nets > 1 || i == net_end ||
334 if (h->nets[i].nets != 0) 334 h->nets[i + 1].nets == 0) {
335 return; 335 h->nets[i].nets--;
336 336 return;
337 for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) { 337 }
338 h->nets[j].cidr = h->nets[j + 1].cidr; 338 for (j = i; j < net_end && h->nets[j].nets; j++) {
339 h->nets[j].nets = h->nets[j + 1].nets; 339 h->nets[j].cidr = h->nets[j + 1].cidr;
340 h->nets[j].nets = h->nets[j + 1].nets;
341 }
342 h->nets[j].nets = 0;
343 return;
340 } 344 }
341} 345}
342#endif 346#endif
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index c6a525373be4..f15f3e28b9c3 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -260,7 +260,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
260 e.ip = htonl(ip); 260 e.ip = htonl(ip);
261 e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1)); 261 e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1));
262 ret = adtfn(set, &e, &ext, &ext, flags); 262 ret = adtfn(set, &e, &ext, &ext, flags);
263 return ip_set_enomatch(ret, flags, adt) ? 1 : 263 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
264 ip_set_eexist(ret, flags) ? 0 : ret; 264 ip_set_eexist(ret, flags) ? 0 : ret;
265 } 265 }
266 266
@@ -544,7 +544,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
544 544
545 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 545 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
546 ret = adtfn(set, &e, &ext, &ext, flags); 546 ret = adtfn(set, &e, &ext, &ext, flags);
547 return ip_set_enomatch(ret, flags, adt) ? 1 : 547 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
548 ip_set_eexist(ret, flags) ? 0 : ret; 548 ip_set_eexist(ret, flags) ? 0 : ret;
549 } 549 }
550 550
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index da740ceb56ae..223e9f546d0f 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -199,7 +199,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
199 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 199 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
200 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); 200 e.ip = htonl(ip & ip_set_hostmask(e.cidr));
201 ret = adtfn(set, &e, &ext, &ext, flags); 201 ret = adtfn(set, &e, &ext, &ext, flags);
202 return ip_set_enomatch(ret, flags, adt) ? 1 : 202 return ip_set_enomatch(ret, flags, adt, set) ? -ret:
203 ip_set_eexist(ret, flags) ? 0 : ret; 203 ip_set_eexist(ret, flags) ? 0 : ret;
204 } 204 }
205 205
@@ -396,7 +396,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
396 396
397 ret = adtfn(set, &e, &ext, &ext, flags); 397 ret = adtfn(set, &e, &ext, &ext, flags);
398 398
399 return ip_set_enomatch(ret, flags, adt) ? 1 : 399 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
400 ip_set_eexist(ret, flags) ? 0 : ret; 400 ip_set_eexist(ret, flags) ? 0 : ret;
401} 401}
402 402
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 84ae6f6ce624..7d798d5d5cd3 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -368,7 +368,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
368 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 368 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
369 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); 369 e.ip = htonl(ip & ip_set_hostmask(e.cidr));
370 ret = adtfn(set, &e, &ext, &ext, flags); 370 ret = adtfn(set, &e, &ext, &ext, flags);
371 return ip_set_enomatch(ret, flags, adt) ? 1 : 371 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
372 ip_set_eexist(ret, flags) ? 0 : ret; 372 ip_set_eexist(ret, flags) ? 0 : ret;
373 } 373 }
374 374
@@ -634,7 +634,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
634 634
635 ret = adtfn(set, &e, &ext, &ext, flags); 635 ret = adtfn(set, &e, &ext, &ext, flags);
636 636
637 return ip_set_enomatch(ret, flags, adt) ? 1 : 637 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
638 ip_set_eexist(ret, flags) ? 0 : ret; 638 ip_set_eexist(ret, flags) ? 0 : ret;
639} 639}
640 640
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 9a0869853be5..09d6690bee6f 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -244,7 +244,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
244 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { 244 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
245 e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1)); 245 e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1));
246 ret = adtfn(set, &e, &ext, &ext, flags); 246 ret = adtfn(set, &e, &ext, &ext, flags);
247 return ip_set_enomatch(ret, flags, adt) ? 1 : 247 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
248 ip_set_eexist(ret, flags) ? 0 : ret; 248 ip_set_eexist(ret, flags) ? 0 : ret;
249 } 249 }
250 250
@@ -489,7 +489,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
489 489
490 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 490 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
491 ret = adtfn(set, &e, &ext, &ext, flags); 491 ret = adtfn(set, &e, &ext, &ext, flags);
492 return ip_set_enomatch(ret, flags, adt) ? 1 : 492 return ip_set_enomatch(ret, flags, adt, set) ? -ret :
493 ip_set_eexist(ret, flags) ? 0 : ret; 493 ip_set_eexist(ret, flags) ? 0 : ret;
494 } 494 }
495 495
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f69e83ff836..74fd00c27210 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
116 116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s; 118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
119 120
120 s = this_cpu_ptr(dest->stats.cpustats); 121 s = this_cpu_ptr(dest->stats.cpustats);
121 s->ustats.inpkts++; 122 s->ustats.inpkts++;
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
123 s->ustats.inbytes += skb->len; 124 s->ustats.inbytes += skb->len;
124 u64_stats_update_end(&s->syncp); 125 u64_stats_update_end(&s->syncp);
125 126
126 s = this_cpu_ptr(dest->svc->stats.cpustats); 127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
127 s->ustats.inpkts++; 130 s->ustats.inpkts++;
128 u64_stats_update_begin(&s->syncp); 131 u64_stats_update_begin(&s->syncp);
129 s->ustats.inbytes += skb->len; 132 s->ustats.inbytes += skb->len;
130 u64_stats_update_end(&s->syncp); 133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
131 135
132 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
133 s->ustats.inpkts++; 137 s->ustats.inpkts++;
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
146 150
147 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
148 struct ip_vs_cpu_stats *s; 152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
149 154
150 s = this_cpu_ptr(dest->stats.cpustats); 155 s = this_cpu_ptr(dest->stats.cpustats);
151 s->ustats.outpkts++; 156 s->ustats.outpkts++;
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
153 s->ustats.outbytes += skb->len; 158 s->ustats.outbytes += skb->len;
154 u64_stats_update_end(&s->syncp); 159 u64_stats_update_end(&s->syncp);
155 160
156 s = this_cpu_ptr(dest->svc->stats.cpustats); 161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
157 s->ustats.outpkts++; 164 s->ustats.outpkts++;
158 u64_stats_update_begin(&s->syncp); 165 u64_stats_update_begin(&s->syncp);
159 s->ustats.outbytes += skb->len; 166 s->ustats.outbytes += skb->len;
160 u64_stats_update_end(&s->syncp); 167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
161 169
162 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
163 s->ustats.outpkts++; 171 s->ustats.outpkts++;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c8148e487386..a3df9bddc4f7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -460,7 +460,7 @@ static inline void
460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) 460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
461{ 461{
462 atomic_inc(&svc->refcnt); 462 atomic_inc(&svc->refcnt);
463 dest->svc = svc; 463 rcu_assign_pointer(dest->svc, svc);
464} 464}
465 465
466static void ip_vs_service_free(struct ip_vs_service *svc) 466static void ip_vs_service_free(struct ip_vs_service *svc)
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
470 kfree(svc); 470 kfree(svc);
471} 471}
472 472
473static void 473static void ip_vs_service_rcu_free(struct rcu_head *head)
474__ip_vs_unbind_svc(struct ip_vs_dest *dest)
475{ 474{
476 struct ip_vs_service *svc = dest->svc; 475 struct ip_vs_service *svc;
476
477 svc = container_of(head, struct ip_vs_service, rcu_head);
478 ip_vs_service_free(svc);
479}
477 480
478 dest->svc = NULL; 481static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
482{
479 if (atomic_dec_and_test(&svc->refcnt)) { 483 if (atomic_dec_and_test(&svc->refcnt)) {
480 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", 484 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
481 svc->fwmark, 485 svc->fwmark,
482 IP_VS_DBG_ADDR(svc->af, &svc->addr), 486 IP_VS_DBG_ADDR(svc->af, &svc->addr),
483 ntohs(svc->port)); 487 ntohs(svc->port));
484 ip_vs_service_free(svc); 488 if (do_delay)
489 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
490 else
491 ip_vs_service_free(svc);
485 } 492 }
486} 493}
487 494
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
667 IP_VS_DBG_ADDR(svc->af, &dest->addr), 674 IP_VS_DBG_ADDR(svc->af, &dest->addr),
668 ntohs(dest->port), 675 ntohs(dest->port),
669 atomic_read(&dest->refcnt)); 676 atomic_read(&dest->refcnt));
670 /* We can not reuse dest while in grace period
671 * because conns still can use dest->svc
672 */
673 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
674 continue;
675 if (dest->af == svc->af && 677 if (dest->af == svc->af &&
676 ip_vs_addr_equal(svc->af, &dest->addr, daddr) && 678 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
677 dest->port == dport && 679 dest->port == dport &&
@@ -697,8 +699,10 @@ out:
697 699
698static void ip_vs_dest_free(struct ip_vs_dest *dest) 700static void ip_vs_dest_free(struct ip_vs_dest *dest)
699{ 701{
702 struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
703
700 __ip_vs_dst_cache_reset(dest); 704 __ip_vs_dst_cache_reset(dest);
701 __ip_vs_unbind_svc(dest); 705 __ip_vs_svc_put(svc, false);
702 free_percpu(dest->stats.cpustats); 706 free_percpu(dest->stats.cpustats);
703 kfree(dest); 707 kfree(dest);
704} 708}
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
771 struct ip_vs_dest_user_kern *udest, int add) 775 struct ip_vs_dest_user_kern *udest, int add)
772{ 776{
773 struct netns_ipvs *ipvs = net_ipvs(svc->net); 777 struct netns_ipvs *ipvs = net_ipvs(svc->net);
778 struct ip_vs_service *old_svc;
774 struct ip_vs_scheduler *sched; 779 struct ip_vs_scheduler *sched;
775 int conn_flags; 780 int conn_flags;
776 781
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
792 atomic_set(&dest->conn_flags, conn_flags); 797 atomic_set(&dest->conn_flags, conn_flags);
793 798
794 /* bind the service */ 799 /* bind the service */
795 if (!dest->svc) { 800 old_svc = rcu_dereference_protected(dest->svc, 1);
801 if (!old_svc) {
796 __ip_vs_bind_svc(dest, svc); 802 __ip_vs_bind_svc(dest, svc);
797 } else { 803 } else {
798 if (dest->svc != svc) { 804 if (old_svc != svc) {
799 __ip_vs_unbind_svc(dest);
800 ip_vs_zero_stats(&dest->stats); 805 ip_vs_zero_stats(&dest->stats);
801 __ip_vs_bind_svc(dest, svc); 806 __ip_vs_bind_svc(dest, svc);
807 __ip_vs_svc_put(old_svc, true);
802 } 808 }
803 } 809 }
804 810
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
998 return 0; 1004 return 0;
999} 1005}
1000 1006
1001static void ip_vs_dest_wait_readers(struct rcu_head *head)
1002{
1003 struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
1004 rcu_head);
1005
1006 /* End of grace period after unlinking */
1007 clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1008}
1009
1010
1011/* 1007/*
1012 * Delete a destination (must be already unlinked from the service) 1008 * Delete a destination (must be already unlinked from the service)
1013 */ 1009 */
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
1023 */ 1019 */
1024 ip_vs_rs_unhash(dest); 1020 ip_vs_rs_unhash(dest);
1025 1021
1026 if (!cleanup) {
1027 set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1028 call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
1029 }
1030
1031 spin_lock_bh(&ipvs->dest_trash_lock); 1022 spin_lock_bh(&ipvs->dest_trash_lock);
1032 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", 1023 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
1033 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 1024 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
1034 atomic_read(&dest->refcnt)); 1025 atomic_read(&dest->refcnt));
1035 if (list_empty(&ipvs->dest_trash) && !cleanup) 1026 if (list_empty(&ipvs->dest_trash) && !cleanup)
1036 mod_timer(&ipvs->dest_trash_timer, 1027 mod_timer(&ipvs->dest_trash_timer,
1037 jiffies + IP_VS_DEST_TRASH_PERIOD); 1028 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1038 /* dest lives in trash without reference */ 1029 /* dest lives in trash without reference */
1039 list_add(&dest->t_list, &ipvs->dest_trash); 1030 list_add(&dest->t_list, &ipvs->dest_trash);
1031 dest->idle_start = 0;
1040 spin_unlock_bh(&ipvs->dest_trash_lock); 1032 spin_unlock_bh(&ipvs->dest_trash_lock);
1041 ip_vs_dest_put(dest); 1033 ip_vs_dest_put(dest);
1042} 1034}
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
1108 struct net *net = (struct net *) data; 1100 struct net *net = (struct net *) data;
1109 struct netns_ipvs *ipvs = net_ipvs(net); 1101 struct netns_ipvs *ipvs = net_ipvs(net);
1110 struct ip_vs_dest *dest, *next; 1102 struct ip_vs_dest *dest, *next;
1103 unsigned long now = jiffies;
1111 1104
1112 spin_lock(&ipvs->dest_trash_lock); 1105 spin_lock(&ipvs->dest_trash_lock);
1113 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1106 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
1114 /* Skip if dest is in grace period */
1115 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
1116 continue;
1117 if (atomic_read(&dest->refcnt) > 0) 1107 if (atomic_read(&dest->refcnt) > 0)
1118 continue; 1108 continue;
1109 if (dest->idle_start) {
1110 if (time_before(now, dest->idle_start +
1111 IP_VS_DEST_TRASH_PERIOD))
1112 continue;
1113 } else {
1114 dest->idle_start = max(1UL, now);
1115 continue;
1116 }
1119 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", 1117 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
1120 dest->vfwmark, 1118 dest->vfwmark,
1121 IP_VS_DBG_ADDR(dest->svc->af, &dest->addr), 1119 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1122 ntohs(dest->port)); 1120 ntohs(dest->port));
1123 list_del(&dest->t_list); 1121 list_del(&dest->t_list);
1124 ip_vs_dest_free(dest); 1122 ip_vs_dest_free(dest);
1125 } 1123 }
1126 if (!list_empty(&ipvs->dest_trash)) 1124 if (!list_empty(&ipvs->dest_trash))
1127 mod_timer(&ipvs->dest_trash_timer, 1125 mod_timer(&ipvs->dest_trash_timer,
1128 jiffies + IP_VS_DEST_TRASH_PERIOD); 1126 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1129 spin_unlock(&ipvs->dest_trash_lock); 1127 spin_unlock(&ipvs->dest_trash_lock);
1130} 1128}
1131 1129
@@ -1320,14 +1318,6 @@ out:
1320 return ret; 1318 return ret;
1321} 1319}
1322 1320
1323static void ip_vs_service_rcu_free(struct rcu_head *head)
1324{
1325 struct ip_vs_service *svc;
1326
1327 svc = container_of(head, struct ip_vs_service, rcu_head);
1328 ip_vs_service_free(svc);
1329}
1330
1331/* 1321/*
1332 * Delete a service from the service list 1322 * Delete a service from the service list
1333 * - The service must be unlinked, unlocked and not referenced! 1323 * - The service must be unlinked, unlocked and not referenced!
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
1376 /* 1366 /*
1377 * Free the service if nobody refers to it 1367 * Free the service if nobody refers to it
1378 */ 1368 */
1379 if (atomic_dec_and_test(&svc->refcnt)) { 1369 __ip_vs_svc_put(svc, true);
1380 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
1381 svc->fwmark,
1382 IP_VS_DBG_ADDR(svc->af, &svc->addr),
1383 ntohs(svc->port));
1384 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
1385 }
1386 1370
1387 /* decrease the module use count */ 1371 /* decrease the module use count */
1388 ip_vs_use_count_dec(); 1372 ip_vs_use_count_dec();
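The ip_vs_ctl.c trash rework above replaces the RCU grace-period flag with a timestamp: an unreferenced destination is first stamped with idle_start, and only a later timer pass that finds the stamp older than the trash period actually frees it, with the timer now firing at half the period. A simplified userspace sketch of the two-pass idea using wall-clock seconds; the structure and names are invented.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TRASH_PERIOD 120   /* seconds an idle entry must sit before freeing */

struct trash_entry {
        int    refcnt;
        time_t idle_start;  /* 0 means "not yet stamped" */
};

/* One timer pass: returns true when the entry may be freed now. */
static bool trash_expire_pass(struct trash_entry *e, time_t now)
{
        if (e->refcnt > 0)
                return false;              /* still referenced, skip      */
        if (e->idle_start == 0) {
                e->idle_start = now;       /* first idle pass: stamp only */
                return false;
        }
        /* later passes: free once the full period has elapsed */
        return now >= e->idle_start + TRASH_PERIOD;
}

int main(void)
{
        struct trash_entry e = { .refcnt = 0, .idle_start = 0 };
        time_t t0 = time(NULL);

        printf("%d\n", trash_expire_pass(&e, t0));                    /* 0: stamped  */
        printf("%d\n", trash_expire_pass(&e, t0 + TRASH_PERIOD / 2)); /* 0: too soon */
        printf("%d\n", trash_expire_pass(&e, t0 + TRASH_PERIOD));     /* 1: expired  */
        return 0;
}
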
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 6bee6d0c73a5..1425e9a924c4 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
59 struct ip_vs_cpu_stats __percpu *stats) 59 struct ip_vs_cpu_stats __percpu *stats)
60{ 60{
61 int i; 61 int i;
62 bool add = false;
62 63
63 for_each_possible_cpu(i) { 64 for_each_possible_cpu(i) {
64 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); 65 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
65 unsigned int start; 66 unsigned int start;
66 __u64 inbytes, outbytes; 67 __u64 inbytes, outbytes;
67 if (i) { 68 if (add) {
68 sum->conns += s->ustats.conns; 69 sum->conns += s->ustats.conns;
69 sum->inpkts += s->ustats.inpkts; 70 sum->inpkts += s->ustats.inpkts;
70 sum->outpkts += s->ustats.outpkts; 71 sum->outpkts += s->ustats.outpkts;
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
76 sum->inbytes += inbytes; 77 sum->inbytes += inbytes;
77 sum->outbytes += outbytes; 78 sum->outbytes += outbytes;
78 } else { 79 } else {
80 add = true;
79 sum->conns = s->ustats.conns; 81 sum->conns = s->ustats.conns;
80 sum->inpkts = s->ustats.inpkts; 82 sum->inpkts = s->ustats.inpkts;
81 sum->outpkts = s->ustats.outpkts; 83 sum->outpkts = s->ustats.outpkts;
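The ip_vs_est.c hunk above replaces "if (i)" with an explicit add flag because the first iteration of for_each_possible_cpu() is not necessarily CPU 0; keying the initialize-vs-accumulate decision on the index can skip initialization and read stale sums. A plain C sketch of the same pattern over a sparse set of "possible" slots; the names are invented.

#include <stdbool.h>
#include <stdio.h>

struct cpu_stats { unsigned long inpkts; };

/* Sum stats over the slots marked possible.  The first *possible* slot
 * initializes the sum; every later one accumulates, regardless of index. */
static unsigned long sum_inpkts(const struct cpu_stats *stats,
                                const bool *possible, int ncpu)
{
        unsigned long sum = 0;
        bool add = false;
        int i;

        for (i = 0; i < ncpu; i++) {
                if (!possible[i])
                        continue;
                if (add) {
                        sum += stats[i].inpkts;
                } else {
                        add = true;
                        sum = stats[i].inpkts;
                }
        }
        return sum;
}

int main(void)
{
        /* CPU 0 is not possible here, which is exactly the case that broke
         * the old "if (i)" test. */
        struct cpu_stats stats[4] = { {1000}, {10}, {20}, {30} };
        bool possible[4] = { false, true, true, true };

        printf("%lu\n", sum_inpkts(stats, possible, 4));   /* prints 60 */
        return 0;
}
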
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1383b0eadc0e..eff13c94498e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
93 struct hlist_node list; 93 struct hlist_node list;
94 int af; /* address family */ 94 int af; /* address family */
95 union nf_inet_addr addr; /* destination IP address */ 95 union nf_inet_addr addr; /* destination IP address */
96 struct ip_vs_dest __rcu *dest; /* real server (cache) */ 96 struct ip_vs_dest *dest; /* real server (cache) */
97 unsigned long lastuse; /* last used time */ 97 unsigned long lastuse; /* last used time */
98 struct rcu_head rcu_head; 98 struct rcu_head rcu_head;
99}; 99};
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
130}; 130};
131#endif 131#endif
132 132
133static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 133static void ip_vs_lblc_rcu_free(struct rcu_head *head)
134{ 134{
135 struct ip_vs_dest *dest; 135 struct ip_vs_lblc_entry *en = container_of(head,
136 struct ip_vs_lblc_entry,
137 rcu_head);
136 138
137 hlist_del_rcu(&en->list); 139 ip_vs_dest_put(en->dest);
138 /* 140 kfree(en);
139 * We don't kfree dest because it is referred either by its service
140 * or the trash dest list.
141 */
142 dest = rcu_dereference_protected(en->dest, 1);
143 ip_vs_dest_put(dest);
144 kfree_rcu(en, rcu_head);
145} 141}
146 142
143static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
144{
145 hlist_del_rcu(&en->list);
146 call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
147}
147 148
148/* 149/*
149 * Returns hash value for IPVS LBLC entry 150 * Returns hash value for IPVS LBLC entry
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
203 struct ip_vs_lblc_entry *en; 204 struct ip_vs_lblc_entry *en;
204 205
205 en = ip_vs_lblc_get(dest->af, tbl, daddr); 206 en = ip_vs_lblc_get(dest->af, tbl, daddr);
206 if (!en) { 207 if (en) {
207 en = kmalloc(sizeof(*en), GFP_ATOMIC); 208 if (en->dest == dest)
208 if (!en) 209 return en;
209 return NULL; 210 ip_vs_lblc_del(en);
210 211 }
211 en->af = dest->af; 212 en = kmalloc(sizeof(*en), GFP_ATOMIC);
212 ip_vs_addr_copy(dest->af, &en->addr, daddr); 213 if (!en)
213 en->lastuse = jiffies; 214 return NULL;
214 215
215 ip_vs_dest_hold(dest); 216 en->af = dest->af;
216 RCU_INIT_POINTER(en->dest, dest); 217 ip_vs_addr_copy(dest->af, &en->addr, daddr);
218 en->lastuse = jiffies;
217 219
218 ip_vs_lblc_hash(tbl, en); 220 ip_vs_dest_hold(dest);
219 } else { 221 en->dest = dest;
220 struct ip_vs_dest *old_dest;
221 222
222 old_dest = rcu_dereference_protected(en->dest, 1); 223 ip_vs_lblc_hash(tbl, en);
223 if (old_dest != dest) {
224 ip_vs_dest_put(old_dest);
225 ip_vs_dest_hold(dest);
226 /* No ordering constraints for refcnt */
227 RCU_INIT_POINTER(en->dest, dest);
228 }
229 }
230 224
231 return en; 225 return en;
232} 226}
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
246 tbl->dead = 1; 240 tbl->dead = 1;
247 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 241 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { 242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
249 ip_vs_lblc_free(en); 243 ip_vs_lblc_del(en);
250 atomic_dec(&tbl->entries); 244 atomic_dec(&tbl->entries);
251 } 245 }
252 } 246 }
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
281 sysctl_lblc_expiration(svc))) 275 sysctl_lblc_expiration(svc)))
282 continue; 276 continue;
283 277
284 ip_vs_lblc_free(en); 278 ip_vs_lblc_del(en);
285 atomic_dec(&tbl->entries); 279 atomic_dec(&tbl->entries);
286 } 280 }
287 spin_unlock(&svc->sched_lock); 281 spin_unlock(&svc->sched_lock);
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
335 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 329 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
336 continue; 330 continue;
337 331
338 ip_vs_lblc_free(en); 332 ip_vs_lblc_del(en);
339 atomic_dec(&tbl->entries); 333 atomic_dec(&tbl->entries);
340 goal--; 334 goal--;
341 } 335 }
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
443 continue; 437 continue;
444 438
445 doh = ip_vs_dest_conn_overhead(dest); 439 doh = ip_vs_dest_conn_overhead(dest);
446 if (loh * atomic_read(&dest->weight) > 440 if ((__s64)loh * atomic_read(&dest->weight) >
447 doh * atomic_read(&least->weight)) { 441 (__s64)doh * atomic_read(&least->weight)) {
448 least = dest; 442 least = dest;
449 loh = doh; 443 loh = doh;
450 } 444 }
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
511 * free up entries from the trash at any time. 505 * free up entries from the trash at any time.
512 */ 506 */
513 507
514 dest = rcu_dereference(en->dest); 508 dest = en->dest;
515 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && 509 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
516 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) 510 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
517 goto out; 511 goto out;
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
631{ 625{
632 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); 626 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
633 unregister_pernet_subsys(&ip_vs_lblc_ops); 627 unregister_pernet_subsys(&ip_vs_lblc_ops);
634 synchronize_rcu(); 628 rcu_barrier();
635} 629}
636 630
637 631
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 5199448697f6..0b8550089a2e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -89,7 +89,7 @@
89 */ 89 */
90struct ip_vs_dest_set_elem { 90struct ip_vs_dest_set_elem {
91 struct list_head list; /* list link */ 91 struct list_head list; /* list link */
92 struct ip_vs_dest __rcu *dest; /* destination server */ 92 struct ip_vs_dest *dest; /* destination server */
93 struct rcu_head rcu_head; 93 struct rcu_head rcu_head;
94}; 94};
95 95
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
107 107
108 if (check) { 108 if (check) {
109 list_for_each_entry(e, &set->list, list) { 109 list_for_each_entry(e, &set->list, list) {
110 struct ip_vs_dest *d; 110 if (e->dest == dest)
111
112 d = rcu_dereference_protected(e->dest, 1);
113 if (d == dest)
114 /* already existed */
115 return; 111 return;
116 } 112 }
117 } 113 }
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
121 return; 117 return;
122 118
123 ip_vs_dest_hold(dest); 119 ip_vs_dest_hold(dest);
124 RCU_INIT_POINTER(e->dest, dest); 120 e->dest = dest;
125 121
126 list_add_rcu(&e->list, &set->list); 122 list_add_rcu(&e->list, &set->list);
127 atomic_inc(&set->size); 123 atomic_inc(&set->size);
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
129 set->lastmod = jiffies; 125 set->lastmod = jiffies;
130} 126}
131 127
128static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
129{
130 struct ip_vs_dest_set_elem *e;
131
132 e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
133 ip_vs_dest_put(e->dest);
134 kfree(e);
135}
136
132static void 137static void
133ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 138ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
134{ 139{
135 struct ip_vs_dest_set_elem *e; 140 struct ip_vs_dest_set_elem *e;
136 141
137 list_for_each_entry(e, &set->list, list) { 142 list_for_each_entry(e, &set->list, list) {
138 struct ip_vs_dest *d; 143 if (e->dest == dest) {
139
140 d = rcu_dereference_protected(e->dest, 1);
141 if (d == dest) {
142 /* HIT */ 144 /* HIT */
143 atomic_dec(&set->size); 145 atomic_dec(&set->size);
144 set->lastmod = jiffies; 146 set->lastmod = jiffies;
145 ip_vs_dest_put(dest);
146 list_del_rcu(&e->list); 147 list_del_rcu(&e->list);
147 kfree_rcu(e, rcu_head); 148 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
148 break; 149 break;
149 } 150 }
150 } 151 }
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
155 struct ip_vs_dest_set_elem *e, *ep; 156 struct ip_vs_dest_set_elem *e, *ep;
156 157
157 list_for_each_entry_safe(e, ep, &set->list, list) { 158 list_for_each_entry_safe(e, ep, &set->list, list) {
158 struct ip_vs_dest *d;
159
160 d = rcu_dereference_protected(e->dest, 1);
161 /*
162 * We don't kfree dest because it is referred either
163 * by its service or by the trash dest list.
164 */
165 ip_vs_dest_put(d);
166 list_del_rcu(&e->list); 159 list_del_rcu(&e->list);
167 kfree_rcu(e, rcu_head); 160 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
168 } 161 }
169} 162}
170 163
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
175 struct ip_vs_dest *dest, *least; 168 struct ip_vs_dest *dest, *least;
176 int loh, doh; 169 int loh, doh;
177 170
178 if (set == NULL)
179 return NULL;
180
181 /* select the first destination server, whose weight > 0 */ 171 /* select the first destination server, whose weight > 0 */
182 list_for_each_entry_rcu(e, &set->list, list) { 172 list_for_each_entry_rcu(e, &set->list, list) {
183 least = rcu_dereference(e->dest); 173 least = e->dest;
184 if (least->flags & IP_VS_DEST_F_OVERLOAD) 174 if (least->flags & IP_VS_DEST_F_OVERLOAD)
185 continue; 175 continue;
186 176
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
195 /* find the destination with the weighted least load */ 185 /* find the destination with the weighted least load */
196 nextstage: 186 nextstage:
197 list_for_each_entry_continue_rcu(e, &set->list, list) { 187 list_for_each_entry_continue_rcu(e, &set->list, list) {
198 dest = rcu_dereference(e->dest); 188 dest = e->dest;
199 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 189 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
200 continue; 190 continue;
201 191
202 doh = ip_vs_dest_conn_overhead(dest); 192 doh = ip_vs_dest_conn_overhead(dest);
203 if ((loh * atomic_read(&dest->weight) > 193 if (((__s64)loh * atomic_read(&dest->weight) >
204 doh * atomic_read(&least->weight)) 194 (__s64)doh * atomic_read(&least->weight))
205 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 195 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
206 least = dest; 196 least = dest;
207 loh = doh; 197 loh = doh;
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
232 222
233 /* select the first destination server, whose weight > 0 */ 223 /* select the first destination server, whose weight > 0 */
234 list_for_each_entry(e, &set->list, list) { 224 list_for_each_entry(e, &set->list, list) {
235 most = rcu_dereference_protected(e->dest, 1); 225 most = e->dest;
236 if (atomic_read(&most->weight) > 0) { 226 if (atomic_read(&most->weight) > 0) {
237 moh = ip_vs_dest_conn_overhead(most); 227 moh = ip_vs_dest_conn_overhead(most);
238 goto nextstage; 228 goto nextstage;
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
243 /* find the destination with the weighted most load */ 233 /* find the destination with the weighted most load */
244 nextstage: 234 nextstage:
245 list_for_each_entry_continue(e, &set->list, list) { 235 list_for_each_entry_continue(e, &set->list, list) {
246 dest = rcu_dereference_protected(e->dest, 1); 236 dest = e->dest;
247 doh = ip_vs_dest_conn_overhead(dest); 237 doh = ip_vs_dest_conn_overhead(dest);
248 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 238 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
249 if ((moh * atomic_read(&dest->weight) < 239 if (((__s64)moh * atomic_read(&dest->weight) <
250 doh * atomic_read(&most->weight)) 240 (__s64)doh * atomic_read(&most->weight))
251 && (atomic_read(&dest->weight) > 0)) { 241 && (atomic_read(&dest->weight) > 0)) {
252 most = dest; 242 most = dest;
253 moh = doh; 243 moh = doh;
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
611 continue; 601 continue;
612 602
613 doh = ip_vs_dest_conn_overhead(dest); 603 doh = ip_vs_dest_conn_overhead(dest);
614 if (loh * atomic_read(&dest->weight) > 604 if ((__s64)loh * atomic_read(&dest->weight) >
615 doh * atomic_read(&least->weight)) { 605 (__s64)doh * atomic_read(&least->weight)) {
616 least = dest; 606 least = dest;
617 loh = doh; 607 loh = doh;
618 } 608 }
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
819{ 809{
820 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 810 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
821 unregister_pernet_subsys(&ip_vs_lblcr_ops); 811 unregister_pernet_subsys(&ip_vs_lblcr_ops);
822 synchronize_rcu(); 812 rcu_barrier();
823} 813}
824 814
825 815
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index d8d9860934fe..961a6de9bb29 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -40,7 +40,7 @@
40#include <net/ip_vs.h> 40#include <net/ip_vs.h>
41 41
42 42
43static inline unsigned int 43static inline int
44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
45{ 45{
46 /* 46 /*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
59 struct ip_vs_iphdr *iph) 59 struct ip_vs_iphdr *iph)
60{ 60{
61 struct ip_vs_dest *dest, *least = NULL; 61 struct ip_vs_dest *dest, *least = NULL;
62 unsigned int loh = 0, doh; 62 int loh = 0, doh;
63 63
64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
65 65
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
92 } 92 }
93 93
94 if (!least || 94 if (!least ||
95 (loh * atomic_read(&dest->weight) > 95 ((__s64)loh * atomic_read(&dest->weight) >
96 doh * atomic_read(&least->weight))) { 96 (__s64)doh * atomic_read(&least->weight))) {
97 least = dest; 97 least = dest;
98 loh = doh; 98 loh = doh;
99 } 99 }
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index a5284cc3d882..e446b9fa7424 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -44,7 +44,7 @@
44#include <net/ip_vs.h> 44#include <net/ip_vs.h>
45 45
46 46
47static inline unsigned int 47static inline int
48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
49{ 49{
50 /* 50 /*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
63 struct ip_vs_iphdr *iph) 63 struct ip_vs_iphdr *iph)
64{ 64{
65 struct ip_vs_dest *dest, *least; 65 struct ip_vs_dest *dest, *least;
66 unsigned int loh, doh; 66 int loh, doh;
67 67
68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
69 69
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
99 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 99 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
100 continue; 100 continue;
101 doh = ip_vs_sed_dest_overhead(dest); 101 doh = ip_vs_sed_dest_overhead(dest);
102 if (loh * atomic_read(&dest->weight) > 102 if ((__s64)loh * atomic_read(&dest->weight) >
103 doh * atomic_read(&least->weight)) { 103 (__s64)doh * atomic_read(&least->weight)) {
104 least = dest; 104 least = dest;
105 loh = doh; 105 loh = doh;
106 } 106 }
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 6dc1fa128840..b5b4650d50a9 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
35 struct ip_vs_iphdr *iph) 35 struct ip_vs_iphdr *iph)
36{ 36{
37 struct ip_vs_dest *dest, *least; 37 struct ip_vs_dest *dest, *least;
38 unsigned int loh, doh; 38 int loh, doh;
39 39
40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); 40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
41 41
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
71 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 71 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
72 continue; 72 continue;
73 doh = ip_vs_dest_conn_overhead(dest); 73 doh = ip_vs_dest_conn_overhead(dest);
74 if (loh * atomic_read(&dest->weight) > 74 if ((__s64)loh * atomic_read(&dest->weight) >
75 doh * atomic_read(&least->weight)) { 75 (__s64)doh * atomic_read(&least->weight)) {
76 least = dest; 76 least = dest;
77 loh = doh; 77 loh = doh;
78 } 78 }
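All of the scheduler hunks above (lblc, lblcr, nq, sed, wlc) cast the overhead values to __s64 before multiplying by the weights: in 32-bit arithmetic a large connection overhead times a large weight can overflow and invert the comparison, picking the wrong server. A standalone sketch of the widened comparison; the function name and the numbers are made up.

#include <stdint.h>
#include <stdio.h>

/* Pick the less loaded of two servers by cross-multiplying overhead and
 * weight.  Widening to 64 bits first keeps the products, which can exceed
 * INT32_MAX, from overflowing. */
static int dest_is_less_loaded(int32_t loh, int32_t least_weight,
                               int32_t doh, int32_t dest_weight)
{
        return (int64_t)loh * dest_weight > (int64_t)doh * least_weight;
}

int main(void)
{
        /* 100000 * 60000 = 6e9 and 90000 * 50000 = 4.5e9: both products are
         * far beyond INT32_MAX, so 32-bit math would have wrapped and could
         * invert the decision.  In 64 bits the answer is stable. */
        printf("%d\n", dest_is_less_loaded(100000, 50000, 90000, 60000)); /* 1 */
        return 0;
}
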
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index b75ff6429a04..c47444e4cf8c 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
883 iph->daddr = cp->daddr.ip; 883 iph->daddr = cp->daddr.ip;
884 iph->saddr = saddr; 884 iph->saddr = saddr;
885 iph->ttl = old_iph->ttl; 885 iph->ttl = old_iph->ttl;
886 ip_select_ident(iph, &rt->dst, NULL); 886 ip_select_ident(skb, &rt->dst, NULL);
887 887
888 /* Another hack: avoid icmp_send in ip_fragment */ 888 /* Another hack: avoid icmp_send in ip_fragment */
889 skb->local_df = 1; 889 skb->local_df = 1;
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 6fd967c6278c..cdf4567ba9b3 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -24,7 +24,7 @@
24int synproxy_net_id; 24int synproxy_net_id;
25EXPORT_SYMBOL_GPL(synproxy_net_id); 25EXPORT_SYMBOL_GPL(synproxy_net_id);
26 26
27void 27bool
28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
29 const struct tcphdr *th, struct synproxy_options *opts) 29 const struct tcphdr *th, struct synproxy_options *opts)
30{ 30{
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
32 u8 buf[40], *ptr; 32 u8 buf[40], *ptr;
33 33
34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); 34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
35 BUG_ON(ptr == NULL); 35 if (ptr == NULL)
36 return false;
36 37
37 opts->options = 0; 38 opts->options = 0;
38 while (length > 0) { 39 while (length > 0) {
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
41 42
42 switch (opcode) { 43 switch (opcode) {
43 case TCPOPT_EOL: 44 case TCPOPT_EOL:
44 return; 45 return true;
45 case TCPOPT_NOP: 46 case TCPOPT_NOP:
46 length--; 47 length--;
47 continue; 48 continue;
48 default: 49 default:
49 opsize = *ptr++; 50 opsize = *ptr++;
50 if (opsize < 2) 51 if (opsize < 2)
51 return; 52 return true;
52 if (opsize > length) 53 if (opsize > length)
53 return; 54 return true;
54 55
55 switch (opcode) { 56 switch (opcode) {
56 case TCPOPT_MSS: 57 case TCPOPT_MSS:
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
84 length -= opsize; 85 length -= opsize;
85 } 86 }
86 } 87 }
88 return true;
87} 89}
88EXPORT_SYMBOL_GPL(synproxy_parse_options); 90EXPORT_SYMBOL_GPL(synproxy_parse_options);
89 91
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 95a98c8c1da6..ae2e5c11d01a 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -1009,7 +1009,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
1009 verdict = NF_DROP; 1009 verdict = NF_DROP;
1010 1010
1011 if (ct) 1011 if (ct)
1012 nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff); 1012 nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
1013 } 1013 }
1014 1014
1015 if (nfqa[NFQA_MARK]) 1015 if (nfqa[NFQA_MARK])
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 32ad015ee8ce..a2fef8b10b96 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
285 285
286 286
287/* remove one skb from head of flow queue */ 287/* remove one skb from head of flow queue */
288static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) 288static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
289{ 289{
290 struct sk_buff *skb = flow->head; 290 struct sk_buff *skb = flow->head;
291 291
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
293 flow->head = skb->next; 293 flow->head = skb->next;
294 skb->next = NULL; 294 skb->next = NULL;
295 flow->qlen--; 295 flow->qlen--;
296 sch->qstats.backlog -= qdisc_pkt_len(skb);
297 sch->q.qlen--;
296 } 298 }
297 return skb; 299 return skb;
298} 300}
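Moving the backlog and qlen bookkeeping into fq_dequeue_head() above means every path that pulls an skb out of a flow (the normal dequeue and the new reset walk further down) keeps the qdisc counters consistent, instead of only the fq_dequeue() fast path adjusting them. A toy sketch of the same idea with byte and packet counters updated inside the single dequeue helper; everything here is invented.

#include <stdio.h>

struct pkt {
        struct pkt  *next;
        unsigned int len;
};

struct queue {
        struct pkt  *head;
        unsigned int qlen;      /* packets currently queued */
        unsigned int backlog;   /* bytes currently queued   */
};

/* The one and only place the counters are decremented, so every caller
 * (fast path, reset, drop) stays consistent automatically. */
static struct pkt *dequeue_head(struct queue *q)
{
        struct pkt *p = q->head;

        if (p) {
                q->head = p->next;
                p->next = NULL;
                q->qlen--;
                q->backlog -= p->len;
        }
        return p;
}

static void enqueue(struct queue *q, struct pkt *p)
{
        p->next = q->head;      /* LIFO is fine for the sketch */
        q->head = p;
        q->qlen++;
        q->backlog += p->len;
}

int main(void)
{
        struct queue q = { 0 };
        struct pkt a = { .len = 100 }, b = { .len = 60 };

        enqueue(&q, &a);
        enqueue(&q, &b);
        (void)dequeue_head(&q);                    /* reset/drop path */
        printf("%u packets, %u bytes left\n", q.qlen, q.backlog); /* 1, 100 */
        return 0;
}
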
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
418 struct fq_flow_head *head; 420 struct fq_flow_head *head;
419 struct sk_buff *skb; 421 struct sk_buff *skb;
420 struct fq_flow *f; 422 struct fq_flow *f;
423 u32 rate;
421 424
422 skb = fq_dequeue_head(&q->internal); 425 skb = fq_dequeue_head(sch, &q->internal);
423 if (skb) 426 if (skb)
424 goto out; 427 goto out;
425 fq_check_throttled(q, now); 428 fq_check_throttled(q, now);
@@ -449,7 +452,7 @@ begin:
449 goto begin; 452 goto begin;
450 } 453 }
451 454
452 skb = fq_dequeue_head(f); 455 skb = fq_dequeue_head(sch, f);
453 if (!skb) { 456 if (!skb) {
454 head->first = f->next; 457 head->first = f->next;
455 /* force a pass through old_flows to prevent starvation */ 458 /* force a pass through old_flows to prevent starvation */
@@ -466,43 +469,74 @@ begin:
466 f->time_next_packet = now; 469 f->time_next_packet = now;
467 f->credit -= qdisc_pkt_len(skb); 470 f->credit -= qdisc_pkt_len(skb);
468 471
469 if (f->credit <= 0 && 472 if (f->credit > 0 || !q->rate_enable)
470 q->rate_enable && 473 goto out;
471 skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
472 u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
473 474
474 rate = min(rate, q->flow_max_rate); 475 if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
475 if (rate) { 476 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
476 u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
477
478 do_div(len, rate);
479 /* Since socket rate can change later,
480 * clamp the delay to 125 ms.
481 * TODO: maybe segment the too big skb, as in commit
482 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
483 */
484 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
485 len = 125 * NSEC_PER_MSEC;
486 q->stat_pkts_too_long++;
487 }
488 477
489 f->time_next_packet = now + len; 478 rate = min(rate, q->flow_max_rate);
479 } else {
480 rate = q->flow_max_rate;
481 if (rate == ~0U)
482 goto out;
483 }
484 if (rate) {
485 u32 plen = max(qdisc_pkt_len(skb), q->quantum);
486 u64 len = (u64)plen * NSEC_PER_SEC;
487
488 do_div(len, rate);
489 /* Since socket rate can change later,
490 * clamp the delay to 125 ms.
491 * TODO: maybe segment the too big skb, as in commit
492 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
493 */
494 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
495 len = 125 * NSEC_PER_MSEC;
496 q->stat_pkts_too_long++;
490 } 497 }
498
499 f->time_next_packet = now + len;
491 } 500 }
492out: 501out:
493 sch->qstats.backlog -= qdisc_pkt_len(skb);
494 qdisc_bstats_update(sch, skb); 502 qdisc_bstats_update(sch, skb);
495 sch->q.qlen--;
496 qdisc_unthrottled(sch); 503 qdisc_unthrottled(sch);
497 return skb; 504 return skb;
498} 505}
499 506
500static void fq_reset(struct Qdisc *sch) 507static void fq_reset(struct Qdisc *sch)
501{ 508{
509 struct fq_sched_data *q = qdisc_priv(sch);
510 struct rb_root *root;
502 struct sk_buff *skb; 511 struct sk_buff *skb;
512 struct rb_node *p;
513 struct fq_flow *f;
514 unsigned int idx;
503 515
504 while ((skb = fq_dequeue(sch)) != NULL) 516 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
505 kfree_skb(skb); 517 kfree_skb(skb);
518
519 if (!q->fq_root)
520 return;
521
522 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
523 root = &q->fq_root[idx];
524 while ((p = rb_first(root)) != NULL) {
525 f = container_of(p, struct fq_flow, fq_node);
526 rb_erase(p, root);
527
528 while ((skb = fq_dequeue_head(sch, f)) != NULL)
529 kfree_skb(skb);
530
531 kmem_cache_free(fq_flow_cachep, f);
532 }
533 }
534 q->new_flows.first = NULL;
535 q->old_flows.first = NULL;
536 q->delayed = RB_ROOT;
537 q->flows = 0;
538 q->inactive_flows = 0;
539 q->throttled_flows = 0;
506} 540}
507 541
508static void fq_rehash(struct fq_sched_data *q, 542static void fq_rehash(struct fq_sched_data *q,
@@ -645,6 +679,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
645 while (sch->q.qlen > sch->limit) { 679 while (sch->q.qlen > sch->limit) {
646 struct sk_buff *skb = fq_dequeue(sch); 680 struct sk_buff *skb = fq_dequeue(sch);
647 681
682 if (!skb)
683 break;
648 kfree_skb(skb); 684 kfree_skb(skb);
649 drop_count++; 685 drop_count++;
650 } 686 }
@@ -657,21 +693,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
657static void fq_destroy(struct Qdisc *sch) 693static void fq_destroy(struct Qdisc *sch)
658{ 694{
659 struct fq_sched_data *q = qdisc_priv(sch); 695 struct fq_sched_data *q = qdisc_priv(sch);
660 struct rb_root *root;
661 struct rb_node *p;
662 unsigned int idx;
663 696
664 if (q->fq_root) { 697 fq_reset(sch);
665 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { 698 kfree(q->fq_root);
666 root = &q->fq_root[idx];
667 while ((p = rb_first(root)) != NULL) {
668 rb_erase(p, root);
669 kmem_cache_free(fq_flow_cachep,
670 container_of(p, struct fq_flow, fq_node));
671 }
672 }
673 kfree(q->fq_root);
674 }
675 qdisc_watchdog_cancel(&q->watchdog); 699 qdisc_watchdog_cancel(&q->watchdog);
676} 700}
677 701
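Two themes run through the sch_fq hunks above: fq_dequeue_head() now takes the Qdisc and adjusts qstats.backlog and q.qlen itself, so every path that pulls an skb out (including the new rbtree-walking fq_reset()) keeps the counters consistent, and the pacing delay is computed from max(packet length, quantum) and clamped at 125 ms. A small self-contained sketch of that delay arithmetic (fq_pacing_delay_ns() is a hypothetical helper, not kernel code):

#include <stdint.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_MSEC 1000000ULL

/* Delay the flow long enough to hold it at `rate` bytes/sec, pacing at
 * least one quantum, but never more than 125 ms because the socket's
 * pacing rate may change later. */
static uint64_t fq_pacing_delay_ns(uint32_t plen, uint32_t quantum, uint32_t rate)
{
        uint64_t len;

        if (rate == 0)
                return 0;               /* pacing disabled */
        if (plen < quantum)
                plen = quantum;
        len = (uint64_t)plen * NSEC_PER_SEC / rate;
        return len > 125 * NSEC_PER_MSEC ? 125 * NSEC_PER_MSEC : len;
}

For example, a 1500-byte packet paced at 1 Mbyte/s waits 1.5 ms, while at 10 kbyte/s the raw result of 150 ms is clamped to 125 ms, since the socket's pacing rate may well have changed by then.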
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5f2068679f83..98b69bbecdd9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -634,8 +634,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
634 break; 634 break;
635 case ICMP_REDIRECT: 635 case ICMP_REDIRECT:
636 sctp_icmp_redirect(sk, transport, skb); 636 sctp_icmp_redirect(sk, transport, skb);
637 err = 0; 637 /* Fall through to out_unlock. */
638 break;
639 default: 638 default:
640 goto out_unlock; 639 goto out_unlock;
641 } 640 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index da613ceae28c..e7b2d4fe2b6a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -183,7 +183,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
183 break; 183 break;
184 case NDISC_REDIRECT: 184 case NDISC_REDIRECT:
185 sctp_icmp_redirect(sk, transport, skb); 185 sctp_icmp_redirect(sk, transport, skb);
186 break; 186 goto out_unlock;
187 default: 187 default:
188 break; 188 break;
189 } 189 }
@@ -204,44 +204,23 @@ out:
204 in6_dev_put(idev); 204 in6_dev_put(idev);
205} 205}
206 206
207/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
208static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) 207static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
209{ 208{
210 struct sock *sk = skb->sk; 209 struct sock *sk = skb->sk;
211 struct ipv6_pinfo *np = inet6_sk(sk); 210 struct ipv6_pinfo *np = inet6_sk(sk);
212 struct flowi6 fl6; 211 struct flowi6 *fl6 = &transport->fl.u.ip6;
213
214 memset(&fl6, 0, sizeof(fl6));
215
216 fl6.flowi6_proto = sk->sk_protocol;
217
218 /* Fill in the dest address from the route entry passed with the skb
219 * and the source address from the transport.
220 */
221 fl6.daddr = transport->ipaddr.v6.sin6_addr;
222 fl6.saddr = transport->saddr.v6.sin6_addr;
223
224 fl6.flowlabel = np->flow_label;
225 IP6_ECN_flow_xmit(sk, fl6.flowlabel);
226 if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
227 fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
228 else
229 fl6.flowi6_oif = sk->sk_bound_dev_if;
230
231 if (np->opt && np->opt->srcrt) {
232 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
233 fl6.daddr = *rt0->addr;
234 }
235 212
236 pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, 213 pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
237 skb->len, &fl6.saddr, &fl6.daddr); 214 skb->len, &fl6->saddr, &fl6->daddr);
238 215
239 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 216 IP6_ECN_flow_xmit(sk, fl6->flowlabel);
240 217
241 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 218 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
242 skb->local_df = 1; 219 skb->local_df = 1;
243 220
244 return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
222
223 return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
245} 224}
246 225
247/* Returns the dst cache entry for the given source and destination ip 226/* Returns the dst cache entry for the given source and destination ip
@@ -254,10 +233,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
254 struct dst_entry *dst = NULL; 233 struct dst_entry *dst = NULL;
255 struct flowi6 *fl6 = &fl->u.ip6; 234 struct flowi6 *fl6 = &fl->u.ip6;
256 struct sctp_bind_addr *bp; 235 struct sctp_bind_addr *bp;
236 struct ipv6_pinfo *np = inet6_sk(sk);
257 struct sctp_sockaddr_entry *laddr; 237 struct sctp_sockaddr_entry *laddr;
258 union sctp_addr *baddr = NULL; 238 union sctp_addr *baddr = NULL;
259 union sctp_addr *daddr = &t->ipaddr; 239 union sctp_addr *daddr = &t->ipaddr;
260 union sctp_addr dst_saddr; 240 union sctp_addr dst_saddr;
241 struct in6_addr *final_p, final;
261 __u8 matchlen = 0; 242 __u8 matchlen = 0;
262 __u8 bmatchlen; 243 __u8 bmatchlen;
263 sctp_scope_t scope; 244 sctp_scope_t scope;
@@ -281,7 +262,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
281 pr_debug("src=%pI6 - ", &fl6->saddr); 262 pr_debug("src=%pI6 - ", &fl6->saddr);
282 } 263 }
283 264
284 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); 265 final_p = fl6_update_dst(fl6, np->opt, &final);
266 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
285 if (!asoc || saddr) 267 if (!asoc || saddr)
286 goto out; 268 goto out;
287 269
@@ -333,10 +315,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
333 } 315 }
334 } 316 }
335 rcu_read_unlock(); 317 rcu_read_unlock();
318
336 if (baddr) { 319 if (baddr) {
337 fl6->saddr = baddr->v6.sin6_addr; 320 fl6->saddr = baddr->v6.sin6_addr;
338 fl6->fl6_sport = baddr->v6.sin6_port; 321 fl6->fl6_sport = baddr->v6.sin6_port;
339 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); 322 final_p = fl6_update_dst(fl6, np->opt, &final);
323 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
340 } 324 }
341 325
342out: 326out:
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fcac5d141717..084656671d6e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1075,6 +1075,15 @@ gss_destroy(struct rpc_auth *auth)
1075 kref_put(&gss_auth->kref, gss_free_callback); 1075 kref_put(&gss_auth->kref, gss_free_callback);
1076} 1076}
1077 1077
1078/*
1079 * Auths may be shared between rpc clients that were cloned from a
1080 * common client with the same xprt, if they also share the flavor and
1081 * target_name.
1082 *
1083 * The auth is looked up from the oldest parent sharing the same
1084 * cl_xprt, and the auth itself references only that common parent
1085 * (which is guaranteed to last as long as any of its descendants).
1086 */
1078static struct gss_auth * 1087static struct gss_auth *
1079gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args, 1088gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
1080 struct rpc_clnt *clnt, 1089 struct rpc_clnt *clnt,
@@ -1088,6 +1097,8 @@ gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
1088 gss_auth, 1097 gss_auth,
1089 hash, 1098 hash,
1090 hashval) { 1099 hashval) {
1100 if (gss_auth->client != clnt)
1101 continue;
1091 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor) 1102 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1092 continue; 1103 continue;
1093 if (gss_auth->target_name != args->target_name) { 1104 if (gss_auth->target_name != args->target_name) {
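The added client comparison above makes the hashed gss_auth lookup match on the rpc client itself, not only on flavor and target name, so unrelated clients that happen to share a bucket cannot share an auth. The general rule is that a hash only narrows the search; every field that defines identity still has to be compared inside the bucket walk. A generic sketch of that shape (cached_auth and auth_cache_find() are hypothetical, not the sunrpc code):

#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cached_auth {
        struct hlist_node node;
        const void *client;             /* stand-in for the rpc client */
        u32 flavor;
};

static DEFINE_HASHTABLE(auth_cache, 4);
static DEFINE_SPINLOCK(auth_cache_lock);

/* The hash key only selects a bucket; identity is established by
 * comparing every distinguishing field. */
static struct cached_auth *auth_cache_find(const void *client, u32 flavor)
{
        struct cached_auth *a, *found = NULL;

        spin_lock(&auth_cache_lock);
        hash_for_each_possible(auth_cache, a, node, (unsigned long)client) {
                if (a->client != client)
                        continue;
                if (a->flavor != flavor)
                        continue;
                found = a;
                break;
        }
        spin_unlock(&auth_cache_lock);
        return found;
}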
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db04be3e..e7000be321b0 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
47 47
48 /* Allow network administrator to have same access as root. */ 48 /* Allow network administrator to have same access as root. */
49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) || 49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
50 uid_eq(root_uid, current_uid())) { 50 uid_eq(root_uid, current_euid())) {
51 int mode = (table->mode >> 6) & 7; 51 int mode = (table->mode >> 6) & 7;
52 return (mode << 6) | (mode << 3) | mode; 52 return (mode << 6) | (mode << 3) | mode;
53 } 53 }
54 /* Allow netns root group to have the same access as the root group */ 54 /* Allow netns root group to have the same access as the root group */
55 if (gid_eq(root_gid, current_gid())) { 55 if (in_egroup_p(root_gid)) {
56 int mode = (table->mode >> 3) & 7; 56 int mode = (table->mode >> 3) & 7;
57 return (mode << 3) | mode; 57 return (mode << 3) | mode;
58 } 58 }
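The net sysctl change above moves the permission check from real to effective credentials (current_euid() and in_egroup_p() instead of current_uid() and current_gid()), which is the identity permission checks conventionally use, and keeps the trick of replicating the owner or group bits across all three permission classes. That bit manipulation is compact enough to misread, so here is the same arithmetic as a tiny standalone sketch (replicate_owner_bits() is a hypothetical name):

/* For a capable or root-equivalent caller, grant the owner bits of
 * table_mode in every class: 0644 has owner bits 6 (rw), so the caller
 * effectively sees 0666. */
static int replicate_owner_bits(int table_mode)
{
        int mode = (table_mode >> 6) & 7;

        return (mode << 6) | (mode << 3) | mode;
}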
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 47016c304c84..66cad506b8a2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
3975# check for new externs in .h files. 3975# check for new externs in .h files.
3976 if ($realfile =~ /\.h$/ && 3976 if ($realfile =~ /\.h$/ &&
3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) { 3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
3978 if (WARN("AVOID_EXTERNS", 3978 if (CHK("AVOID_EXTERNS",
3979 "extern prototypes should be avoided in .h files\n" . $herecurr) && 3979 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
3980 $fix) { 3980 $fix) {
3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/; 3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
3982 } 3982 }
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index d6222ba4e919..532471d0b3a0 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -15,14 +15,14 @@
15 * it should be. 15 * it should be.
16 */ 16 */
17 17
18#include <linux/crypto.h> 18#include <crypto/hash.h>
19 19
20#include "include/apparmor.h" 20#include "include/apparmor.h"
21#include "include/crypto.h" 21#include "include/crypto.h"
22 22
23static unsigned int apparmor_hash_size; 23static unsigned int apparmor_hash_size;
24 24
25static struct crypto_hash *apparmor_tfm; 25static struct crypto_shash *apparmor_tfm;
26 26
27unsigned int aa_hash_size(void) 27unsigned int aa_hash_size(void)
28{ 28{
@@ -32,35 +32,33 @@ unsigned int aa_hash_size(void)
32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, 32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
33 size_t len) 33 size_t len)
34{ 34{
35 struct scatterlist sg[2]; 35 struct {
36 struct hash_desc desc = { 36 struct shash_desc shash;
37 .tfm = apparmor_tfm, 37 char ctx[crypto_shash_descsize(apparmor_tfm)];
38 .flags = 0 38 } desc;
39 };
40 int error = -ENOMEM; 39 int error = -ENOMEM;
41 u32 le32_version = cpu_to_le32(version); 40 u32 le32_version = cpu_to_le32(version);
42 41
43 if (!apparmor_tfm) 42 if (!apparmor_tfm)
44 return 0; 43 return 0;
45 44
46 sg_init_table(sg, 2);
47 sg_set_buf(&sg[0], &le32_version, 4);
48 sg_set_buf(&sg[1], (u8 *) start, len);
49
50 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); 45 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
51 if (!profile->hash) 46 if (!profile->hash)
52 goto fail; 47 goto fail;
53 48
54 error = crypto_hash_init(&desc); 49 desc.shash.tfm = apparmor_tfm;
50 desc.shash.flags = 0;
51
52 error = crypto_shash_init(&desc.shash);
55 if (error) 53 if (error)
56 goto fail; 54 goto fail;
57 error = crypto_hash_update(&desc, &sg[0], 4); 55 error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
58 if (error) 56 if (error)
59 goto fail; 57 goto fail;
60 error = crypto_hash_update(&desc, &sg[1], len); 58 error = crypto_shash_update(&desc.shash, (u8 *) start, len);
61 if (error) 59 if (error)
62 goto fail; 60 goto fail;
63 error = crypto_hash_final(&desc, profile->hash); 61 error = crypto_shash_final(&desc.shash, profile->hash);
64 if (error) 62 if (error)
65 goto fail; 63 goto fail;
66 64
@@ -75,19 +73,19 @@ fail:
75 73
76static int __init init_profile_hash(void) 74static int __init init_profile_hash(void)
77{ 75{
78 struct crypto_hash *tfm; 76 struct crypto_shash *tfm;
79 77
80 if (!apparmor_initialized) 78 if (!apparmor_initialized)
81 return 0; 79 return 0;
82 80
83 tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); 81 tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
84 if (IS_ERR(tfm)) { 82 if (IS_ERR(tfm)) {
85 int error = PTR_ERR(tfm); 83 int error = PTR_ERR(tfm);
86 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error); 84 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
87 return error; 85 return error;
88 } 86 }
89 apparmor_tfm = tfm; 87 apparmor_tfm = tfm;
90 apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm); 88 apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
91 89
92 aa_info_message("AppArmor sha1 policy hashing enabled"); 90 aa_info_message("AppArmor sha1 policy hashing enabled");
93 91
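The AppArmor hunk above ports profile hashing from the legacy crypto_hash interface, which needed a scatterlist and a hash_desc, to the synchronous shash API, which takes plain buffers. For a one-shot digest the same API can be used even more compactly with crypto_shash_digest(); the following is a minimal sketch assuming a sleepable context (sha1_digest_example() is a hypothetical helper, out must hold crypto_shash_digestsize() bytes, and the desc->flags assignment matches the API of this kernel generation):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sha1_digest_example(const void *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* the descriptor carries per-tfm state, so size it for this tfm */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        /* init + update + final in one call */
        err = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return err;
}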
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index f2d4b6348cbc..c28b0f20ab53 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -360,7 +360,9 @@ static inline void aa_put_replacedby(struct aa_replacedby *p)
360static inline void __aa_update_replacedby(struct aa_profile *orig, 360static inline void __aa_update_replacedby(struct aa_profile *orig,
361 struct aa_profile *new) 361 struct aa_profile *new)
362{ 362{
363 struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile); 363 struct aa_profile *tmp;
364 tmp = rcu_dereference_protected(orig->replacedby->profile,
365 mutex_is_locked(&orig->ns->lock));
364 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new)); 366 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
365 orig->flags |= PFLAG_INVALID; 367 orig->flags |= PFLAG_INVALID;
366 aa_put_profile(tmp); 368 aa_put_profile(tmp);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 6172509fa2b7..345bec07a27d 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -563,7 +563,8 @@ void __init aa_free_root_ns(void)
563static void free_replacedby(struct aa_replacedby *r) 563static void free_replacedby(struct aa_replacedby *r)
564{ 564{
565 if (r) { 565 if (r) {
566 aa_put_profile(rcu_dereference(r->profile)); 566 /* r->profile will not be updated any more as r is dead */
567 aa_put_profile(rcu_dereference_protected(r->profile, true));
567 kzfree(r); 568 kzfree(r);
568 } 569 }
569} 570}
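Both AppArmor hunks above replace a plain rcu_dereference() on the update side with rcu_dereference_protected(), passing the condition that shows why no RCU read-side critical section is needed: the namespace mutex being held in __aa_update_replacedby(), and simply true in free_replacedby() where the object is already unreachable. The idiom in a generic sketch (holder, payload and holder_replace() are hypothetical):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct payload { int value; };

struct holder {
        struct payload __rcu *cur;
        struct mutex lock;              /* write-side lock for cur */
};

/* Caller must hold h->lock; the condition documents (and, with RCU
 * debugging on, verifies) that this is an update-side access. */
static struct payload *holder_replace(struct holder *h, struct payload *newp)
{
        struct payload *old;

        old = rcu_dereference_protected(h->cur, mutex_is_locked(&h->lock));
        rcu_assign_pointer(h->cur, newp);
        return old;     /* free only after a grace period, e.g. kfree_rcu() */
}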
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index dad36a6ab45f..fc3e6628a864 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -746,7 +746,6 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
746 * @tclass: target security class 746 * @tclass: target security class
747 * @requested: requested permissions, interpreted based on @tclass 747 * @requested: requested permissions, interpreted based on @tclass
748 * @auditdata: auxiliary audit data 748 * @auditdata: auxiliary audit data
749 * @flags: VFS walk flags
750 * 749 *
751 * Check the AVC to determine whether the @requested permissions are granted 750 * Check the AVC to determine whether the @requested permissions are granted
752 * for the SID pair (@ssid, @tsid), interpreting the permissions 751 * for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -756,17 +755,15 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
756 * permissions are granted, -%EACCES if any permissions are denied, or 755 * permissions are granted, -%EACCES if any permissions are denied, or
757 * another -errno upon other errors. 756 * another -errno upon other errors.
758 */ 757 */
759int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, 758int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
760 u32 requested, struct common_audit_data *auditdata, 759 u32 requested, struct common_audit_data *auditdata)
761 unsigned flags)
762{ 760{
763 struct av_decision avd; 761 struct av_decision avd;
764 int rc, rc2; 762 int rc, rc2;
765 763
766 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); 764 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
767 765
768 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 766 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
769 flags);
770 if (rc2) 767 if (rc2)
771 return rc2; 768 return rc2;
772 return rc; 769 return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a5091ec06aa6..5b5231068516 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1502,7 +1502,7 @@ static int cred_has_capability(const struct cred *cred,
1502 1502
1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); 1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
1504 if (audit == SECURITY_CAP_AUDIT) { 1504 if (audit == SECURITY_CAP_AUDIT) {
1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); 1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
1506 if (rc2) 1506 if (rc2)
1507 return rc2; 1507 return rc2;
1508 } 1508 }
@@ -1525,8 +1525,7 @@ static int task_has_system(struct task_struct *tsk,
1525static int inode_has_perm(const struct cred *cred, 1525static int inode_has_perm(const struct cred *cred,
1526 struct inode *inode, 1526 struct inode *inode,
1527 u32 perms, 1527 u32 perms,
1528 struct common_audit_data *adp, 1528 struct common_audit_data *adp)
1529 unsigned flags)
1530{ 1529{
1531 struct inode_security_struct *isec; 1530 struct inode_security_struct *isec;
1532 u32 sid; 1531 u32 sid;
@@ -1539,7 +1538,7 @@ static int inode_has_perm(const struct cred *cred,
1539 sid = cred_sid(cred); 1538 sid = cred_sid(cred);
1540 isec = inode->i_security; 1539 isec = inode->i_security;
1541 1540
1542 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1541 return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
1543} 1542}
1544 1543
1545/* Same as inode_has_perm, but pass explicit audit data containing 1544/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1554,7 +1553,7 @@ static inline int dentry_has_perm(const struct cred *cred,
1554 1553
1555 ad.type = LSM_AUDIT_DATA_DENTRY; 1554 ad.type = LSM_AUDIT_DATA_DENTRY;
1556 ad.u.dentry = dentry; 1555 ad.u.dentry = dentry;
1557 return inode_has_perm(cred, inode, av, &ad, 0); 1556 return inode_has_perm(cred, inode, av, &ad);
1558} 1557}
1559 1558
1560/* Same as inode_has_perm, but pass explicit audit data containing 1559/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1569,7 +1568,7 @@ static inline int path_has_perm(const struct cred *cred,
1569 1568
1570 ad.type = LSM_AUDIT_DATA_PATH; 1569 ad.type = LSM_AUDIT_DATA_PATH;
1571 ad.u.path = *path; 1570 ad.u.path = *path;
1572 return inode_has_perm(cred, inode, av, &ad, 0); 1571 return inode_has_perm(cred, inode, av, &ad);
1573} 1572}
1574 1573
1575/* Same as path_has_perm, but uses the inode from the file struct. */ 1574/* Same as path_has_perm, but uses the inode from the file struct. */
@@ -1581,7 +1580,7 @@ static inline int file_path_has_perm(const struct cred *cred,
1581 1580
1582 ad.type = LSM_AUDIT_DATA_PATH; 1581 ad.type = LSM_AUDIT_DATA_PATH;
1583 ad.u.path = file->f_path; 1582 ad.u.path = file->f_path;
1584 return inode_has_perm(cred, file_inode(file), av, &ad, 0); 1583 return inode_has_perm(cred, file_inode(file), av, &ad);
1585} 1584}
1586 1585
1587/* Check whether a task can use an open file descriptor to 1586/* Check whether a task can use an open file descriptor to
@@ -1617,7 +1616,7 @@ static int file_has_perm(const struct cred *cred,
1617 /* av is zero if only checking access to the descriptor. */ 1616 /* av is zero if only checking access to the descriptor. */
1618 rc = 0; 1617 rc = 0;
1619 if (av) 1618 if (av)
1620 rc = inode_has_perm(cred, inode, av, &ad, 0); 1619 rc = inode_has_perm(cred, inode, av, &ad);
1621 1620
1622out: 1621out:
1623 return rc; 1622 return rc;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 92d0ab561db8..f53ee3c58d0f 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -130,7 +130,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
130 u16 tclass, u32 requested, 130 u16 tclass, u32 requested,
131 struct av_decision *avd, 131 struct av_decision *avd,
132 int result, 132 int result,
133 struct common_audit_data *a, unsigned flags) 133 struct common_audit_data *a)
134{ 134{
135 u32 audited, denied; 135 u32 audited, denied;
136 audited = avc_audit_required(requested, avd, result, 0, &denied); 136 audited = avc_audit_required(requested, avd, result, 0, &denied);
@@ -138,7 +138,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
138 return 0; 138 return 0;
139 return slow_avc_audit(ssid, tsid, tclass, 139 return slow_avc_audit(ssid, tsid, tclass,
140 requested, audited, denied, 140 requested, audited, denied,
141 a, flags); 141 a, 0);
142} 142}
143 143
144#define AVC_STRICT 1 /* Ignore permissive mode. */ 144#define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -147,17 +147,9 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
147 unsigned flags, 147 unsigned flags,
148 struct av_decision *avd); 148 struct av_decision *avd);
149 149
150int avc_has_perm_flags(u32 ssid, u32 tsid, 150int avc_has_perm(u32 ssid, u32 tsid,
151 u16 tclass, u32 requested, 151 u16 tclass, u32 requested,
152 struct common_audit_data *auditdata, 152 struct common_audit_data *auditdata);
153 unsigned);
154
155static inline int avc_has_perm(u32 ssid, u32 tsid,
156 u16 tclass, u32 requested,
157 struct common_audit_data *auditdata)
158{
159 return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
160}
161 153
162u32 avc_policy_seqno(void); 154u32 avc_policy_seqno(void);
163 155
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 98969541cbcc..bea523a5d852 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
139static int snd_compr_free(struct inode *inode, struct file *f) 139static int snd_compr_free(struct inode *inode, struct file *f)
140{ 140{
141 struct snd_compr_file *data = f->private_data; 141 struct snd_compr_file *data = f->private_data;
142 struct snd_compr_runtime *runtime = data->stream.runtime;
143
144 switch (runtime->state) {
145 case SNDRV_PCM_STATE_RUNNING:
146 case SNDRV_PCM_STATE_DRAINING:
147 case SNDRV_PCM_STATE_PAUSED:
148 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
149 break;
150 default:
151 break;
152 }
153
142 data->stream.ops->free(&data->stream); 154 data->stream.ops->free(&data->stream);
143 kfree(data->stream.runtime->buffer); 155 kfree(data->stream.runtime->buffer);
144 kfree(data->stream.runtime); 156 kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
837 struct snd_compr *compr; 849 struct snd_compr *compr;
838 850
839 compr = device->device_data; 851 compr = device->device_data;
840 snd_unregister_device(compr->direction, compr->card, compr->device); 852 snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
853 compr->device);
841 return 0; 854 return 0;
842} 855}
843 856
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 445ca481d8d3..bf578ba2677e 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -175,6 +175,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
175{ 0x54524106, 0xffffffff, "TR28026", NULL, NULL }, 175{ 0x54524106, 0xffffffff, "TR28026", NULL, NULL },
176{ 0x54524108, 0xffffffff, "TR28028", patch_tritech_tr28028, NULL }, // added by xin jin [07/09/99] 176{ 0x54524108, 0xffffffff, "TR28028", patch_tritech_tr28028, NULL }, // added by xin jin [07/09/99]
177{ 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)] 177{ 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
178{ 0x54584e03, 0xffffffff, "TLV320AIC27", NULL, NULL },
178{ 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL }, 179{ 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL },
179{ 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF 180{ 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF
180{ 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF 181{ 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index b524f89a1f13..18d972501585 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -111,6 +111,9 @@ enum {
111/* 0x0009 - 0x0014 -> 12 test regs */ 111/* 0x0009 - 0x0014 -> 12 test regs */
112/* 0x0015 - visibility reg */ 112/* 0x0015 - visibility reg */
113 113
114/* Cirrus Logic CS4208 */
115#define CS4208_VENDOR_NID 0x24
116
114/* 117/*
115 * Cirrus Logic CS4210 118 * Cirrus Logic CS4210
116 * 119 *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
223 {} /* terminator */ 226 {} /* terminator */
224}; 227};
225 228
229static const struct hda_verb cs4208_coef_init_verbs[] = {
230 {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
231 {0x24, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
232 {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
233 {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
234 {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
235 {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
236 {} /* terminator */
237};
238
226/* Errata: CS4207 rev C0/C1/C2 Silicon 239/* Errata: CS4207 rev C0/C1/C2 Silicon
227 * 240 *
228 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf 241 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
295 /* init_verb sequence for C0/C1/C2 errata*/ 308 /* init_verb sequence for C0/C1/C2 errata*/
296 snd_hda_sequence_write(codec, cs_errata_init_verbs); 309 snd_hda_sequence_write(codec, cs_errata_init_verbs);
297 snd_hda_sequence_write(codec, cs_coef_init_verbs); 310 snd_hda_sequence_write(codec, cs_coef_init_verbs);
311 } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
312 snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
298 } 313 }
299 314
300 snd_hda_gen_init(codec); 315 snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
434 {} /* terminator */ 449 {} /* terminator */
435}; 450};
436 451
452static const struct hda_pintbl mba6_pincfgs[] = {
453 { 0x10, 0x032120f0 }, /* HP */
454 { 0x11, 0x500000f0 },
455 { 0x12, 0x90100010 }, /* Speaker */
456 { 0x13, 0x500000f0 },
457 { 0x14, 0x500000f0 },
458 { 0x15, 0x770000f0 },
459 { 0x16, 0x770000f0 },
460 { 0x17, 0x430000f0 },
461 { 0x18, 0x43ab9030 }, /* Mic */
462 { 0x19, 0x770000f0 },
463 { 0x1a, 0x770000f0 },
464 { 0x1b, 0x770000f0 },
465 { 0x1c, 0x90a00090 },
466 { 0x1d, 0x500000f0 },
467 { 0x1e, 0x500000f0 },
468 { 0x1f, 0x500000f0 },
469 { 0x20, 0x500000f0 },
470 { 0x21, 0x430000f0 },
471 { 0x22, 0x430000f0 },
472 {} /* terminator */
473};
474
437static void cs420x_fixup_gpio_13(struct hda_codec *codec, 475static void cs420x_fixup_gpio_13(struct hda_codec *codec,
438 const struct hda_fixup *fix, int action) 476 const struct hda_fixup *fix, int action)
439{ 477{
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
556 594
557/* 595/*
558 * CS4208 support: 596 * CS4208 support:
559 * Its layout is no longer compatible with CS4206/CS4207, and the generic 597 * Its layout is no longer compatible with CS4206/CS4207
560 * parser seems working fairly well, except for trivial fixups.
561 */ 598 */
562enum { 599enum {
600 CS4208_MBA6,
563 CS4208_GPIO0, 601 CS4208_GPIO0,
564}; 602};
565 603
566static const struct hda_model_fixup cs4208_models[] = { 604static const struct hda_model_fixup cs4208_models[] = {
567 { .id = CS4208_GPIO0, .name = "gpio0" }, 605 { .id = CS4208_GPIO0, .name = "gpio0" },
606 { .id = CS4208_MBA6, .name = "mba6" },
568 {} 607 {}
569}; 608};
570 609
571static const struct snd_pci_quirk cs4208_fixup_tbl[] = { 610static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
572 /* codec SSID */ 611 /* codec SSID */
573 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0), 612 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
574 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0), 613 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
575 {} /* terminator */ 614 {} /* terminator */
576}; 615};
577 616
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
588} 627}
589 628
590static const struct hda_fixup cs4208_fixups[] = { 629static const struct hda_fixup cs4208_fixups[] = {
630 [CS4208_MBA6] = {
631 .type = HDA_FIXUP_PINS,
632 .v.pins = mba6_pincfgs,
633 .chained = true,
634 .chain_id = CS4208_GPIO0,
635 },
591 [CS4208_GPIO0] = { 636 [CS4208_GPIO0] = {
592 .type = HDA_FIXUP_FUNC, 637 .type = HDA_FIXUP_FUNC,
593 .v.func = cs4208_fixup_gpio0, 638 .v.func = cs4208_fixup_gpio0,
594 }, 639 },
595}; 640};
596 641
642/* correct the 0dB offset of input pins */
643static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
644{
645 unsigned int caps;
646
647 caps = query_amp_caps(codec, adc, HDA_INPUT);
648 caps &= ~(AC_AMPCAP_OFFSET);
649 caps |= 0x02;
650 snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
651}
652
597static int patch_cs4208(struct hda_codec *codec) 653static int patch_cs4208(struct hda_codec *codec)
598{ 654{
599 struct cs_spec *spec; 655 struct cs_spec *spec;
600 int err; 656 int err;
601 657
602 spec = cs_alloc_spec(codec, 0); /* no specific w/a */ 658 spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
603 if (!spec) 659 if (!spec)
604 return -ENOMEM; 660 return -ENOMEM;
605 661
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
609 cs4208_fixups); 665 cs4208_fixups);
610 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); 666 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
611 667
668 snd_hda_override_wcaps(codec, 0x18,
669 get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
670 cs4208_fix_amp_caps(codec, 0x18);
671 cs4208_fix_amp_caps(codec, 0x1b);
672 cs4208_fix_amp_caps(codec, 0x1c);
673
612 err = cs_parse_auto_config(codec); 674 err = cs_parse_auto_config(codec);
613 if (err < 0) 675 if (err < 0)
614 goto error; 676 goto error;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4edd2d0f9a3c..ec68eaea0336 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3231,6 +3231,7 @@ enum {
3231 CXT_FIXUP_INC_MIC_BOOST, 3231 CXT_FIXUP_INC_MIC_BOOST,
3232 CXT_FIXUP_HEADPHONE_MIC_PIN, 3232 CXT_FIXUP_HEADPHONE_MIC_PIN,
3233 CXT_FIXUP_HEADPHONE_MIC, 3233 CXT_FIXUP_HEADPHONE_MIC,
3234 CXT_FIXUP_GPIO1,
3234}; 3235};
3235 3236
3236static void cxt_fixup_stereo_dmic(struct hda_codec *codec, 3237static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3375,6 +3376,15 @@ static const struct hda_fixup cxt_fixups[] = {
3375 .type = HDA_FIXUP_FUNC, 3376 .type = HDA_FIXUP_FUNC,
3376 .v.func = cxt_fixup_headphone_mic, 3377 .v.func = cxt_fixup_headphone_mic,
3377 }, 3378 },
3379 [CXT_FIXUP_GPIO1] = {
3380 .type = HDA_FIXUP_VERBS,
3381 .v.verbs = (const struct hda_verb[]) {
3382 { 0x01, AC_VERB_SET_GPIO_MASK, 0x01 },
3383 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 },
3384 { 0x01, AC_VERB_SET_GPIO_DATA, 0x01 },
3385 { }
3386 },
3387 },
3378}; 3388};
3379 3389
3380static const struct snd_pci_quirk cxt5051_fixups[] = { 3390static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3384,6 +3394,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
3384 3394
3385static const struct snd_pci_quirk cxt5066_fixups[] = { 3395static const struct snd_pci_quirk cxt5066_fixups[] = {
3386 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), 3396 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
3397 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
3387 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 3398 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
3388 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), 3399 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
3389 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), 3400 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3d8cd04455a6..7ea0245fc6bd 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
1149} 1149}
1150 1150
1151static void haswell_config_cvts(struct hda_codec *codec, 1151static void haswell_config_cvts(struct hda_codec *codec,
1152 int pin_id, int mux_id) 1152 hda_nid_t pin_nid, int mux_idx)
1153{ 1153{
1154 struct hdmi_spec *spec = codec->spec; 1154 struct hdmi_spec *spec = codec->spec;
1155 struct hdmi_spec_per_pin *per_pin; 1155 hda_nid_t nid, end_nid;
1156 int pin_idx, mux_idx; 1156 int cvt_idx, curr;
1157 int curr; 1157 struct hdmi_spec_per_cvt *per_cvt;
1158 int err;
1159 1158
1160 for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { 1159 /* configure all pins, including "no physical connection" ones */
1161 per_pin = get_pin(spec, pin_idx); 1160 end_nid = codec->start_nid + codec->num_nodes;
1161 for (nid = codec->start_nid; nid < end_nid; nid++) {
1162 unsigned int wid_caps = get_wcaps(codec, nid);
1163 unsigned int wid_type = get_wcaps_type(wid_caps);
1162 1164
1163 if (pin_idx == pin_id) 1165 if (wid_type != AC_WID_PIN)
1164 continue; 1166 continue;
1165 1167
1166 curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0, 1168 if (nid == pin_nid)
1169 continue;
1170
1171 curr = snd_hda_codec_read(codec, nid, 0,
1167 AC_VERB_GET_CONNECT_SEL, 0); 1172 AC_VERB_GET_CONNECT_SEL, 0);
1173 if (curr != mux_idx)
1174 continue;
1168 1175
 1169 /* Choose another unused converter */ 1176 /* choose an unassigned converter. The converters in the
1170 if (curr == mux_id) { 1177 * connection list are in the same order as in the codec.
1171 err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx); 1178 */
1172 if (err < 0) 1179 for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
1173 return; 1180 per_cvt = get_cvt(spec, cvt_idx);
1174 snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx); 1181 if (!per_cvt->assigned) {
1175 snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0, 1182 snd_printdd("choose cvt %d for pin nid %d\n",
1183 cvt_idx, nid);
1184 snd_hda_codec_write_cache(codec, nid, 0,
1176 AC_VERB_SET_CONNECT_SEL, 1185 AC_VERB_SET_CONNECT_SEL,
1177 mux_idx); 1186 cvt_idx);
1187 break;
1188 }
1178 } 1189 }
1179 } 1190 }
1180} 1191}
@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
1216 1227
1217 /* configure unused pins to choose other converters */ 1228 /* configure unused pins to choose other converters */
1218 if (is_haswell(codec)) 1229 if (is_haswell(codec))
1219 haswell_config_cvts(codec, pin_idx, mux_idx); 1230 haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
1220 1231
1221 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid); 1232 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
1222 1233
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc07d369fac4..0e303b99a47c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3439 /* Set to manual mode */ 3439 /* Set to manual mode */
3440 val = alc_read_coef_idx(codec, 0x06); 3440 val = alc_read_coef_idx(codec, 0x06);
3441 alc_write_coef_idx(codec, 0x06, val & ~0x000c); 3441 alc_write_coef_idx(codec, 0x06, val & ~0x000c);
3442 /* Enable Line1 input control by verb */
3443 val = alc_read_coef_idx(codec, 0x1a);
3444 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3442 break; 3445 break;
3443 } 3446 }
3444} 3447}
@@ -3531,6 +3534,7 @@ enum {
3531 ALC269VB_FIXUP_ORDISSIMO_EVE2, 3534 ALC269VB_FIXUP_ORDISSIMO_EVE2,
3532 ALC283_FIXUP_CHROME_BOOK, 3535 ALC283_FIXUP_CHROME_BOOK,
3533 ALC282_FIXUP_ASUS_TX300, 3536 ALC282_FIXUP_ASUS_TX300,
3537 ALC283_FIXUP_INT_MIC,
3534}; 3538};
3535 3539
3536static const struct hda_fixup alc269_fixups[] = { 3540static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
3790 .type = HDA_FIXUP_FUNC, 3794 .type = HDA_FIXUP_FUNC,
3791 .v.func = alc282_fixup_asus_tx300, 3795 .v.func = alc282_fixup_asus_tx300,
3792 }, 3796 },
3797 [ALC283_FIXUP_INT_MIC] = {
3798 .type = HDA_FIXUP_VERBS,
3799 .v.verbs = (const struct hda_verb[]) {
3800 {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
3801 {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
3802 { }
3803 },
3804 .chained = true,
3805 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
3806 },
3793}; 3807};
3794 3808
3795static const struct snd_pci_quirk alc269_fixup_tbl[] = { 3809static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3874 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3888 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3875 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3889 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3876 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3890 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3877 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3891 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
3878 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3892 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3879 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3893 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3880 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 3894 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/blackfin/bf6xx-i2s.c b/sound/soc/blackfin/bf6xx-i2s.c
index c02405cc007d..5810a0603f2f 100644
--- a/sound/soc/blackfin/bf6xx-i2s.c
+++ b/sound/soc/blackfin/bf6xx-i2s.c
@@ -88,6 +88,7 @@ static int bfin_i2s_hw_params(struct snd_pcm_substream *substream,
88 case SNDRV_PCM_FORMAT_S8: 88 case SNDRV_PCM_FORMAT_S8:
89 param.spctl |= 0x70; 89 param.spctl |= 0x70;
90 sport->wdsize = 1; 90 sport->wdsize = 1;
91 break;
91 case SNDRV_PCM_FORMAT_S16_LE: 92 case SNDRV_PCM_FORMAT_S16_LE:
92 param.spctl |= 0xf0; 93 param.spctl |= 0xf0;
93 sport->wdsize = 2; 94 sport->wdsize = 2;
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 8af04343cc1a..259d1ac4492f 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -349,6 +349,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
349 val = ucontrol->value.integer.value[0]; 349 val = ucontrol->value.integer.value[0];
350 val2 = ucontrol->value.integer.value[1]; 350 val2 = ucontrol->value.integer.value[1];
351 351
352 if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
353 return -EINVAL;
354
352 err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m); 355 err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
353 if (err < 0) 356 if (err < 0)
354 return err; 357 return err;
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index b8ba0adacfce..80555d7551e6 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1225,13 +1225,18 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
1225 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev); 1225 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
1226 struct device *dev = codec->dev; 1226 struct device *dev = codec->dev;
1227 bool apply_fir, apply_iir; 1227 bool apply_fir, apply_iir;
1228 int req, status; 1228 unsigned int req;
1229 int status;
1229 1230
1230 dev_dbg(dev, "%s: Enter.\n", __func__); 1231 dev_dbg(dev, "%s: Enter.\n", __func__);
1231 1232
1232 mutex_lock(&drvdata->anc_lock); 1233 mutex_lock(&drvdata->anc_lock);
1233 1234
1234 req = ucontrol->value.integer.value[0]; 1235 req = ucontrol->value.integer.value[0];
1236 if (req >= ARRAY_SIZE(enum_anc_state)) {
1237 status = -EINVAL;
1238 goto cleanup;
1239 }
1235 if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR && 1240 if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
1236 req != ANC_APPLY_IIR) { 1241 req != ANC_APPLY_IIR) {
1237 dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n", 1242 dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 41cdd1642970..8dbcacd44e6a 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -1863,7 +1863,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
1863 struct max98095_pdata *pdata = max98095->pdata; 1863 struct max98095_pdata *pdata = max98095->pdata;
1864 int channel = max98095_get_eq_channel(kcontrol->id.name); 1864 int channel = max98095_get_eq_channel(kcontrol->id.name);
1865 struct max98095_cdata *cdata; 1865 struct max98095_cdata *cdata;
1866 int sel = ucontrol->value.integer.value[0]; 1866 unsigned int sel = ucontrol->value.integer.value[0];
1867 struct max98095_eq_cfg *coef_set; 1867 struct max98095_eq_cfg *coef_set;
1868 int fs, best, best_val, i; 1868 int fs, best, best_val, i;
1869 int regmask, regsave; 1869 int regmask, regsave;
@@ -2016,7 +2016,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
2016 struct max98095_pdata *pdata = max98095->pdata; 2016 struct max98095_pdata *pdata = max98095->pdata;
2017 int channel = max98095_get_bq_channel(codec, kcontrol->id.name); 2017 int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
2018 struct max98095_cdata *cdata; 2018 struct max98095_cdata *cdata;
2019 int sel = ucontrol->value.integer.value[0]; 2019 unsigned int sel = ucontrol->value.integer.value[0];
2020 struct max98095_biquad_cfg *coef_set; 2020 struct max98095_biquad_cfg *coef_set;
2021 int fs, best, best_val, i; 2021 int fs, best, best_val, i;
2022 int regmask, regsave; 2022 int regmask, regsave;
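The three codec fixes above (88pm860x, ab8500 and max98095) share one pattern: the value arriving in a kcontrol put handler comes straight from userspace and has to be validated before it indexes a table or is treated as an enum, and making the selector unsigned also stops a negative value from slipping past a greater-or-equal check. A sketch of that shape for an ASoC codec of this era (the register 0x3f, gain_table and example_put() are hypothetical):

#include <sound/core.h>
#include <sound/soc.h>

static const u8 gain_table[] = { 0x00, 0x08, 0x10, 0x18 };

static int example_put(struct snd_kcontrol *kcontrol,
                       struct snd_ctl_elem_value *ucontrol)
{
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
        unsigned int sel = ucontrol->value.integer.value[0];

        /* value[0] is user-controlled; reject anything past the table
         * instead of reading out of bounds */
        if (sel >= ARRAY_SIZE(gain_table))
                return -EINVAL;

        return snd_soc_update_bits(codec, 0x3f, 0x1f, gain_table[sel]);
}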
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 46c5b4fdfc52..ca1be1d9dcf0 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -62,7 +62,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
62 struct device_node *ssi_np, *codec_np; 62 struct device_node *ssi_np, *codec_np;
63 struct platform_device *ssi_pdev; 63 struct platform_device *ssi_pdev;
64 struct i2c_client *codec_dev; 64 struct i2c_client *codec_dev;
65 struct imx_sgtl5000_data *data; 65 struct imx_sgtl5000_data *data = NULL;
66 int int_port, ext_port; 66 int int_port, ext_port;
67 int ret; 67 int ret;
68 68
@@ -128,7 +128,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
128 goto fail; 128 goto fail;
129 } 129 }
130 130
131 data->codec_clk = devm_clk_get(&codec_dev->dev, NULL); 131 data->codec_clk = clk_get(&codec_dev->dev, NULL);
132 if (IS_ERR(data->codec_clk)) { 132 if (IS_ERR(data->codec_clk)) {
133 ret = PTR_ERR(data->codec_clk); 133 ret = PTR_ERR(data->codec_clk);
134 goto fail; 134 goto fail;
@@ -172,6 +172,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
172 return 0; 172 return 0;
173 173
174fail: 174fail:
175 if (data && !IS_ERR(data->codec_clk))
176 clk_put(data->codec_clk);
175 if (ssi_np) 177 if (ssi_np)
176 of_node_put(ssi_np); 178 of_node_put(ssi_np);
177 if (codec_np) 179 if (codec_np)
@@ -185,6 +187,7 @@ static int imx_sgtl5000_remove(struct platform_device *pdev)
185 struct imx_sgtl5000_data *data = platform_get_drvdata(pdev); 187 struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
186 188
187 snd_soc_unregister_card(&data->card); 189 snd_soc_unregister_card(&data->card);
190 clk_put(data->codec_clk);
188 191
189 return 0; 192 return 0;
190} 193}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4d0561312f3b..1a38be0d0ca8 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1380,7 +1380,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
1380 return -ENODEV; 1380 return -ENODEV;
1381 1381
1382 list_add(&cpu_dai->dapm.list, &card->dapm_list); 1382 list_add(&cpu_dai->dapm.list, &card->dapm_list);
1383 snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
1384 } 1383 }
1385 1384
1386 if (cpu_dai->driver->probe) { 1385 if (cpu_dai->driver->probe) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c17c14c394df..9273216f22fc 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -499,18 +499,22 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
499 int val; 499 int val;
500 struct soc_mixer_control *mc = (struct soc_mixer_control *) 500 struct soc_mixer_control *mc = (struct soc_mixer_control *)
501 w->kcontrol_news[i].private_value; 501 w->kcontrol_news[i].private_value;
502 unsigned int reg = mc->reg; 502 int reg = mc->reg;
503 unsigned int shift = mc->shift; 503 unsigned int shift = mc->shift;
504 int max = mc->max; 504 int max = mc->max;
505 unsigned int mask = (1 << fls(max)) - 1; 505 unsigned int mask = (1 << fls(max)) - 1;
506 unsigned int invert = mc->invert; 506 unsigned int invert = mc->invert;
507 507
508 val = soc_widget_read(w, reg); 508 if (reg != SND_SOC_NOPM) {
509 val = (val >> shift) & mask; 509 val = soc_widget_read(w, reg);
510 if (invert) 510 val = (val >> shift) & mask;
511 val = max - val; 511 if (invert)
512 val = max - val;
513 p->connect = !!val;
514 } else {
515 p->connect = 0;
516 }
512 517
513 p->connect = !!val;
514 } 518 }
515 break; 519 break;
516 case snd_soc_dapm_mux: { 520 case snd_soc_dapm_mux: {
@@ -1840,6 +1844,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
1840 */ 1844 */
1841 switch (w->id) { 1845 switch (w->id) {
1842 case snd_soc_dapm_siggen: 1846 case snd_soc_dapm_siggen:
1847 case snd_soc_dapm_vmid:
1843 break; 1848 break;
1844 case snd_soc_dapm_supply: 1849 case snd_soc_dapm_supply:
1845 case snd_soc_dapm_regulator_supply: 1850 case snd_soc_dapm_regulator_supply:
@@ -2791,7 +2796,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
2791 struct snd_soc_card *card = codec->card; 2796 struct snd_soc_card *card = codec->card;
2792 struct soc_mixer_control *mc = 2797 struct soc_mixer_control *mc =
2793 (struct soc_mixer_control *)kcontrol->private_value; 2798 (struct soc_mixer_control *)kcontrol->private_value;
2794 unsigned int reg = mc->reg; 2799 int reg = mc->reg;
2795 unsigned int shift = mc->shift; 2800 unsigned int shift = mc->shift;
2796 int max = mc->max; 2801 int max = mc->max;
2797 unsigned int mask = (1 << fls(max)) - 1; 2802 unsigned int mask = (1 << fls(max)) - 1;
@@ -2804,7 +2809,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
2804 kcontrol->id.name); 2809 kcontrol->id.name);
2805 2810
2806 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 2811 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
2807 if (dapm_kcontrol_is_powered(kcontrol)) 2812 if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM)
2808 val = (snd_soc_read(codec, reg) >> shift) & mask; 2813 val = (snd_soc_read(codec, reg) >> shift) & mask;
2809 else 2814 else
2810 val = dapm_kcontrol_get_value(kcontrol); 2815 val = dapm_kcontrol_get_value(kcontrol);
@@ -2835,7 +2840,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2835 struct snd_soc_card *card = codec->card; 2840 struct snd_soc_card *card = codec->card;
2836 struct soc_mixer_control *mc = 2841 struct soc_mixer_control *mc =
2837 (struct soc_mixer_control *)kcontrol->private_value; 2842 (struct soc_mixer_control *)kcontrol->private_value;
2838 unsigned int reg = mc->reg; 2843 int reg = mc->reg;
2839 unsigned int shift = mc->shift; 2844 unsigned int shift = mc->shift;
2840 int max = mc->max; 2845 int max = mc->max;
2841 unsigned int mask = (1 << fls(max)) - 1; 2846 unsigned int mask = (1 << fls(max)) - 1;
@@ -2857,19 +2862,24 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2857 2862
2858 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 2863 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
2859 2864
2860 dapm_kcontrol_set_value(kcontrol, val); 2865 change = dapm_kcontrol_set_value(kcontrol, val);
2861 2866
2862 mask = mask << shift; 2867 if (reg != SND_SOC_NOPM) {
2863 val = val << shift; 2868 mask = mask << shift;
2869 val = val << shift;
2870
2871 change = snd_soc_test_bits(codec, reg, mask, val);
2872 }
2864 2873
2865 change = snd_soc_test_bits(codec, reg, mask, val);
2866 if (change) { 2874 if (change) {
2867 update.kcontrol = kcontrol; 2875 if (reg != SND_SOC_NOPM) {
2868 update.reg = reg; 2876 update.kcontrol = kcontrol;
2869 update.mask = mask; 2877 update.reg = reg;
2870 update.val = val; 2878 update.mask = mask;
2879 update.val = val;
2871 2880
2872 card->update = &update; 2881 card->update = &update;
2882 }
2873 2883
2874 soc_dapm_mixer_update_power(card, kcontrol, connect); 2884 soc_dapm_mixer_update_power(card, kcontrol, connect);
2875 2885
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
5#include <stdbool.h> 5#include <stdbool.h>
6#include <sys/vfs.h> 6#include <sys/vfs.h>
7#include <sys/mount.h> 7#include <sys/mount.h>
8#include <linux/magic.h>
9#include <linux/kernel.h> 8#include <linux/kernel.h>
10 9
11#include "debugfs.h" 10#include "debugfs.h"
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 9570c2b0f83c..b2519e49424f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, 32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
33 struct perf_tsc_conversion *tc) 33 struct perf_tsc_conversion *tc)
34{ 34{
35 bool cap_usr_time_zero; 35 bool cap_user_time_zero;
36 u32 seq; 36 u32 seq;
37 int i = 0; 37 int i = 0;
38 38
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
42 tc->time_mult = pc->time_mult; 42 tc->time_mult = pc->time_mult;
43 tc->time_shift = pc->time_shift; 43 tc->time_shift = pc->time_shift;
44 tc->time_zero = pc->time_zero; 44 tc->time_zero = pc->time_zero;
45 cap_usr_time_zero = pc->cap_usr_time_zero; 45 cap_user_time_zero = pc->cap_user_time_zero;
46 rmb(); 46 rmb();
47 if (pc->lock == seq && !(seq & 1)) 47 if (pc->lock == seq && !(seq & 1))
48 break; 48 break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
52 } 52 }
53 } 53 }
54 54
55 if (!cap_usr_time_zero) 55 if (!cap_user_time_zero)
56 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
57 57
58 return 0; 58 return 0;
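The perf change above only renames cap_usr_time_zero to cap_user_time_zero, but the surrounding loop is the interesting part: the mmap page fields are read with a seqlock-style retry loop that re-reads whenever pc->lock changed or is odd, meaning a writer was active. A stripped-down userspace sketch of that read side (read_barrier() here is only a compiler barrier, a simplification of the kernel's rmb(); struct shared_page is hypothetical):

#include <stdint.h>

#define read_barrier()  __asm__ __volatile__("" ::: "memory")

struct shared_page {
        volatile uint32_t lock;         /* even: stable, odd: writer active */
        uint64_t time_zero;
        uint32_t time_mult;
        uint16_t time_shift;
};

/* Snapshot the sequence, copy the fields, then retry if a writer ran
 * (or was still running) while we were reading. */
static void read_time_fields(const struct shared_page *pc,
                             uint64_t *zero, uint32_t *mult, uint16_t *shift)
{
        uint32_t seq;

        do {
                seq = pc->lock;
                read_barrier();
                *zero = pc->time_zero;
                *mult = pc->time_mult;
                *shift = pc->time_shift;
                read_barrier();
        } while (pc->lock != seq || (seq & 1));
}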
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 423875c999b2..afe377b2884f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -321,8 +321,6 @@ found:
321 return perf_event__repipe(tool, event_sw, &sample_sw, machine); 321 return perf_event__repipe(tool, event_sw, &sample_sw, machine);
322} 322}
323 323
324extern volatile int session_done;
325
326static void sig_handler(int sig __maybe_unused) 324static void sig_handler(int sig __maybe_unused)
327{ 325{
328 session_done = 1; 326 session_done = 1;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c2dff9cb1f2c..9b5f077fee5b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
101 101
102 dir1 = opendir(PATH_SYS_NODE); 102 dir1 = opendir(PATH_SYS_NODE);
103 if (!dir1) 103 if (!dir1)
104 return -1; 104 return 0;
105 105
106 while ((dent1 = readdir(dir1)) != NULL) { 106 while ((dent1 = readdir(dir1)) != NULL) {
107 if (dent1->d_type != DT_DIR || 107 if (dent1->d_type != DT_DIR ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e50d8d77419..72eae7498c09 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
401 return 0; 401 return 0;
402} 402}
403 403
404extern volatile int session_done;
405
406static void sig_handler(int sig __maybe_unused) 404static void sig_handler(int sig __maybe_unused)
407{ 405{
408 session_done = 1; 406 session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
568 } 566 }
569 } 567 }
570 568
569 if (session_done())
570 return 0;
571
571 if (nr_samples == 0) { 572 if (nr_samples == 0) {
572 ui__error("The %s file has no samples!\n", session->filename); 573 ui__error("The %s file has no samples!\n", session->filename);
573 return 0; 574 return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7f31a3ded1b6..9c333ff3dfeb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
 	.ordering_requires_timestamps = true,
 };
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
 	session_done = 1;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5aa6375e3e9..71aa3e35406b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,6 +16,23 @@
 #include <sys/mman.h>
 #include <linux/futex.h>
 
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK 0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON 100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE 12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE 13
+#endif
+
 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
 					 unsigned long arg,
 					 u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
 
 	trace->tool.sample = trace__process_sample;
 	trace->tool.mmap = perf_event__process_mmap;
+	trace->tool.mmap2 = perf_event__process_mmap2;
 	trace->tool.comm = perf_event__process_comm;
 	trace->tool.exit = perf_event__process_exit;
 	trace->tool.fork = perf_event__process_fork;
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
 	CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
 ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
 	CFLAGS += -DLIBELF_MMAP
 endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+	CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
 
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..d5a8dd44945f 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
 }
 endef
 
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+	size_t dst;
+	return elf_getphdrnum(0, &dst);
+}
+endef
+
 ifndef NO_SLANG
 define SOURCE_SLANG
 #include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
+	printf(\"error message: %s\n\", audit_errno_to_name(0));
 	return audit_open();
 }
 endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
 	end = map__rip_2objdump(map, sym->end);
 
 	offset = line_ip - start;
-	if (offset < 0 || (u64)line_ip > end)
+	if ((u64)line_ip < start || (u64)line_ip > end)
 		offset = -1;
 	else
 		parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..e23bde19d590 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
 }
 
 /**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+	Dwarf_Attribute attr;
+
+	return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+		dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
  * die_get_data_member_location - Get the data-member offset
  * @mb_die: a DIE of a member of a data structure
  * @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
 	struct __addr_die_search_param *ad = data;
 
+	/*
+	 * Since a declaration entry doesn't has given pc, this always returns
+	 * function definition entry.
+	 */
 	if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
 	    dwarf_haspc(fn_die, ad->addr)) {
 		memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 6ce1717784b7..8658d41697d2 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
 extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
 			int (*callback)(Dwarf_Die *, void *), void *data);
 
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26441d0e571b..ce69901176d8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
 	return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-				       u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+				       struct machine *machine,
+				       pid_t pid, u16 misc, int fd)
 {
+	char nm[PATH_MAX];
 	struct dso *pos;
 
 	dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
 		if (is_vdso_map(pos->short_name)) {
 			name = (char *) VDSO__MAP_NAME;
 			name_len = sizeof(VDSO__MAP_NAME) + 1;
+		} else if (dso__is_kcore(pos)) {
+			machine__mmap_name(machine, nm, sizeof(nm));
+			name = nm;
+			name_len = strlen(nm) + 1;
 		} else {
 			name = pos->long_name;
 			name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
 		umisc = PERF_RECORD_MISC_GUEST_USER;
 	}
 
-	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-					  kmisc, fd);
+	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+					  machine->pid, kmisc, fd);
 	if (err == 0)
-		err = __dsos__write_buildid_table(&machine->user_dsos,
+		err = __dsos__write_buildid_table(&machine->user_dsos, machine,
 						  machine->pid, umisc, fd);
 	return err;
 }
@@ -375,23 +381,31 @@ out_free:
 	return err;
 }
 
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+			       const char *debugdir)
 {
 	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
 	bool is_vdso = is_vdso_map(dso->short_name);
+	char *name = dso->long_name;
+	char nm[PATH_MAX];
 
-	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-				     dso->long_name, debugdir,
-				     is_kallsyms, is_vdso);
+	if (dso__is_kcore(dso)) {
+		is_kallsyms = true;
+		machine__mmap_name(machine, nm, sizeof(nm));
+		name = nm;
+	}
+	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+				     debugdir, is_kallsyms, is_vdso);
 }
 
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+				   struct machine *machine, const char *debugdir)
 {
 	struct dso *pos;
 	int err = 0;
 
 	dsos__for_each_with_build_id(pos, head)
-		if (dso__cache_build_id(pos, debugdir))
+		if (dso__cache_build_id(pos, machine, debugdir))
 			err = -1;
 
 	return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
 
 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+					  debugdir);
+	ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
 	return ret;
 }
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 46a0d35a05e1..9ff6cf3e9a99 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
 	next = rb_first(root);
 
 	while (next) {
+		if (session_done())
+			break;
 		n = rb_entry(next, struct hist_entry, rb_node_in);
 		next = rb_next(&n->rb_node_in);
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 933d14f287ca..6188d2876a71 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
 		modules = path;
 	}
 
-	if (symbol__restricted_filename(path, "/proc/modules"))
+	if (symbol__restricted_filename(modules, "/proc/modules"))
 		return -1;
 
 	file = fopen(modules, "r");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index be0329394d56..371476cb8ddc 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
 static int debuginfo__init_offline_dwarf(struct debuginfo *self,
 					 const char *path)
 {
-	Dwfl_Module *mod;
 	int fd;
 
 	fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
 	if (!self->dwfl)
 		goto error;
 
-	mod = dwfl_report_offline(self->dwfl, "", "", fd);
-	if (!mod)
+	self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
+	if (!self->mod)
 		goto error;
 
-	self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+	self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
 	if (!self->dbg)
 		goto error;
 
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 }
 
 /* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
-				  bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+				  Dwarf_Addr paddr, bool retprobe,
+				  struct probe_trace_point *tp)
 {
 	Dwarf_Addr eaddr, highaddr;
-	const char *name;
-
-	/* Copy the name of probe point */
-	name = dwarf_diename(sp_die);
-	if (name) {
-		if (dwarf_entrypc(sp_die, &eaddr) != 0) {
-			pr_warning("Failed to get entry address of %s\n",
-				   dwarf_diename(sp_die));
-			return -ENOENT;
-		}
-		if (dwarf_highpc(sp_die, &highaddr) != 0) {
-			pr_warning("Failed to get end address of %s\n",
-				   dwarf_diename(sp_die));
-			return -ENOENT;
-		}
-		if (paddr > highaddr) {
-			pr_warning("Offset specified is greater than size of %s\n",
-				   dwarf_diename(sp_die));
-			return -EINVAL;
-		}
-		tp->symbol = strdup(name);
-		if (tp->symbol == NULL)
-			return -ENOMEM;
-		tp->offset = (unsigned long)(paddr - eaddr);
-	} else
-		/* This function has no name. */
-		tp->offset = (unsigned long)paddr;
+	GElf_Sym sym;
+	const char *symbol;
+
+	/* Verify the address is correct */
+	if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+		pr_warning("Failed to get entry address of %s\n",
+			   dwarf_diename(sp_die));
+		return -ENOENT;
+	}
+	if (dwarf_highpc(sp_die, &highaddr) != 0) {
+		pr_warning("Failed to get end address of %s\n",
+			   dwarf_diename(sp_die));
+		return -ENOENT;
+	}
+	if (paddr > highaddr) {
+		pr_warning("Offset specified is greater than size of %s\n",
+			   dwarf_diename(sp_die));
+		return -EINVAL;
+	}
+
+	/* Get an appropriate symbol from symtab */
+	symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+	if (!symbol) {
+		pr_warning("Failed to find symbol at 0x%lx\n",
+			   (unsigned long)paddr);
+		return -ENOENT;
+	}
+	tp->offset = (unsigned long)(paddr - sym.st_value);
+	tp->symbol = strdup(symbol);
+	if (!tp->symbol)
+		return -ENOMEM;
 
 	/* Return probe must be on the head of a subprogram */
 	if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
 	}
 
 	/* If not a real subprogram, find a real one */
-	if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+	if (!die_is_func_def(sc_die)) {
 		if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
 			pr_warning("Failed to find probe point in any "
 				   "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
 	struct dwarf_callback_param *param = data;
 	struct probe_finder *pf = param->data;
 	struct perf_probe_point *pp = &pf->pev->point;
-	Dwarf_Attribute attr;
 
 	/* Check tag and diename */
-	if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-	    !die_compare_name(sp_die, pp->function) ||
-	    dwarf_attr(sp_die, DW_AT_declaration, &attr))
+	if (!die_is_func_def(sp_die) ||
+	    !die_compare_name(sp_die, pp->function))
 		return DWARF_CB_OK;
 
 	/* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 	tev = &tf->tevs[tf->ntevs++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
 				     pf->pev->point.retprobe, &tev->point);
 	if (ret < 0)
 		return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
 {
 	struct trace_event_finder tf = {
 			.pf = {.pev = pev, .callback = add_probe_trace_event},
-			.max_tevs = max_tevs};
+			.mod = self->mod, .max_tevs = max_tevs};
 	int ret;
 
 	/* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
 	vl = &af->vls[af->nvls++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
 				     pf->pev->point.retprobe, &vl->point);
 	if (ret < 0)
 		return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
 {
 	struct available_var_finder af = {
 			.pf = {.pev = pev, .callback = add_available_vars},
+			.mod = self->mod,
 			.max_vls = max_vls, .externs = externs};
 	int ret;
 
@@ -1474,7 +1477,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
 		return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
 	struct dwarf_callback_param *param = data;
@@ -1485,7 +1488,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 	if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
 		return DWARF_CB_OK;
 
-	if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+	if (die_is_func_def(sp_die) &&
 	    die_compare_name(sp_die, lr->function)) {
 		lf->fname = dwarf_decl_file(sp_die);
 		dwarf_decl_line(sp_die, &lr->offset);
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
 /* debug information structure */
 struct debuginfo {
 	Dwarf *dbg;
+	Dwfl_Module *mod;
 	Dwfl *dwfl;
 	Dwarf_Addr bias;
 };
@@ -77,6 +78,7 @@ struct probe_finder {
 
 struct trace_event_finder {
 	struct probe_finder pf;
+	Dwfl_Module *mod; /* For solving symbols */
 	struct probe_trace_event *tevs; /* Found trace events */
 	int ntevs; /* Number of trace events */
 	int max_tevs; /* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
 
 struct available_var_finder {
 	struct probe_finder pf;
+	Dwfl_Module *mod; /* For solving symbols */
 	struct variable_list *vls; /* Found variable lists */
 	int nvls; /* Number of variable lists */
 	int max_vls; /* Max no. of variable lists */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..70ffa41518f3 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
 		return 0;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
+		if (session_done())
+			return 0;
+
 		if (iter->timestamp > limit)
 			break;
 
@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
 	}
 }
 
-#define session_done() (*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
1372 "Processing events..."); 1374 "Processing events...");
1373 } 1375 }
1374 1376
1377 err = 0;
1378 if (session_done())
1379 goto out_err;
1380
1375 if (file_pos < file_size) 1381 if (file_pos < file_size)
1376 goto more; 1382 goto more;
1377 1383
1378 err = 0;
1379 /* do the final flush for ordered samples */ 1384 /* do the final flush for ordered samples */
1380 session->ordered_samples.next_flush = ULLONG_MAX; 1385 session->ordered_samples.next_flush = ULLONG_MAX;
1381 err = flush_sample_queue(session, tool); 1386 err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
 	__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
8#include "symbol.h" 8#include "symbol.h"
9#include "debug.h" 9#include "debug.h"
10 10
11#ifndef HAVE_ELF_GETPHDRNUM
12static int elf_getphdrnum(Elf *elf, size_t *dst)
13{
14 GElf_Ehdr gehdr;
15 GElf_Ehdr *ehdr;
16
17 ehdr = gelf_getehdr(elf, &gehdr);
18 if (!ehdr)
19 return -1;
20
21 *dst = ehdr->e_phnum;
22
23 return 0;
24}
25#endif
26
11#ifndef NT_GNU_BUILD_ID 27#ifndef NT_GNU_BUILD_ID
12#define NT_GNU_BUILD_ID 3 28#define NT_GNU_BUILD_ID 3
13#endif 29#endif
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
 	char *next = NULL;
 	char *addr_str;
 	char *mod;
-	char *fmt;
+	char *fmt = NULL;
 
 	line = strtok_r(file, "\n", &next);
 	while (line) {
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index ea475cd03511..8a39dda7a325 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,8 +101,11 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 					   typeof(*work), queue);
 		cancel_work_sync(&work->work);
 		list_del(&work->queue);
-		if (!work->done) /* work was canceled */
+		if (!work->done) { /* work was canceled */
+			mmdrop(work->mm);
+			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
 			kmem_cache_free(async_pf_cache, work);
+		}
 	}
 
 	spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bf040c4e02b3..a9dd682cf5e3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1058,12 +1058,18 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 /*
- * The hva returned by this function is only allowed to be read.
- * It should pair with kvm_read_hva() or kvm_read_hva_atomic().
+ * If writable is set to false, the hva returned by this function is only
+ * allowed to be read.
  */
-static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 {
-	return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
+
+	if (!kvm_is_error_hva(hva) && writable)
+		*writable = !memslot_is_readonly(slot);
+
+	return hva;
 }
 
 static int kvm_read_hva(void *data, void __user *hva, int len)
@@ -1430,7 +1436,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 	int r;
 	unsigned long addr;
 
-	addr = gfn_to_hva_read(kvm, gfn);
+	addr = gfn_to_hva_prot(kvm, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = kvm_read_hva(data, (void __user *)addr + offset, len);
@@ -1468,7 +1474,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int offset = offset_in_page(gpa);
 
-	addr = gfn_to_hva_read(kvm, gfn);
+	addr = gfn_to_hva_prot(kvm, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	pagefault_disable();