-rw-r--r--Documentation/Changes9
-rw-r--r--Documentation/DocBook/gadget.tmpl2
-rw-r--r--Documentation/DocBook/genericirq.tmpl4
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl2
-rw-r--r--Documentation/DocBook/libata.tmpl6
-rw-r--r--Documentation/DocBook/media_api.tmpl2
-rw-r--r--Documentation/DocBook/mtdnand.tmpl30
-rw-r--r--Documentation/DocBook/regulator.tmpl2
-rw-r--r--Documentation/DocBook/uio-howto.tmpl4
-rw-r--r--Documentation/DocBook/usb.tmpl2
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl2
-rw-r--r--Documentation/acpi/enumeration.txt6
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt20
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt7
-rw-r--r--Documentation/input/event-codes.txt13
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/laptops/00-INDEX4
-rw-r--r--Documentation/laptops/freefall.c (renamed from Documentation/laptops/hpfall.c)59
-rw-r--r--MAINTAINERS40
-rw-r--r--Makefile103
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts4
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi6
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts1
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi10
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi5
-rw-r--r--arch/arm/boot/dts/hi3620.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi4
-rw-r--r--arch/arm/crypto/aesbs-glue.c10
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c30
-rw-r--r--arch/arm/kernel/kprobes-test.c10
-rw-r--r--arch/arm/kernel/probes-arm.c6
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/mach-exynos/exynos.c8
-rw-r--r--arch/arm/mach-exynos/firmware.c9
-rw-r--r--arch/arm/mach-exynos/hotplug.c10
-rw-r--r--arch/arm/mach-exynos/platsmp.c34
-rw-r--r--arch/arm/mach-exynos/pm_domains.c61
-rw-r--r--arch/arm/mach-imx/clk-gate2.c31
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c4
-rw-r--r--arch/arm/mach-mvebu/coherency.c6
-rw-r--r--arch/arm/mach-mvebu/headsmp-a9.S9
-rw-r--r--arch/arm/mach-mvebu/pmsu.c10
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c2
-rw-r--r--arch/arm/mach-omap2/cm-regbits-34xx.h3
-rw-r--r--arch/arm/mach-omap2/common.h3
-rw-r--r--arch/arm/mach-omap2/devices.c28
-rw-r--r--arch/arm/mach-omap2/dsp.c10
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c18
-rw-r--r--arch/arm/mach-omap2/gpmc.c2
-rw-r--r--arch/arm/mach-omap2/omap4-common.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c18
-rw-r--r--arch/arm/mach-omap2/prm-regbits-34xx.h6
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/crypto/aes-glue.c12
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/kernel/efi-stub.c2
-rw-r--r--arch/arm64/mm/copypage.c2
-rw-r--r--arch/arm64/mm/init.c17
-rw-r--r--arch/blackfin/configs/BF609-EZKIT_defconfig2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S2
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c1
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c1
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c6
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c1
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c1
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c20
-rw-r--r--arch/blackfin/mach-bf609/include/mach/pm.h5
-rw-r--r--arch/blackfin/mach-bf609/pm.c4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c2
-rw-r--r--arch/m68k/kernel/head.S3
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/parisc/include/uapi/asm/signal.h2
-rw-r--r--arch/parisc/kernel/hardware.c3
-rw-r--r--arch/parisc/kernel/sys_parisc32.c46
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/parisc/mm/init.c1
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/include/asm/cputable.h1
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h10
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h3
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/rtas_flash.c6
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S5
-rw-r--r--arch/powerpc/lib/mem_64.S2
-rw-r--r--arch/powerpc/lib/sstep.c10
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c12
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/perf/core-book3s.c26
-rw-r--r--arch/powerpc/perf/power8-pmu.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile3
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c4
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c1
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/s390/include/asm/switch_to.h4
-rw-r--r--arch/s390/kernel/head.S6
-rw-r--r--arch/s390/kernel/ptrace.c12
-rw-r--r--arch/s390/pci/pci.c49
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h3
-rw-r--r--arch/sparc/kernel/sys32.S1
-rw-r--r--arch/sparc/kernel/systbls_32.S1
-rw-r--r--arch/sparc/kernel/systbls_64.S2
-rw-r--r--arch/um/kernel/tlb.c9
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/os-Linux/skas/process.c9
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/header.S26
-rw-r--r--arch/x86/boot/tools/build.c38
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c2
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c22
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event.h12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c78
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c11
-rw-r--r--arch/x86/kernel/entry_32.S9
-rw-r--r--arch/x86/kernel/espfix_64.c5
-rw-r--r--arch/x86/kernel/kprobes/core.c3
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/vdso/vdso2c.h3
-rw-r--r--arch/x86/vdso/vma.c4
-rw-r--r--arch/xtensa/kernel/vectors.S158
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S4
-rw-r--r--arch/xtensa/mm/init.c2
-rw-r--r--block/blk-cgroup.c7
-rw-r--r--block/blk-tag.c33
-rw-r--r--block/compat_ioctl.c1
-rw-r--r--drivers/acpi/ac.c130
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/battery.c41
-rw-r--r--drivers/acpi/ec.c164
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/video.c21
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_imx.c38
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/ahci_xgene.c60
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libahci_platform.c7
-rw-r--r--drivers/ata/libata-core.c12
-rw-r--r--drivers/ata/libata-eh.c9
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/base/platform.c18
-rw-r--r--drivers/block/drbd/drbd_nl.c6
-rw-r--r--drivers/block/zram/zram_drv.c22
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bluetooth/hci_h5.c1
-rw-r--r--drivers/char/hw_random/core.c47
-rw-r--r--drivers/char/hw_random/virtio-rng.c10
-rw-r--r--drivers/char/i8k.c4
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/clk/clk-s2mps11.c7
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c91
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c9
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c6
-rw-r--r--drivers/clk/spear/spear3xx_clock.c16
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c2
-rw-r--r--drivers/clk/ti/apll.c8
-rw-r--r--drivers/clk/ti/dpll.c5
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clocksource/exynos_mct.c20
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c7
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c35
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/crypto/caam/jr.c8
-rw-r--r--drivers/dma/cppi41.c13
-rw-r--r--drivers/dma/imx-sdma.c22
-rw-r--r--drivers/firewire/Kconfig1
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/firmware/efi/efi.c22
-rw-r--r--drivers/firmware/efi/fdt.c10
-rw-r--r--drivers/gpio/gpio-mcp23s08.c6
-rw-r--r--drivers/gpio/gpio-rcar.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c44
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c46
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c29
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c10
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c20
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c198
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c87
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c7
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c15
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-rmi.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c25
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_kvp.c17
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hwmon/adc128d818.c28
-rw-r--r--drivers/hwmon/adm1021.c14
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm1031.c8
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/da9055-hwmon.c2
-rw-r--r--drivers/hwmon/emc2103.c15
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c1
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/ide/Kconfig5
-rw-r--r--drivers/ide/ide-probe.c8
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c7
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c7
-rw-r--r--drivers/iio/industrialio-event.c3
-rw-r--r--drivers/iio/light/hid-sensor-als.c7
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/light/tcs3472.c11
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/keyboard/st-keyscan.c2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/tablet/wacom_wac.c28
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c5
-rw-r--r--drivers/iommu/fsl_pamu.c8
-rw-r--r--drivers/iommu/fsl_pamu_domain.c18
-rw-r--r--drivers/irqchip/irq-gic.c7
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c1
-rw-r--r--drivers/isdn/hisax/l3ni1.c14
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c28
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-io.c22
-rw-r--r--drivers/md/dm-mpath.c5
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/md/dm-zero.c4
-rw-r--r--drivers/md/dm.c15
-rw-r--r--drivers/media/dvb-frontends/si2168.c16
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c12
-rw-r--r--drivers/media/dvb-frontends/tda10071_priv.h1
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c1
-rw-r--r--drivers/media/tuners/si2157.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c40
-rw-r--r--drivers/media/usb/gspca/pac7302.c1
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c43
-rw-r--r--drivers/mtd/devices/elm.c2
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/ubi/fastmap.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c21
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c19
-rw-r--r--drivers/net/ethernet/realtek/r8169.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c42
-rw-r--r--drivers/net/fddi/defxx.c17
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/phy/dp83640.c6
-rw-r--r--drivers/net/phy/mdio_bus.c45
-rw-r--r--drivers/net/phy/phy_device.c15
-rw-r--r--drivers/net/ppp/ppp_generic.c30
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/hso.c50
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/r8152.c14
-rw-r--r--drivers/net/usb/smsc95xx.c14
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wan/farsync.c112
-rw-r--r--drivers/net/wan/x25_asy.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c1
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c1
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c1
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c28
-rw-r--r--drivers/net/xen-netback/netback.c86
-rw-r--r--drivers/net/xen-netfront.c27
-rw-r--r--drivers/of/of_mdio.c34
-rw-r--r--drivers/parport/Kconfig12
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-core.c7
-rw-r--r--drivers/phy/phy-omap-usb2.c11
-rw-r--r--drivers/phy/phy-samsung-usb2.c1
-rw-r--r--drivers/pinctrl/berlin/berlin.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/s390/char/raw3270.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c9
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/thermal/imx_thermal.c18
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/thermal_hwmon.c33
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c8
-rw-r--r--drivers/tty/serial/pmac_zilog.c3
-rw-r--r--drivers/tty/serial/sunsab.c3
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/xen/balloon.c12
-rw-r--r--drivers/xen/manage.c5
-rw-r--r--firmware/Makefile6
-rw-r--r--fs/afs/main.c4
-rw-r--r--fs/aio.c7
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/volumes.c8
-rw-r--r--fs/coredump.c2
-rw-r--r--fs/direct-io.c14
-rw-r--r--fs/ext4/extents_status.c4
-rw-r--r--fs/ext4/ialloc.c16
-rw-r--r--fs/ext4/mballoc.c4
-rw-r--r--fs/ext4/super.c60
-rw-r--r--fs/f2fs/data.c23
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h6
-rw-r--r--fs/f2fs/file.c12
-rw-r--r--fs/f2fs/inode.c1
-rw-r--r--fs/f2fs/namei.c13
-rw-r--r--fs/f2fs/node.c2
-rw-r--r--fs/f2fs/segment.c5
-rw-r--r--fs/f2fs/super.c4
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c41
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/fuse/inode.c27
-rw-r--r--fs/gfs2/file.c4
-rw-r--r--fs/gfs2/glock.c14
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/kernfs/mount.c30
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs3acl.c43
-rw-r--r--fs/nfs/nfs3proc.c4
-rw-r--r--fs/nfs/pagelist.c20
-rw-r--r--fs/nfs/write.c335
-rw-r--r--fs/nfsd/nfs4xdr.c6
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/xfs_bmap.c7
-rw-r--r--fs/xfs/xfs_bmap.h4
-rw-r--r--fs/xfs/xfs_bmap_util.c53
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_btree.c82
-rw-r--r--fs/xfs/xfs_iomap.c3
-rw-r--r--fs/xfs/xfs_sb.c25
-rw-r--r--include/acpi/video.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/pinctrl/dra.h7
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/kernfs.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/of_mdio.h8
-rw-r--r--include/linux/osq_lock.h27
-rw-r--r--include/linux/pagemap.h12
-rw-r--r--include/linux/percpu-defs.h4
-rw-r--r--include/linux/rcupdate.h46
-rw-r--r--include/linux/rwsem-spinlock.h8
-rw-r--r--include/linux/rwsem.h34
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/net/ip.h11
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/netns/ieee802154_6lowpan.h2
-rw-r--r--include/net/netns/nftables.h2
-rw-r--r--include/net/sock.h12
-rw-r--r--include/uapi/linux/fuse.h3
-rw-r--r--kernel/Kconfig.locks9
-rw-r--r--kernel/cgroup.c58
-rw-r--r--kernel/cpuset.c20
-rw-r--r--kernel/events/core.c34
-rw-r--r--kernel/kprobes.c14
-rw-r--r--kernel/locking/mcs_spinlock.c64
-rw-r--r--kernel/locking/mcs_spinlock.h9
-rw-r--r--kernel/locking/mutex.c2
-rw-r--r--kernel/locking/rwsem-spinlock.c28
-rw-r--r--kernel/locking/rwsem-xadd.c16
-rw-r--r--kernel/locking/rwsem.c2
-rw-r--r--kernel/power/process.c1
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/rcu/tree.c140
-rw-r--r--kernel/rcu/tree.h6
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/update.c22
-rw-r--r--kernel/sched/core.c7
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/time/alarmtimer.c20
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/ring_buffer.c4
-rw-r--r--kernel/trace/trace.c20
-rw-r--r--kernel/trace/trace_clock.c9
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/workqueue.c3
-rw-r--r--lib/cpumask.c2
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/memory-failure.c4
-rw-r--r--mm/memory.c3
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c5
-rw-r--r--mm/page_alloc.c15
-rw-r--r--mm/rmap.c10
-rw-r--r--mm/shmem.c102
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/truncate.c11
-rw-r--r--net/8021q/vlan_dev.c13
-rw-r--r--net/appletalk/ddp.c3
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c44
-rw-r--r--net/batman-adv/soft-interface.c60
-rw-r--r--net/batman-adv/translation-table.c26
-rw-r--r--net/batman-adv/types.h2
-rw-r--r--net/bluetooth/hci_conn.c12
-rw-r--r--net/bluetooth/smp.c60
-rw-r--r--net/compat.c9
-rw-r--r--net/core/dev.c32
-rw-r--r--net/core/iovec.c6
-rw-r--r--net/core/neighbour.c11
-rw-r--r--net/dns_resolver/dns_query.c2
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/gre_demux.c1
-rw-r--r--net/ipv4/gre_offload.c3
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ip_tunnel.c12
-rw-r--r--net/ipv4/route.c47
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_offload.c2
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/udp.c5
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/mcast.c13
-rw-r--r--net/ipv6/tcpv6_offload.c2
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/cfg.c5
-rw-r--r--net/mac80211/tx.c20
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c1
-rw-r--r--net/netfilter/nf_tables_api.c140
-rw-r--r--net/netfilter/nf_tables_core.c10
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/actions.c2
-rw-r--r--net/openvswitch/datapath.c27
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow.h5
-rw-r--r--net/openvswitch/flow_table.c16
-rw-r--r--net/openvswitch/flow_table.h3
-rw-r--r--net/openvswitch/vport-gre.c17
-rw-r--r--net/sched/cls_u32.c19
-rw-r--r--net/sctp/associola.c1
-rw-r--r--net/sctp/ulpevent.c122
-rw-r--r--net/tipc/bcast.c1
-rw-r--r--net/tipc/msg.c11
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--net/wireless/reg.c22
-rw-r--r--net/wireless/trace.h3
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_user.c7
-rwxr-xr-xscripts/kernel-doc15
-rw-r--r--sound/firewire/bebob/bebob_maudio.c53
-rw-r--r--sound/pci/hda/hda_controller.c3
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/hda_tegra.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c1
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h4
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h8
-rw-r--r--tools/lib/lockdep/preload.c20
-rw-r--r--tools/perf/ui/browsers/hists.c21
-rw-r--r--tools/perf/util/machine.c54
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/thermal/tmon/tmon.c26
605 files changed, 5341 insertions, 2850 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 2254db0f00a5..227bec88021e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -280,12 +280,9 @@ that is possible.
 mcelog
 ------
 
-In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility
-as a regular cronjob similar to the x86-64 kernel to process and log
-machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check
-events are errors reported by the CPU. Processing them is strongly encouraged.
-All x86-64 kernels since 2.6.4 require the mcelog utility to
-process machine checks.
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
+by the CPU. Processing them is strongly encouraged.
 
 Getting updated software
 ========================
diff --git a/Documentation/DocBook/gadget.tmpl b/Documentation/DocBook/gadget.tmpl
index 4017f147ba2f..2c425d70f7e2 100644
--- a/Documentation/DocBook/gadget.tmpl
+++ b/Documentation/DocBook/gadget.tmpl
@@ -708,7 +708,7 @@ hardware level details could be very different.
 
 <para>Systems need specialized hardware support to implement OTG,
 notably including a special <emphasis>Mini-AB</emphasis> jack
-and associated transciever to support <emphasis>Dual-Role</emphasis>
+and associated transceiver to support <emphasis>Dual-Role</emphasis>
 operation:
 they can act either as a host, using the standard
 Linux-USB host side driver stack,
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 46347f603353..59fb5c077541 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -182,7 +182,7 @@
  <para>
  Each interrupt is described by an interrupt descriptor structure
  irq_desc. The interrupt is referenced by an 'unsigned int' numeric
- value which selects the corresponding interrupt decription structure
+ value which selects the corresponding interrupt description structure
  in the descriptor structures array.
  The descriptor structure contains status information and pointers
  to the interrupt flow method and the interrupt chip structure
@@ -470,7 +470,7 @@ if (desc->irq_data.chip->irq_eoi)
  <para>
  To avoid copies of identical implementations of IRQ chips the
  core provides a configurable generic interrupt chip
- implementation. Developers should check carefuly whether the
+ implementation. Developers should check carefully whether the
  generic chip fits their needs before implementing the same
  functionality slightly differently themselves.
  </para>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 19f2a5a5a5b4..e584ee12a1e7 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1760,7 +1760,7 @@ as it would be on UP.
 </para>
 
 <para>
-There is a furthur optimization possible here: remember our original
+There is a further optimization possible here: remember our original
 cache code, where there were no reference counts and the caller simply
 held the lock whenever using the object? This is still possible: if
 you hold the lock, no one can delete the object, so you don't need to
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index deb71baed328..d7fcdc5a4379 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -677,7 +677,7 @@ and other resources, etc.
 
  <listitem>
  <para>
- ATA_QCFLAG_ACTIVE is clared from qc->flags.
+ ATA_QCFLAG_ACTIVE is cleared from qc->flags.
  </para>
  </listitem>
 
@@ -708,7 +708,7 @@ and other resources, etc.
 
  <listitem>
  <para>
- qc->waiting is claread &amp; completed (in that order).
+ qc->waiting is cleared &amp; completed (in that order).
  </para>
  </listitem>
 
@@ -1163,7 +1163,7 @@ and other resources, etc.
 
  <para>
  Once sense data is acquired, this type of errors can be
- handled similary to other SCSI errors. Note that sense data
+ handled similarly to other SCSI errors. Note that sense data
  may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
  &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such
  cases, the error should be considered as an ATA bus error and
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 4decb46bfa76..03f9a1f8d413 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -68,7 +68,7 @@
  several digital tv standards. While it is called as DVB API,
  in fact it covers several different video standards including
  DVB-T, DVB-S, DVB-C and ATSC. The API is currently being updated
- to documment support also for DVB-S2, ISDB-T and ISDB-S.</para>
+ to document support also for DVB-S2, ISDB-T and ISDB-S.</para>
  <para>The third part covers the Remote Controller API.</para>
  <para>The fourth part covers the Media Controller API.</para>
  <para>For additional information and for the latest development code,
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index cd11926e07c7..7da8f0402af5 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -91,7 +91,7 @@
  <listitem><para>
  [MTD Interface]</para><para>
  These functions provide the interface to the MTD kernel API.
- They are not replacable and provide functionality
+ They are not replaceable and provide functionality
  which is complete hardware independent.
  </para></listitem>
  <listitem><para>
@@ -100,14 +100,14 @@
  </para></listitem>
  <listitem><para>
  [GENERIC]</para><para>
- Generic functions are not replacable and provide functionality
+ Generic functions are not replaceable and provide functionality
  which is complete hardware independent.
  </para></listitem>
  <listitem><para>
  [DEFAULT]</para><para>
  Default functions provide hardware related functionality which is suitable
  for most of the implementations. These functions can be replaced by the
- board driver if neccecary. Those functions are called via pointers in the
+ board driver if necessary. Those functions are called via pointers in the
  NAND chip description structure. The board driver can set the functions which
  should be replaced by board dependent functions before calling nand_scan().
  If the function pointer is NULL on entry to nand_scan() then the pointer
@@ -264,7 +264,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
  is set up nand_scan() is called. This function tries to
  detect and identify then chip. If a chip is found all the
  internal data fields are initialized accordingly.
- The structure(s) have to be zeroed out first and then filled with the neccecary
+ The structure(s) have to be zeroed out first and then filled with the necessary
  information about the device.
  </para>
  <programlisting>
@@ -327,7 +327,7 @@ module_init(board_init);
  <sect1 id="Exit_function">
  <title>Exit function</title>
  <para>
- The exit function is only neccecary if the driver is
+ The exit function is only necessary if the driver is
  compiled as a module. It releases all resources which
  are held by the chip driver and unregisters the partitions
  in the MTD layer.
@@ -494,7 +494,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  in this case. See rts_from4.c and diskonchip.c for
  implementation reference. In those cases we must also
  use bad block tables on FLASH, because the ECC layout is
- interferring with the bad block marker positions.
+ interfering with the bad block marker positions.
  See bad block table support for details.
  </para>
  </sect2>
@@ -542,7 +542,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <para>
  nand_scan() calls the function nand_default_bbt().
  nand_default_bbt() selects appropriate default
- bad block table desriptors depending on the chip information
+ bad block table descriptors depending on the chip information
  which was retrieved by nand_scan().
  </para>
  <para>
@@ -554,7 +554,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <sect2 id="Flash_based_tables">
  <title>Flash based tables</title>
  <para>
- It may be desired or neccecary to keep a bad block table in FLASH.
+ It may be desired or necessary to keep a bad block table in FLASH.
  For AG-AND chips this is mandatory, as they have no factory marked
  bad blocks. They have factory marked good blocks. The marker pattern
  is erased when the block is erased to be reused. So in case of
@@ -565,10 +565,10 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  of the blocks.
  </para>
  <para>
- The blocks in which the tables are stored are procteted against
+ The blocks in which the tables are stored are protected against
  accidental access by marking them bad in the memory bad block
  table. The bad block table management functions are allowed
- to circumvernt this protection.
+ to circumvent this protection.
  </para>
  <para>
  The simplest way to activate the FLASH based bad block table support
@@ -592,7 +592,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  User defined tables are created by filling out a
  nand_bbt_descr structure and storing the pointer in the
  nand_chip structure member bbt_td before calling nand_scan().
- If a mirror table is neccecary a second structure must be
+ If a mirror table is necessary a second structure must be
  created and a pointer to this structure must be stored
  in bbt_md inside the nand_chip structure. If the bbt_md
  member is set to NULL then only the main table is used
@@ -666,7 +666,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <para>
  For automatic placement some blocks must be reserved for
  bad block table storage. The number of reserved blocks is defined
- in the maxblocks member of the babd block table description structure.
+ in the maxblocks member of the bad block table description structure.
  Reserving 4 blocks for mirrored tables should be a reasonable number.
  This also limits the number of blocks which are scanned for the bad
  block table ident pattern.
@@ -1068,11 +1068,11 @@ in this page</entry>
  <chapter id="filesystems">
  <title>Filesystem support</title>
  <para>
- The NAND driver provides all neccecary functions for a
+ The NAND driver provides all necessary functions for a
  filesystem via the MTD interface.
  </para>
  <para>
- Filesystems must be aware of the NAND pecularities and
+ Filesystems must be aware of the NAND peculiarities and
  restrictions. One major restrictions of NAND Flash is, that you cannot
  write as often as you want to a page. The consecutive writes to a page,
  before erasing it again, are restricted to 1-3 writes, depending on the
@@ -1222,7 +1222,7 @@ in this page</entry>
 #define NAND_BBT_VERSION 0x00000100
 /* Create a bbt if none axists */
 #define NAND_BBT_CREATE 0x00000200
-/* Write bbt if neccecary */
+/* Write bbt if necessary */
 #define NAND_BBT_WRITE 0x00001000
 /* Read and write back block contents when writing bbt */
 #define NAND_BBT_SAVECONTENT 0x00002000
diff --git a/Documentation/DocBook/regulator.tmpl b/Documentation/DocBook/regulator.tmpl
index 346e552fa2cc..3b08a085d2c7 100644
--- a/Documentation/DocBook/regulator.tmpl
+++ b/Documentation/DocBook/regulator.tmpl
@@ -155,7 +155,7 @@
  release regulators. Functions are
  provided to <link linkend='API-regulator-enable'>enable</link>
  and <link linkend='API-regulator-disable'>disable</link> the
- reguator and to get and set the runtime parameters of the
+ regulator and to get and set the runtime parameters of the
  regulator.
  </para>
  <para>
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 95618159e29b..bbe9c1fd5cef 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -766,10 +766,10 @@ framework to set up sysfs files for this region. Simply leave it alone.
  <para>
  The dynamic memory regions will be allocated when the UIO device file,
  <varname>/dev/uioX</varname> is opened.
- Simiar to static memory resources, the memory region information for
+ Similar to static memory resources, the memory region information for
  dynamic regions is then visible via sysfs at
  <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
- The dynmaic memory regions will be freed when the UIO device file is
+ The dynamic memory regions will be freed when the UIO device file is
  closed. When no processes are holding the device file open, the address
  returned to userspace is ~0.
  </para>
diff --git a/Documentation/DocBook/usb.tmpl b/Documentation/DocBook/usb.tmpl
index 8d57c1888dca..85fc0e28576f 100644
--- a/Documentation/DocBook/usb.tmpl
+++ b/Documentation/DocBook/usb.tmpl
@@ -153,7 +153,7 @@
 
  <listitem><para>The Linux USB API supports synchronous calls for
  control and bulk messages.
- It also supports asynchnous calls for all kinds of data transfer,
+ It also supports asynchronous calls for all kinds of data transfer,
  using request structures called "URBs" (USB Request Blocks).
  </para></listitem>
 
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index d0056a4e9c53..6f639d9530b5 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -5696,7 +5696,7 @@ struct _snd_pcm_runtime {
  suspending the PCM operations via
  <function>snd_pcm_suspend_all()</function> or
  <function>snd_pcm_suspend()</function>. It means that the PCM
- streams are already stoppped when the register snapshot is
+ streams are already stopped when the register snapshot is
  taken. But, remember that you don't have to restart the PCM
  stream in the resume callback. It'll be restarted via
  trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea13a1f..e182be5e3c83 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index e742d21dbd96..a69ffe1d54d5 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to
 /sys/devices/system/cpu/intel_pstate/
 
  max_perf_pct: limits the maximum P state that will be requested by
- the driver stated as a percentage of the available performance.
+ the driver stated as a percentage of the available performance. The
+ available (P states) performance may be reduced by the no_turbo
+ setting described below.
 
  min_perf_pct: limits the minimum P state that will be requested by
- the driver stated as a percentage of the available performance.
+ the driver stated as a percentage of the max (non-turbo)
+ performance level.
 
  no_turbo: limits the driver to selecting P states below the turbo
  frequency range.
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 5216b419016a..8b4f7b7fe88b 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -9,6 +9,18 @@ Required Properties:
 - reg: physical base address of the controller and length of memory mapped
  region.
 
+Optional Properties:
+- clocks: List of clock handles. The parent clocks of the input clocks to the
+ devices in this power domain are set to oscclk before power gating
+ and restored back after powering on a domain. This is required for
+ all domains which are powered on and off and not required for unused
+ domains.
+- clock-names: The following clocks can be specified:
+ - oscclk: Oscillator clock.
+ - pclkN, clkN: Pairs of parent of input clock and input clock to the
+ devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
+ are supported currently.
+
 Node of a device using power domains must have a samsung,power-domain property
 defined with a phandle to respective power domain.
 
@@ -19,6 +31,14 @@ Example:
  reg = <0x10023C00 0x10>;
  };
 
+ mfc_pd: power-domain@10044060 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044060 0x20>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+ <&clock CLK_MOUT_USER_ACLK333>;
+ clock-names = "oscclk", "pclk0", "clk0";
+ };
+
 Example of the node using power domain:
 
  node {
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515d2b62..366690cb86a3 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
- for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+ details. OPPs *must* be supplied either via DT, i.e. this property, or
+ populated at runtime.
 - clock-latency: Specify the possible maximum transition latency for clock,
  in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 64fd7dec1bbc..b3556609a06f 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -4,6 +4,13 @@ Required properties:
 
  - compatible: Must contain one of the following:
 
+ - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
+ - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
+ - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
+ - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
+ - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
+ - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+ - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
  - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
  - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
  - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1b9aa8c5a52..b7fa2f599459 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
  leaf rcu_node structure. Useful for very large
  systems.
 
+ rcutree.jiffies_till_sched_qs= [KNL]
+ Set required age in jiffies for a
+ given grace period before RCU starts
+ soliciting quiescent-state help from
+ rcu_note_context_switch().
+
  rcutree.jiffies_till_first_fqs= [KNL]
  Set delay from grace-period initialization to
  first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
  the allocated input device; If set to 0, video driver
  will only send out the event without touching backlight
  brightness level.
- default: 0
+ default: 1
 
  virtio_mmio.device=
  [VMMIO] Memory mapped virtio (platform) device.
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index d13b9a9a9e00..d399ae1fc724 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -8,8 +8,8 @@ disk-shock-protection.txt
  - information on hard disk shock protection.
 dslm.c
  - Simple Disk Sleep Monitor program
-hpfall.c
- - (HP) laptop accelerometer program for disk protection.
+freefall.c
+ - (HP/DELL) laptop accelerometer program for disk protection.
 laptop-mode.txt
  - how to conserve battery power using laptop-mode.
 sony-laptop.txt
diff --git a/Documentation/laptops/hpfall.c b/Documentation/laptops/freefall.c
index b85dbbac0499..aab2ff09e868 100644
--- a/Documentation/laptops/hpfall.c
+++ b/Documentation/laptops/freefall.c
@@ -1,7 +1,9 @@
-/* Disk protection for HP machines.
+/* Disk protection for HP/DELL machines.
  *
  * Copyright 2008 Eric Piel
  * Copyright 2009 Pavel Machek <pavel@ucw.cz>
+ * Copyright 2012 Sonal Santan
+ * Copyright 2014 Pali Rohár <pali.rohar@gmail.com>
  *
  * GPLv2.
  */
@@ -18,24 +20,31 @@
 #include <signal.h>
 #include <sys/mman.h>
 #include <sched.h>
+#include <syslog.h>
 
-char unload_heads_path[64];
+static int noled;
+static char unload_heads_path[64];
+static char device_path[32];
+static const char app_name[] = "FREE FALL";
 
-int set_unload_heads_path(char *device)
+static int set_unload_heads_path(char *device)
 {
 	char devname[64];
 
 	if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
 		return -EINVAL;
-	strncpy(devname, device + 5, sizeof(devname));
+	strncpy(devname, device + 5, sizeof(devname) - 1);
+	strncpy(device_path, device, sizeof(device_path) - 1);
 
 	snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
 		 "/sys/block/%s/device/unload_heads", devname);
 	return 0;
 }
-int valid_disk(void)
+
+static int valid_disk(void)
 {
 	int fd = open(unload_heads_path, O_RDONLY);
+
 	if (fd < 0) {
 		perror(unload_heads_path);
 		return 0;
@@ -45,43 +54,54 @@ int valid_disk(void)
 	return 1;
 }
 
-void write_int(char *path, int i)
+static void write_int(char *path, int i)
 {
 	char buf[1024];
 	int fd = open(path, O_RDWR);
+
 	if (fd < 0) {
 		perror("open");
 		exit(1);
 	}
+
 	sprintf(buf, "%d", i);
+
 	if (write(fd, buf, strlen(buf)) != strlen(buf)) {
 		perror("write");
 		exit(1);
 	}
+
 	close(fd);
 }
 
-void set_led(int on)
+static void set_led(int on)
 {
+	if (noled)
+		return;
 	write_int("/sys/class/leds/hp::hddprotect/brightness", on);
 }
 
-void protect(int seconds)
+static void protect(int seconds)
 {
+	const char *str = (seconds == 0) ? "Unparked" : "Parked";
+
 	write_int(unload_heads_path, seconds*1000);
+	syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
 }
 
-int on_ac(void)
+static int on_ac(void)
 {
-// /sys/class/power_supply/AC0/online
+	/* /sys/class/power_supply/AC0/online */
+	return 1;
 }
 
-int lid_open(void)
+static int lid_open(void)
 {
-// /proc/acpi/button/lid/LID/state
+	/* /proc/acpi/button/lid/LID/state */
+	return 1;
 }
 
-void ignore_me(void)
+static void ignore_me(int signum)
 {
 	protect(0);
 	set_led(0);
@@ -90,6 +110,7 @@ void ignore_me(void)
 int main(int argc, char **argv)
 {
 	int fd, ret;
+	struct stat st;
 	struct sched_param param;
 
 	if (argc == 1)
@@ -111,7 +132,16 @@ int main(int argc, char **argv)
 		return EXIT_FAILURE;
 	}
 
-	daemon(0, 0);
+	if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
+		noled = 1;
+
+	if (daemon(0, 0) != 0) {
+		perror("daemon");
+		return EXIT_FAILURE;
+	}
+
+	openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
144
115 param.sched_priority = sched_get_priority_max(SCHED_FIFO); 145 param.sched_priority = sched_get_priority_max(SCHED_FIFO);
116 sched_setscheduler(0, SCHED_FIFO, &param); 146 sched_setscheduler(0, SCHED_FIFO, &param);
117 mlockall(MCL_CURRENT|MCL_FUTURE); 147 mlockall(MCL_CURRENT|MCL_FUTURE);
@@ -141,6 +171,7 @@ int main(int argc, char **argv)
141 alarm(20); 171 alarm(20);
142 } 172 }
143 173
174 closelog();
144 close(fd); 175 close(fd);
145 return EXIT_SUCCESS; 176 return EXIT_SUCCESS;
146} 177}
diff --git a/MAINTAINERS b/MAINTAINERS
index 6813d0aa5ecf..86efa7e213c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
156 156
1578169 10/100/1000 GIGABIT ETHERNET DRIVER 1578169 10/100/1000 GIGABIT ETHERNET DRIVER
158M: Realtek linux nic maintainers <nic_swsd@realtek.com> 158M: Realtek linux nic maintainers <nic_swsd@realtek.com>
159M: Francois Romieu <romieu@fr.zoreil.com>
160L: netdev@vger.kernel.org 159L: netdev@vger.kernel.org
161S: Maintained 160S: Maintained
162F: drivers/net/ethernet/realtek/r8169.c 161F: drivers/net/ethernet/realtek/r8169.c
@@ -1314,6 +1313,20 @@ W: http://oss.renesas.com
1314Q: http://patchwork.kernel.org/project/linux-sh/list/ 1313Q: http://patchwork.kernel.org/project/linux-sh/list/
1315T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next 1314T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
1316S: Supported 1315S: Supported
1316F: arch/arm/boot/dts/emev2*
1317F: arch/arm/boot/dts/r7s*
1318F: arch/arm/boot/dts/r8a*
1319F: arch/arm/boot/dts/sh*
1320F: arch/arm/configs/ape6evm_defconfig
1321F: arch/arm/configs/armadillo800eva_defconfig
1322F: arch/arm/configs/bockw_defconfig
1323F: arch/arm/configs/genmai_defconfig
1324F: arch/arm/configs/koelsch_defconfig
1325F: arch/arm/configs/kzm9g_defconfig
1326F: arch/arm/configs/lager_defconfig
1327F: arch/arm/configs/mackerel_defconfig
1328F: arch/arm/configs/marzen_defconfig
1329F: arch/arm/configs/shmobile_defconfig
1317F: arch/arm/mach-shmobile/ 1330F: arch/arm/mach-shmobile/
1318F: drivers/sh/ 1331F: drivers/sh/
1319 1332
@@ -4497,8 +4510,7 @@ S: Supported
4497F: drivers/idle/i7300_idle.c 4510F: drivers/idle/i7300_idle.c
4498 4511
4499IEEE 802.15.4 SUBSYSTEM 4512IEEE 802.15.4 SUBSYSTEM
4500M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com> 4513M: Alexander Aring <alex.aring@gmail.com>
4501M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
4502L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) 4514L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
4503W: http://apps.sourceforge.net/trac/linux-zigbee 4515W: http://apps.sourceforge.net/trac/linux-zigbee
4504T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git 4516T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -6787,7 +6799,7 @@ F: arch/x86/kernel/quirks.c
6787 6799
6788PCI DRIVER FOR IMX6 6800PCI DRIVER FOR IMX6
6789M: Richard Zhu <r65037@freescale.com> 6801M: Richard Zhu <r65037@freescale.com>
6790M: Shawn Guo <shawn.guo@linaro.org> 6802M: Shawn Guo <shawn.guo@freescale.com>
6791L: linux-pci@vger.kernel.org 6803L: linux-pci@vger.kernel.org
6792L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6804L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6793S: Maintained 6805S: Maintained
@@ -6944,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6944S: Maintained 6956S: Maintained
6945F: drivers/pinctrl/pinctrl-at91.c 6957F: drivers/pinctrl/pinctrl-at91.c
6946 6958
6959PIN CONTROLLER - RENESAS
6960M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6961L: linux-sh@vger.kernel.org
6962S: Maintained
6963F: drivers/pinctrl/sh-pfc/
6964
6947PIN CONTROLLER - SAMSUNG 6965PIN CONTROLLER - SAMSUNG
6948M: Tomasz Figa <t.figa@samsung.com> 6966M: Tomasz Figa <t.figa@samsung.com>
6949M: Thomas Abraham <thomas.abraham@linaro.org> 6967M: Thomas Abraham <thomas.abraham@linaro.org>
@@ -8007,6 +8025,16 @@ F: drivers/ata/
8007F: include/linux/ata.h 8025F: include/linux/ata.h
8008F: include/linux/libata.h 8026F: include/linux/libata.h
8009 8027
8028SERIAL ATA AHCI PLATFORM devices support
8029M: Hans de Goede <hdegoede@redhat.com>
8030M: Tejun Heo <tj@kernel.org>
8031L: linux-ide@vger.kernel.org
8032T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
8033S: Supported
8034F: drivers/ata/ahci_platform.c
8035F: drivers/ata/libahci_platform.c
8036F: include/linux/ahci_platform.h
8037
8010SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER 8038SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
8011M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com> 8039M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
8012L: linux-scsi@vger.kernel.org 8040L: linux-scsi@vger.kernel.org
@@ -8984,7 +9012,7 @@ F: drivers/media/radio/radio-raremono.c
8984 9012
8985THERMAL 9013THERMAL
8986M: Zhang Rui <rui.zhang@intel.com> 9014M: Zhang Rui <rui.zhang@intel.com>
8987M: Eduardo Valentin <eduardo.valentin@ti.com> 9015M: Eduardo Valentin <edubezval@gmail.com>
8988L: linux-pm@vger.kernel.org 9016L: linux-pm@vger.kernel.org
8989T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git 9017T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
8990T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git 9018T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
@@ -9011,7 +9039,7 @@ S: Maintained
9011F: drivers/platform/x86/thinkpad_acpi.c 9039F: drivers/platform/x86/thinkpad_acpi.c
9012 9040
9013TI BANDGAP AND THERMAL DRIVER 9041TI BANDGAP AND THERMAL DRIVER
9014M: Eduardo Valentin <eduardo.valentin@ti.com> 9042M: Eduardo Valentin <edubezval@gmail.com>
9015L: linux-pm@vger.kernel.org 9043L: linux-pm@vger.kernel.org
9016S: Supported 9044S: Supported
9017F: drivers/thermal/ti-soc-thermal/ 9045F: drivers/thermal/ti-soc-thermal/
diff --git a/Makefile b/Makefile
index 4d75b4bceedd..f6a7794e4db4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 16 2PATCHLEVEL = 16
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc7
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -41,6 +41,29 @@ unexport GREP_OPTIONS
41# descending is started. They are now explicitly listed as the 41# descending is started. They are now explicitly listed as the
42# prepare rule. 42# prepare rule.
43 43
44# Beautify output
45# ---------------------------------------------------------------------------
46#
47# Normally, we echo the whole command before executing it. By making
48# that echo $($(quiet)$(cmd)), we now have the possibility to set
49# $(quiet) to choose other forms of output instead, e.g.
50#
51# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
52# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
53#
54# If $(quiet) is empty, the whole command will be printed.
55# If it is set to "quiet_", only the short version will be printed.
56# If it is set to "silent_", nothing will be printed at all, since
57# the variable $(silent_cmd_cc_o_c) doesn't exist.
58#
59# A simple variant is to prefix commands with $(Q) - that's useful
60# for commands that shall be hidden in non-verbose mode.
61#
62# $(Q)ln $@ :<
63#
64# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
65# If KBUILD_VERBOSE equals 1 then the above command is displayed.
66#
44# To put more focus on warnings, be less verbose as default 67# To put more focus on warnings, be less verbose as default
45# Use 'make V=1' to see the full commands 68# Use 'make V=1' to see the full commands
46 69
@@ -51,6 +74,29 @@ ifndef KBUILD_VERBOSE
51 KBUILD_VERBOSE = 0 74 KBUILD_VERBOSE = 0
52endif 75endif
53 76
77ifeq ($(KBUILD_VERBOSE),1)
78 quiet =
79 Q =
80else
81 quiet=quiet_
82 Q = @
83endif
84
85# If the user is running make -s (silent mode), suppress echoing of
86# commands
87
88ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
89ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
90 quiet=silent_
91endif
92else # make-3.8x
93ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
94 quiet=silent_
95endif
96endif
97
98export quiet Q KBUILD_VERBOSE
99
54# Call a source code checker (by default, "sparse") as part of the 100# Call a source code checker (by default, "sparse") as part of the
55# C compilation. 101# C compilation.
56# 102#
@@ -128,8 +174,11 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
128 174
129# Fake the "Entering directory" message once, so that IDEs/editors are 175# Fake the "Entering directory" message once, so that IDEs/editors are
130# able to understand relative filenames. 176# able to understand relative filenames.
177 echodir := @echo
178 quiet_echodir := @echo
179silent_echodir := @:
131sub-make: FORCE 180sub-make: FORCE
132 @echo "make[1]: Entering directory \`$(KBUILD_OUTPUT)'" 181 $($(quiet)echodir) "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
133 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ 182 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
134 KBUILD_SRC=$(CURDIR) \ 183 KBUILD_SRC=$(CURDIR) \
135 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \ 184 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
@@ -292,52 +341,6 @@ endif
292export KBUILD_MODULES KBUILD_BUILTIN 341export KBUILD_MODULES KBUILD_BUILTIN
293export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD 342export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
294 343
295# Beautify output
296# ---------------------------------------------------------------------------
297#
298# Normally, we echo the whole command before executing it. By making
299# that echo $($(quiet)$(cmd)), we now have the possibility to set
300# $(quiet) to choose other forms of output instead, e.g.
301#
302# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
303# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
304#
305# If $(quiet) is empty, the whole command will be printed.
306# If it is set to "quiet_", only the short version will be printed.
307# If it is set to "silent_", nothing will be printed at all, since
308# the variable $(silent_cmd_cc_o_c) doesn't exist.
309#
310# A simple variant is to prefix commands with $(Q) - that's useful
311# for commands that shall be hidden in non-verbose mode.
312#
313# $(Q)ln $@ :<
314#
315# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
316# If KBUILD_VERBOSE equals 1 then the above command is displayed.
317
318ifeq ($(KBUILD_VERBOSE),1)
319 quiet =
320 Q =
321else
322 quiet=quiet_
323 Q = @
324endif
325
326# If the user is running make -s (silent mode), suppress echoing of
327# commands
328
329ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
330ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
331 quiet=silent_
332endif
333else # make-3.8x
334ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
335 quiet=silent_
336endif
337endif
338
339export quiet Q KBUILD_VERBOSE
340
341ifneq ($(CC),) 344ifneq ($(CC),)
342ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1) 345ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
343COMPILER := clang 346COMPILER := clang
@@ -685,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
685endif 688endif
686endif 689endif
687 690
691KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
692
688ifdef CONFIG_DEBUG_INFO 693ifdef CONFIG_DEBUG_INFO
689KBUILD_CFLAGS += -g 694KBUILD_CFLAGS += -g
690KBUILD_AFLAGS += -Wa,-gdwarf-2 695KBUILD_AFLAGS += -Wa,-gdwarf-2
@@ -1173,7 +1178,7 @@ distclean: mrproper
1173# Packaging of the kernel to various formats 1178# Packaging of the kernel to various formats
1174# --------------------------------------------------------------------------- 1179# ---------------------------------------------------------------------------
1175# rpm target kept for backward compatibility 1180# rpm target kept for backward compatibility
1176package-dir := $(srctree)/scripts/package 1181package-dir := scripts/package
1177 1182
1178%src-pkg: FORCE 1183%src-pkg: FORCE
1179 $(Q)$(MAKE) $(build)=$(package-dir) $@ 1184 $(Q)$(MAKE) $(build)=$(package-dir) $@
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b3b0ef..88acf8bc1490 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@ config ARM
6 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 6 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
7 select ARCH_HAVE_CUSTOM_GPIO_H 7 select ARCH_HAVE_CUSTOM_GPIO_H
8 select ARCH_MIGHT_HAVE_PC_PARPORT 8 select ARCH_MIGHT_HAVE_PC_PARPORT
9 select ARCH_SUPPORTS_ATOMIC_RMW
9 select ARCH_USE_BUILTIN_BSWAP 10 select ARCH_USE_BUILTIN_BSWAP
10 select ARCH_USE_CMPXCHG_LOCKREF 11 select ARCH_USE_CMPXCHG_LOCKREF
11 select ARCH_WANT_IPC_PARSE_VERSION 12 select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index ecb267767cf5..e2156a583de7 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -529,8 +529,8 @@
529 serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */ 529 serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
530 0 0 1 2 530 0 0 1 2
531 >; 531 >;
532 tx-num-evt = <1>; 532 tx-num-evt = <32>;
533 rx-num-evt = <1>; 533 rx-num-evt = <32>;
534}; 534};
535 535
536&tps { 536&tps {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index ab9a34ce524c..80a3b215e7d6 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -560,8 +560,8 @@
560 serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */ 560 serial-dir = < /* 0: INACTIVE, 1: TX, 2: RX */
561 0 0 1 2 561 0 0 1 2
562 >; 562 >;
563 tx-num-evt = <1>; 563 tx-num-evt = <32>;
564 rx-num-evt = <1>; 564 rx-num-evt = <32>;
565}; 565};
566 566
567&tscadc { 567&tscadc {
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 8a0a72dc7dd7..a1a0cc5eb35c 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -105,10 +105,16 @@
105 105
106&cpsw_emac0 { 106&cpsw_emac0 {
107 phy_id = <&davinci_mdio>, <0>; 107 phy_id = <&davinci_mdio>, <0>;
108 phy-mode = "rmii";
108}; 109};
109 110
110&cpsw_emac1 { 111&cpsw_emac1 {
111 phy_id = <&davinci_mdio>, <1>; 112 phy_id = <&davinci_mdio>, <1>;
113 phy-mode = "rmii";
114};
115
116&phy_sel {
117 rmii-clock-ext;
112}; 118};
113 119
114&elm { 120&elm {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 287795985e32..b84bac5bada4 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -925,7 +925,7 @@
925 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 925 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
926 reg = <0x00500000 0x00100000>; 926 reg = <0x00500000 0x00100000>;
927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
928 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 928 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
929 <&uhpck>; 929 <&uhpck>;
930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
931 status = "disabled"; 931 status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index d6133f497207..2c0d6ea3ab41 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1045,6 +1045,8 @@
1045 reg = <0x00500000 0x80000 1045 reg = <0x00500000 0x80000
1046 0xf803c000 0x400>; 1046 0xf803c000 0x400>;
1047 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; 1047 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
1048 clocks = <&usb>, <&udphs_clk>;
1049 clock-names = "hclk", "pclk";
1048 status = "disabled"; 1050 status = "disabled";
1049 1051
1050 ep0 { 1052 ep0 {
@@ -1122,6 +1124,7 @@
1122 compatible = "atmel,at91sam9rl-pwm"; 1124 compatible = "atmel,at91sam9rl-pwm";
1123 reg = <0xf8034000 0x300>; 1125 reg = <0xf8034000 0x300>;
1124 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>; 1126 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
1127 clocks = <&pwm_clk>;
1125 #pwm-cells = <3>; 1128 #pwm-cells = <3>;
1126 status = "disabled"; 1129 status = "disabled";
1127 }; 1130 };
@@ -1153,8 +1156,7 @@
1153 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 1156 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
1154 reg = <0x00600000 0x100000>; 1157 reg = <0x00600000 0x100000>;
1155 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 1158 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
1156 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 1159 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
1157 <&uhpck>;
1158 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 1160 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
1159 status = "disabled"; 1161 status = "disabled";
1160 }; 1162 };
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 4adc28039c30..83089540e324 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -240,6 +240,7 @@
240 regulator-name = "ldo3"; 240 regulator-name = "ldo3";
241 regulator-min-microvolt = <1800000>; 241 regulator-min-microvolt = <1800000>;
242 regulator-max-microvolt = <1800000>; 242 regulator-max-microvolt = <1800000>;
243 regulator-always-on;
243 regulator-boot-on; 244 regulator-boot-on;
244 }; 245 };
245 246
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index c90c76de84d6..dc7a292fe939 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -673,10 +673,12 @@
673 673
674 l3_iclk_div: l3_iclk_div { 674 l3_iclk_div: l3_iclk_div {
675 #clock-cells = <0>; 675 #clock-cells = <0>;
676 compatible = "fixed-factor-clock"; 676 compatible = "ti,divider-clock";
677 ti,max-div = <2>;
678 ti,bit-shift = <4>;
679 reg = <0x0100>;
677 clocks = <&dpll_core_h12x2_ck>; 680 clocks = <&dpll_core_h12x2_ck>;
678 clock-mult = <1>; 681 ti,index-power-of-two;
679 clock-div = <1>;
680 }; 682 };
681 683
682 l4_root_clk_div: l4_root_clk_div { 684 l4_root_clk_div: l4_root_clk_div {
@@ -684,7 +686,7 @@
684 compatible = "fixed-factor-clock"; 686 compatible = "fixed-factor-clock";
685 clocks = <&l3_iclk_div>; 687 clocks = <&l3_iclk_div>;
686 clock-mult = <1>; 688 clock-mult = <1>;
687 clock-div = <1>; 689 clock-div = <2>;
688 }; 690 };
689 691
690 video1_clk2_div: video1_clk2_div { 692 video1_clk2_div: video1_clk2_div {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index fbaf426d2daa..17b22e9cc2aa 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -554,7 +554,7 @@
554 interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>; 554 interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
555 clocks = <&clock CLK_PWM>; 555 clocks = <&clock CLK_PWM>;
556 clock-names = "timers"; 556 clock-names = "timers";
557 #pwm-cells = <2>; 557 #pwm-cells = <3>;
558 status = "disabled"; 558 status = "disabled";
559 }; 559 };
560 560
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index e38532271ef9..15957227ffda 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -167,7 +167,7 @@
167 compatible = "samsung,exynos5420-audss-clock"; 167 compatible = "samsung,exynos5420-audss-clock";
168 reg = <0x03810000 0x0C>; 168 reg = <0x03810000 0x0C>;
169 #clock-cells = <1>; 169 #clock-cells = <1>;
170 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>, 170 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
171 <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>; 171 <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
172 clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in"; 172 clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
173 }; 173 };
@@ -260,6 +260,9 @@
260 mfc_pd: power-domain@10044060 { 260 mfc_pd: power-domain@10044060 {
261 compatible = "samsung,exynos4210-pd"; 261 compatible = "samsung,exynos4210-pd";
262 reg = <0x10044060 0x20>; 262 reg = <0x10044060 0x20>;
263 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
264 <&clock CLK_MOUT_USER_ACLK333>;
265 clock-names = "oscclk", "pclk0", "clk0";
263 }; 266 };
264 267
265 disp_pd: power-domain@100440C0 { 268 disp_pd: power-domain@100440C0 {
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d086be..83a5b8685bd9 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
73 73
74 L2: l2-cache { 74 L2: l2-cache {
75 compatible = "arm,pl310-cache"; 75 compatible = "arm,pl310-cache";
76 reg = <0xfc10000 0x100000>; 76 reg = <0x100000 0x100000>;
77 interrupts = <0 15 4>; 77 interrupts = <0 15 4>;
78 cache-unified; 78 cache-unified;
79 cache-level = <2>; 79 cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..b15f1a77d684 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
353 }; 353 };
354 354
355 twl_power: power { 355 twl_power: power {
356 compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off"; 356 compatible = "ti,twl4030-power-n900";
357 ti,use_poweroff; 357 ti,use_poweroff;
358 }; 358 };
359}; 359};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffaeff6e0..79f68acfd5d4 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
540 #clock-cells = <0>; 540 #clock-cells = <0>;
541 clock-output-names = "sd1"; 541 clock-output-names = "sd1";
542 }; 542 };
543 sd2_clk: sd3_clk@e615007c { 543 sd2_clk: sd3_clk@e615026c {
544 compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock"; 544 compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
545 reg = <0 0xe615007c 0 4>; 545 reg = <0 0xe615026c 0 4>;
546 clocks = <&pll1_div2_clk>; 546 clocks = <&pll1_div2_clk>;
547 #clock-cells = <0>; 547 #clock-cells = <0>;
548 clock-output-names = "sd2"; 548 clock-output-names = "sd2";
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
137 dst += AES_BLOCK_SIZE; 137 dst += AES_BLOCK_SIZE;
138 } while (--blocks); 138 } while (--blocks);
139 } 139 }
140 err = blkcipher_walk_done(desc, &walk, 0); 140 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
141 } 141 }
142 return err; 142 return err;
143} 143}
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
159 walk.nbytes, &ctx->dec, walk.iv); 159 walk.nbytes, &ctx->dec, walk.iv);
160 kernel_neon_end(); 160 kernel_neon_end();
161 err = blkcipher_walk_done(desc, &walk, 0); 161 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
162 } 162 }
163 while (walk.nbytes) { 163 while (walk.nbytes) {
164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; 164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
182 dst += AES_BLOCK_SIZE; 182 dst += AES_BLOCK_SIZE;
183 src += AES_BLOCK_SIZE; 183 src += AES_BLOCK_SIZE;
184 } while (--blocks); 184 } while (--blocks);
185 err = blkcipher_walk_done(desc, &walk, 0); 185 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
186 } 186 }
187 return err; 187 return err;
188} 188}
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
269 walk.nbytes, &ctx->enc, walk.iv); 269 walk.nbytes, &ctx->enc, walk.iv);
270 kernel_neon_end(); 270 kernel_neon_end();
271 err = blkcipher_walk_done(desc, &walk, 0); 271 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
272 } 272 }
273 return err; 273 return err;
274} 274}
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
293 walk.nbytes, &ctx->dec, walk.iv); 293 walk.nbytes, &ctx->dec, walk.iv);
294 kernel_neon_end(); 294 kernel_neon_end();
295 err = blkcipher_walk_done(desc, &walk, 0); 295 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
296 } 296 }
297 return err; 297 return err;
298} 298}
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 9db4b659d03e..cb1424240ff6 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -74,8 +74,6 @@ void kprobe_arm_test_cases(void)
74 TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ 74 TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
75 TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\ 75 TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
76 TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\ 76 TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
77 TEST_RR( op s " r12, pc" ", r",14,val, ", ror r",14,7,"")\
78 TEST_RR( op s " r14, r",0, val, ", pc" ", lsl r",14,8,"")\
79 TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \ 77 TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
80 TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \ 78 TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
81 TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ 79 TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
@@ -103,8 +101,6 @@ void kprobe_arm_test_cases(void)
103 TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \ 101 TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \
104 TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \ 102 TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \
105 TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \ 103 TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \
106 TEST_RR( op " pc" ", r",14,val, ", ror r",14,7,"") \
107 TEST_RR( op " r",0, val, ", pc" ", lsl r",14,8,"") \
108 TEST_R( op "eq r",11,VAL1,", #0xf5") \ 104 TEST_R( op "eq r",11,VAL1,", #0xf5") \
109 TEST_R( op "ne r",0, VAL1,", #0xf5000000") \ 105 TEST_R( op "ne r",0, VAL1,", #0xf5000000") \
110 TEST_R( op " r",8, VAL2,", #0x000af000") 106 TEST_R( op " r",8, VAL2,", #0x000af000")
@@ -125,7 +121,6 @@ void kprobe_arm_test_cases(void)
125 TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \ 121 TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
126 TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \ 122 TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
127 TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \ 123 TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
128 TEST_R( op "le" s " r14, pc" ", lsl r",14,8,"") \
129 TEST( op "eq" s " r0, #0xf5") \ 124 TEST( op "eq" s " r0, #0xf5") \
130 TEST( op "ne" s " r11, #0xf5000000") \ 125 TEST( op "ne" s " r11, #0xf5000000") \
131 TEST( op s " r7, #0x000af000") \ 126 TEST( op s " r7, #0x000af000") \
@@ -159,12 +154,19 @@ void kprobe_arm_test_cases(void)
159 TEST_SUPPORTED("cmp pc, #0x1000"); 154 TEST_SUPPORTED("cmp pc, #0x1000");
160 TEST_SUPPORTED("cmp sp, #0x1000"); 155 TEST_SUPPORTED("cmp sp, #0x1000");
161 156
162 /* Data-processing with PC as shift*/ 157 /* Data-processing with PC and a shift count in a register */
163 TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc") 158 TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc")
164 TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc") 159 TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc")
165 TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc") 160 TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc")
166 161 TEST_UNSUPPORTED(__inst_arm(0xe151021f) " @ cmp r1, pc, lsl r2")
167 /* Data-processing with PC as shift*/ 162 TEST_UNSUPPORTED(__inst_arm(0xe17f0211) " @ cmn pc, r1, lsl r2")
163 TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) " @ mov r1, pc, lsl r2")
164 TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) " @ mov pc, r1, lsl r2")
165 TEST_UNSUPPORTED(__inst_arm(0xe042131f) " @ sub r1, r2, pc, lsl r3")
166 TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) " @ bic r1, pc, r2, lsl r3")
167 TEST_UNSUPPORTED(__inst_arm(0xe081f312) " @ add pc, r1, r2, lsl r3")
168
169 /* Data-processing with PC as a target and status registers updated */
168 TEST_UNSUPPORTED("movs pc, r1") 170 TEST_UNSUPPORTED("movs pc, r1")
169 TEST_UNSUPPORTED("movs pc, r1, lsl r2") 171 TEST_UNSUPPORTED("movs pc, r1, lsl r2")
170 TEST_UNSUPPORTED("movs pc, #0x10000") 172 TEST_UNSUPPORTED("movs pc, #0x10000")
@@ -187,14 +189,14 @@ void kprobe_arm_test_cases(void)
187 TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"") 189 TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
188 TEST_BF_R ("add pc, r",14,2f-1f-8,", pc") 190 TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
189 TEST_BF_R ("mov pc, r",0,2f,"") 191 TEST_BF_R ("mov pc, r",0,2f,"")
190 TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"") 192 TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
191 TEST_BB( "sub pc, pc, #1b-2b+8") 193 TEST_BB( "sub pc, pc, #1b-2b+8")
192#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7) 194#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
193 TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */ 195 TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
194#endif 196#endif
195 TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"") 197 TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
196 TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc") 198 TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
197 TEST_RR( "add pc, pc, r",10,-2,", asl r",11,1,"") 199 TEST_R( "add pc, pc, r",10,-2,", asl #1")
198#ifdef CONFIG_THUMB2_KERNEL 200#ifdef CONFIG_THUMB2_KERNEL
199 TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"") 201 TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"")
200 TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8") 202 TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8")
@@ -216,6 +218,7 @@ void kprobe_arm_test_cases(void)
216 TEST_BB_R("bx r",7,2f,"") 218 TEST_BB_R("bx r",7,2f,"")
217 TEST_BF_R("bxeq r",14,2f,"") 219 TEST_BF_R("bxeq r",14,2f,"")
218 220
221#if __LINUX_ARM_ARCH__ >= 5
219 TEST_R("clz r0, r",0, 0x0,"") 222 TEST_R("clz r0, r",0, 0x0,"")
220 TEST_R("clzeq r7, r",14,0x1,"") 223 TEST_R("clzeq r7, r",14,0x1,"")
221 TEST_R("clz lr, r",7, 0xffffffff,"") 224 TEST_R("clz lr, r",7, 0xffffffff,"")
@@ -337,6 +340,7 @@ void kprobe_arm_test_cases(void)
337 TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2") 340 TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
338 TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2") 341 TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
339 TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc") 342 TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
343#endif
340 344
341 TEST_GROUP("Multiply and multiply-accumulate") 345 TEST_GROUP("Multiply and multiply-accumulate")
342 346
@@ -559,6 +563,7 @@ void kprobe_arm_test_cases(void)
559 TEST_UNSUPPORTED("ldrsht r1, [r2], #48") 563 TEST_UNSUPPORTED("ldrsht r1, [r2], #48")
560#endif 564#endif
561 565
566#if __LINUX_ARM_ARCH__ >= 5
562 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") 567 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
563 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") 568 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
564 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") 569 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
@@ -595,6 +600,7 @@ void kprobe_arm_test_cases(void)
595 TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") 600 TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!")
596 TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48") 601 TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48")
597 TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48") 602 TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48")
603#endif
598 604
599 TEST_GROUP("Miscellaneous") 605 TEST_GROUP("Miscellaneous")
600 606
@@ -1227,7 +1233,9 @@ void kprobe_arm_test_cases(void)
1227 TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0") 1233 TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
1228 1234
1229 COPROCESSOR_INSTRUCTIONS_ST_LD("",e) 1235 COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
1236#if __LINUX_ARM_ARCH__ >= 5
1230 COPROCESSOR_INSTRUCTIONS_MC_MR("",e) 1237 COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
1238#endif
1231 TEST_UNSUPPORTED("svc 0") 1239 TEST_UNSUPPORTED("svc 0")
1232 TEST_UNSUPPORTED("svc 0xffffff") 1240 TEST_UNSUPPORTED("svc 0xffffff")
1233 1241
@@ -1287,7 +1295,9 @@ void kprobe_arm_test_cases(void)
1287 TEST( "blx __dummy_thumb_subroutine_odd") 1295 TEST( "blx __dummy_thumb_subroutine_odd")
1288#endif /* __LINUX_ARM_ARCH__ >= 6 */ 1296#endif /* __LINUX_ARM_ARCH__ >= 6 */
1289 1297
1298#if __LINUX_ARM_ARCH__ >= 5
1290 COPROCESSOR_INSTRUCTIONS_ST_LD("2",f) 1299 COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
1300#endif
1291#if __LINUX_ARM_ARCH__ >= 6 1301#if __LINUX_ARM_ARCH__ >= 6
1292 COPROCESSOR_INSTRUCTIONS_MC_MR("2",f) 1302 COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
1293#endif 1303#endif
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index 379639998d5a..08d731294bcd 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -225,6 +225,7 @@ static int pre_handler_called;
225static int post_handler_called; 225static int post_handler_called;
226static int jprobe_func_called; 226static int jprobe_func_called;
227static int kretprobe_handler_called; 227static int kretprobe_handler_called;
228static int tests_failed;
228 229
229#define FUNC_ARG1 0x12345678 230#define FUNC_ARG1 0x12345678
230#define FUNC_ARG2 0xabcdef 231#define FUNC_ARG2 0xabcdef
@@ -461,6 +462,13 @@ static int run_api_tests(long (*func)(long, long))
461 462
462 pr_info(" jprobe\n"); 463 pr_info(" jprobe\n");
463 ret = test_jprobe(func); 464 ret = test_jprobe(func);
465#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
466 if (ret == -EINVAL) {
467 pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
468 tests_failed = ret;
469 ret = 0;
470 }
471#endif
464 if (ret < 0) 472 if (ret < 0)
465 return ret; 473 return ret;
466 474
@@ -1672,6 +1680,8 @@ static int __init run_all_tests(void)
1672 1680
1673out: 1681out:
1674 if (ret == 0) 1682 if (ret == 0)
1683 ret = tests_failed;
1684 if (ret == 0)
1675 pr_info("Finished kprobe tests OK\n"); 1685 pr_info("Finished kprobe tests OK\n");
1676 else 1686 else
1677 pr_err("kprobe tests failed\n"); 1687 pr_err("kprobe tests failed\n");
diff --git a/arch/arm/kernel/probes-arm.c b/arch/arm/kernel/probes-arm.c
index 51a13a027989..8eaef81d8344 100644
--- a/arch/arm/kernel/probes-arm.c
+++ b/arch/arm/kernel/probes-arm.c
@@ -341,12 +341,12 @@ static const union decode_item arm_cccc_000x_table[] = {
341 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */ 341 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
342 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */ 342 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
343 DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG, 343 DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
344 REGS(ANY, 0, NOPC, 0, ANY)), 344 REGS(NOPC, 0, NOPC, 0, NOPC)),
345 345
346 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */ 346 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
347 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */ 347 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
348 DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG, 348 DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
349 REGS(0, ANY, NOPC, 0, ANY)), 349 REGS(0, NOPC, NOPC, 0, NOPC)),
350 350
351 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */ 351 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
352 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */ 352 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
@@ -359,7 +359,7 @@ static const union decode_item arm_cccc_000x_table[] = {
359 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */ 359 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
360 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */ 360 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
361 DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG, 361 DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
362 REGS(ANY, ANY, NOPC, 0, ANY)), 362 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
363 363
364 DECODE_END 364 DECODE_END
365}; 365};
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9d853189028b..e35d880f9773 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
275 cpu_topology[cpuid].socket_id, mpidr); 275 cpu_topology[cpuid].socket_id, mpidr);
276} 276}
277 277
278static inline const int cpu_corepower_flags(void) 278static inline int cpu_corepower_flags(void)
279{ 279{
280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; 280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
281} 281}
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index f38cf7c110cc..46d893fcbe85 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -173,10 +173,8 @@ static struct platform_device exynos_cpuidle = {
173 173
174void __init exynos_cpuidle_init(void) 174void __init exynos_cpuidle_init(void)
175{ 175{
176 if (soc_is_exynos5440()) 176 if (soc_is_exynos4210() || soc_is_exynos5250())
177 return; 177 platform_device_register(&exynos_cpuidle);
178
179 platform_device_register(&exynos_cpuidle);
180} 178}
181 179
182void __init exynos_cpufreq_init(void) 180void __init exynos_cpufreq_init(void)
@@ -297,7 +295,7 @@ static void __init exynos_dt_machine_init(void)
297 * This is called from smp_prepare_cpus if we've built for SMP, but 295 * This is called from smp_prepare_cpus if we've built for SMP, but
298 * we still need to set it up for PM and firmware ops if not. 296 * we still need to set it up for PM and firmware ops if not.
299 */ 297 */
300 if (!IS_ENABLED(SMP)) 298 if (!IS_ENABLED(CONFIG_SMP))
301 exynos_sysram_init(); 299 exynos_sysram_init();
302 300
303 exynos_cpuidle_init(); 301 exynos_cpuidle_init();
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index eb91d2350f8c..e8797bb78871 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -57,8 +57,13 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
57 57
58 boot_reg = sysram_ns_base_addr + 0x1c; 58 boot_reg = sysram_ns_base_addr + 0x1c;
59 59
60 if (!soc_is_exynos4212() && !soc_is_exynos3250()) 60 /*
61 boot_reg += 4*cpu; 61 * Almost all Exynos-series of SoCs that run in secure mode don't need
62 * additional offset for every CPU, with Exynos4412 being the only
63 * exception.
64 */
65 if (soc_is_exynos4412())
66 boot_reg += 4 * cpu;
62 67
63 __raw_writel(boot_addr, boot_reg); 68 __raw_writel(boot_addr, boot_reg);
64 return 0; 69 return 0;
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 8a134d019cb3..920a4baa53cd 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
40 40
41static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 41static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
42{ 42{
43 u32 mpidr = cpu_logical_map(cpu);
44 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
45
43 for (;;) { 46 for (;;) {
44 47
45 /* make cpu1 to be turned off at next WFI command */ 48 /* Turn the CPU off on next WFI instruction. */
46 if (cpu == 1) 49 exynos_cpu_power_down(core_id);
47 exynos_cpu_power_down(cpu);
48 50
49 wfi(); 51 wfi();
50 52
51 if (pen_release == cpu_logical_map(cpu)) { 53 if (pen_release == core_id) {
52 /* 54 /*
53 * OK, proper wakeup, we're done 55 * OK, proper wakeup, we're done
54 */ 56 */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..50b9aad5e27b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) 90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
91{ 91{
92 unsigned long timeout; 92 unsigned long timeout;
93 unsigned long phys_cpu = cpu_logical_map(cpu); 93 u32 mpidr = cpu_logical_map(cpu);
94 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
94 int ret = -ENOSYS; 95 int ret = -ENOSYS;
95 96
96 /* 97 /*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
104 * the holding pen - release it, then wait for it to flag 105 * the holding pen - release it, then wait for it to flag
105 * that it has been released by resetting pen_release. 106 * that it has been released by resetting pen_release.
106 * 107 *
107 * Note that "pen_release" is the hardware CPU ID, whereas 108 * Note that "pen_release" is the hardware CPU core ID, whereas
108 * "cpu" is Linux's internal ID. 109 * "cpu" is Linux's internal ID.
109 */ 110 */
110 write_pen_release(phys_cpu); 111 write_pen_release(core_id);
111 112
112 if (!exynos_cpu_power_state(cpu)) { 113 if (!exynos_cpu_power_state(core_id)) {
113 exynos_cpu_power_up(cpu); 114 exynos_cpu_power_up(core_id);
114 timeout = 10; 115 timeout = 10;
115 116
116 /* wait max 10 ms until cpu1 is on */ 117 /* wait max 10 ms until cpu1 is on */
117 while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) { 118 while (exynos_cpu_power_state(core_id)
119 != S5P_CORE_LOCAL_PWR_EN) {
118 if (timeout-- == 0) 120 if (timeout-- == 0)
119 break; 121 break;
120 122
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
145 * Try to set boot address using firmware first 147 * Try to set boot address using firmware first
146 * and fall back to boot register if it fails. 148 * and fall back to boot register if it fails.
147 */ 149 */
148 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 150 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
149 if (ret && ret != -ENOSYS) 151 if (ret && ret != -ENOSYS)
150 goto fail; 152 goto fail;
151 if (ret == -ENOSYS) { 153 if (ret == -ENOSYS) {
152 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 154 void __iomem *boot_reg = cpu_boot_reg(core_id);
153 155
154 if (IS_ERR(boot_reg)) { 156 if (IS_ERR(boot_reg)) {
155 ret = PTR_ERR(boot_reg); 157 ret = PTR_ERR(boot_reg);
156 goto fail; 158 goto fail;
157 } 159 }
158 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 160 __raw_writel(boot_addr, cpu_boot_reg(core_id));
159 } 161 }
160 162
161 call_firmware_op(cpu_boot, phys_cpu); 163 call_firmware_op(cpu_boot, core_id);
162 164
163 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); 165 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
164 166
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
227 * boot register if it fails. 229 * boot register if it fails.
228 */ 230 */
229 for (i = 1; i < max_cpus; ++i) { 231 for (i = 1; i < max_cpus; ++i) {
230 unsigned long phys_cpu;
231 unsigned long boot_addr; 232 unsigned long boot_addr;
233 u32 mpidr;
234 u32 core_id;
232 int ret; 235 int ret;
233 236
234 phys_cpu = cpu_logical_map(i); 237 mpidr = cpu_logical_map(i);
238 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
235 boot_addr = virt_to_phys(exynos4_secondary_startup); 239 boot_addr = virt_to_phys(exynos4_secondary_startup);
236 240
237 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 241 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
238 if (ret && ret != -ENOSYS) 242 if (ret && ret != -ENOSYS)
239 break; 243 break;
240 if (ret == -ENOSYS) { 244 if (ret == -ENOSYS) {
241 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 245 void __iomem *boot_reg = cpu_boot_reg(core_id);
242 246
243 if (IS_ERR(boot_reg)) 247 if (IS_ERR(boot_reg))
244 break; 248 break;
245 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 249 __raw_writel(boot_addr, cpu_boot_reg(core_id));
246 } 250 }
247 } 251 }
248} 252}
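The hotplug and platsmp changes above stop treating the raw cpu_logical_map() value as a power-control index and instead use the core ID taken from affinity level 0 of the MPIDR. Purely as an illustration (a simplified restatement of the kernel's MPIDR_AFFINITY_LEVEL macro, not code from this patch), the snippet below shows what that extraction yields for a sample MPIDR value.

	/* Illustration: affinity level 0 is the low byte of the MPIDR. */
	#include <stdio.h>

	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		(((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

	int main(void)
	{
		unsigned int mpidr = 0x00000101;	/* example: cluster 1, core 1 */

		printf("core %u, cluster %u\n",
		       MPIDR_AFFINITY_LEVEL(mpidr, 0),
		       MPIDR_AFFINITY_LEVEL(mpidr, 1));
		return 0;
	}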
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index fe6570ebbdde..797cb134bfff 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pm_domain.h> 19#include <linux/pm_domain.h>
20#include <linux/clk.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
@@ -24,6 +25,8 @@
24 25
25#include "regs-pmu.h" 26#include "regs-pmu.h"
26 27
28#define MAX_CLK_PER_DOMAIN 4
29
27/* 30/*
28 * Exynos specific wrapper around the generic power domain 31 * Exynos specific wrapper around the generic power domain
29 */ 32 */
@@ -32,6 +35,9 @@ struct exynos_pm_domain {
32 char const *name; 35 char const *name;
33 bool is_off; 36 bool is_off;
34 struct generic_pm_domain pd; 37 struct generic_pm_domain pd;
38 struct clk *oscclk;
39 struct clk *clk[MAX_CLK_PER_DOMAIN];
40 struct clk *pclk[MAX_CLK_PER_DOMAIN];
35}; 41};
36 42
37static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) 43static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -44,6 +50,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
44 pd = container_of(domain, struct exynos_pm_domain, pd); 50 pd = container_of(domain, struct exynos_pm_domain, pd);
45 base = pd->base; 51 base = pd->base;
46 52
53 /* Set oscclk before powering off a domain*/
54 if (!power_on) {
55 int i;
56
57 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
58 if (IS_ERR(pd->clk[i]))
59 break;
60 if (clk_set_parent(pd->clk[i], pd->oscclk))
61 pr_err("%s: error setting oscclk as parent to clock %d\n",
62 pd->name, i);
63 }
64 }
65
47 pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0; 66 pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
48 __raw_writel(pwr, base); 67 __raw_writel(pwr, base);
49 68
@@ -60,6 +79,20 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
60 cpu_relax(); 79 cpu_relax();
61 usleep_range(80, 100); 80 usleep_range(80, 100);
62 } 81 }
82
83 /* Restore clocks after powering on a domain*/
84 if (power_on) {
85 int i;
86
87 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
88 if (IS_ERR(pd->clk[i]))
89 break;
90 if (clk_set_parent(pd->clk[i], pd->pclk[i]))
91 pr_err("%s: error setting parent to clock%d\n",
92 pd->name, i);
93 }
94 }
95
63 return 0; 96 return 0;
64} 97}
65 98
@@ -152,9 +185,11 @@ static __init int exynos4_pm_init_power_domain(void)
152 185
153 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { 186 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
154 struct exynos_pm_domain *pd; 187 struct exynos_pm_domain *pd;
155 int on; 188 int on, i;
189 struct device *dev;
156 190
157 pdev = of_find_device_by_node(np); 191 pdev = of_find_device_by_node(np);
192 dev = &pdev->dev;
158 193
159 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 194 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
160 if (!pd) { 195 if (!pd) {
@@ -170,6 +205,30 @@ static __init int exynos4_pm_init_power_domain(void)
170 pd->pd.power_on = exynos_pd_power_on; 205 pd->pd.power_on = exynos_pd_power_on;
171 pd->pd.of_node = np; 206 pd->pd.of_node = np;
172 207
208 pd->oscclk = clk_get(dev, "oscclk");
209 if (IS_ERR(pd->oscclk))
210 goto no_clk;
211
212 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
213 char clk_name[8];
214
215 snprintf(clk_name, sizeof(clk_name), "clk%d", i);
216 pd->clk[i] = clk_get(dev, clk_name);
217 if (IS_ERR(pd->clk[i]))
218 break;
219 snprintf(clk_name, sizeof(clk_name), "pclk%d", i);
220 pd->pclk[i] = clk_get(dev, clk_name);
221 if (IS_ERR(pd->pclk[i])) {
222 clk_put(pd->clk[i]);
223 pd->clk[i] = ERR_PTR(-EINVAL);
224 break;
225 }
226 }
227
228 if (IS_ERR(pd->clk[0]))
229 clk_put(pd->oscclk);
230
231no_clk:
173 platform_set_drvdata(pdev, pd); 232 platform_set_drvdata(pdev, pd);
174 233
175 on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN; 234 on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
index 4ba587da89d2..84acdfd1d715 100644
--- a/arch/arm/mach-imx/clk-gate2.c
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -67,8 +67,12 @@ static void clk_gate2_disable(struct clk_hw *hw)
67 67
68 spin_lock_irqsave(gate->lock, flags); 68 spin_lock_irqsave(gate->lock, flags);
69 69
70 if (gate->share_count && --(*gate->share_count) > 0) 70 if (gate->share_count) {
71 goto out; 71 if (WARN_ON(*gate->share_count == 0))
72 goto out;
73 else if (--(*gate->share_count) > 0)
74 goto out;
75 }
72 76
73 reg = readl(gate->reg); 77 reg = readl(gate->reg);
74 reg &= ~(3 << gate->bit_idx); 78 reg &= ~(3 << gate->bit_idx);
@@ -78,19 +82,26 @@ out:
78 spin_unlock_irqrestore(gate->lock, flags); 82 spin_unlock_irqrestore(gate->lock, flags);
79} 83}
80 84
81static int clk_gate2_is_enabled(struct clk_hw *hw) 85static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
82{ 86{
83 u32 reg; 87 u32 val = readl(reg);
84 struct clk_gate2 *gate = to_clk_gate2(hw);
85 88
86 reg = readl(gate->reg); 89 if (((val >> bit_idx) & 1) == 1)
87
88 if (((reg >> gate->bit_idx) & 1) == 1)
89 return 1; 90 return 1;
90 91
91 return 0; 92 return 0;
92} 93}
93 94
95static int clk_gate2_is_enabled(struct clk_hw *hw)
96{
97 struct clk_gate2 *gate = to_clk_gate2(hw);
98
99 if (gate->share_count)
100 return !!(*gate->share_count);
101 else
102 return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
103}
104
94static struct clk_ops clk_gate2_ops = { 105static struct clk_ops clk_gate2_ops = {
95 .enable = clk_gate2_enable, 106 .enable = clk_gate2_enable,
96 .disable = clk_gate2_disable, 107 .disable = clk_gate2_disable,
@@ -116,6 +127,10 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
116 gate->bit_idx = bit_idx; 127 gate->bit_idx = bit_idx;
117 gate->flags = clk_gate2_flags; 128 gate->flags = clk_gate2_flags;
118 gate->lock = lock; 129 gate->lock = lock;
130
131 /* Initialize share_count per hardware state */
132 if (share_count)
133 *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
119 gate->share_count = share_count; 134 gate->share_count = share_count;
120 135
121 init.name = name; 136 init.name = name;
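The clk-gate2 change above makes a shared gate's use count authoritative: the hardware bit is cleared only when the last user disables, an unbalanced disable is caught with a WARN, and the count is seeded from the hardware state at registration. The stand-alone sketch below (illustration only, not the clock-framework code) shows the same reference-counting pattern for two logical clocks sharing one gate bit.

	/* Conceptual sketch of a shared clock gate with a use count. */
	#include <stdio.h>

	static unsigned int share_count;	/* software users of the shared gate */
	static unsigned int gate_reg;		/* stands in for the 2-bit gate field */

	static void shared_gate_enable(void)
	{
		if (share_count++ == 0)
			gate_reg = 0x3;		/* ungate the hardware only once */
	}

	static void shared_gate_disable(void)
	{
		if (share_count == 0)		/* guard against unbalanced disable */
			return;
		if (--share_count == 0)
			gate_reg = 0x0;		/* last user gone: gate the clock */
	}

	int main(void)
	{
		shared_gate_enable();		/* first logical clock */
		shared_gate_enable();		/* second logical clock */
		shared_gate_disable();
		printf("count=%u reg=0x%x\n", share_count, gate_reg);	/* 1 0x3 */
		shared_gate_disable();
		printf("count=%u reg=0x%x\n", share_count, gate_reg);	/* 0 0x0 */
		return 0;
	}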
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795dea02ec..8556c787e59c 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
70static const char *lvds_sels[] = { 70static const char *lvds_sels[] = {
71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy", 71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref", 72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
73 "pcie_ref", "sata_ref", 73 "pcie_ref_125m", "sata_ref_100m",
74}; 74};
75 75
76enum mx6q_clks { 76enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
491 491
492 /* All existing boards with PCIe use LVDS1 */ 492 /* All existing boards with PCIe use LVDS1 */
493 if (IS_ENABLED(CONFIG_PCI_IMX6)) 493 if (IS_ENABLED(CONFIG_PCI_IMX6))
494 clk_set_parent(clk[lvds1_sel], clk[sata_ref]); 494 clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
495 495
496 /* Set initial power mode */ 496 /* Set initial power mode */
497 imx6q_set_lpm(WAIT_CLOCKED); 497 imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202fd39cc..2bdc3233abe2 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
 	.notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+	.notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
 	struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
 	if (coherency_available())
 		bus_register_notifier(&pci_bus_type,
-				      &mvebu_hwcc_nb);
+				      &mvebu_hwcc_pci_nb);
 	return 0;
 }
 
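The extra mvebu_hwcc_pci_nb exists because a struct notifier_block carries its own chain linkage, so one instance must not sit on two notifier chains at once. A rough userspace model of why that matters, with invented types and a simplified prepend-style register:

#include <stdio.h>

struct nb {
	int (*call)(void *data);
	struct nb *next;
};

static void chain_register(struct nb **head, struct nb *n)
{
	n->next = *head;	/* overwrites any linkage from another chain */
	*head = n;
}

static int hwcc_notifier(void *data) { (void)data; return 0; }
static int other_notifier(void *data) { (void)data; return 1; }

int main(void)
{
	struct nb other = { other_notifier, NULL };
	struct nb shared = { hwcc_notifier, NULL };
	struct nb *platform_chain = NULL, *pci_chain = NULL;

	chain_register(&platform_chain, &other);
	chain_register(&platform_chain, &shared);	/* shared -> other */
	chain_register(&pci_chain, &shared);		/* clobbers shared->next */

	/* Walking platform_chain now stops after 'shared': 'other' was lost. */
	int count = 0;
	for (struct nb *n = platform_chain; n; n = n->next)
		count++;
	printf("platform chain length: %d (expected 2)\n", count);
	return 0;
}

Giving each chain its own notifier_block, both pointing at the same callback, sidesteps the aliasing entirely.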
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..da5bb292b91c 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
 	__CPUINIT
 #define CPU_RESUME_ADDR_REG 0xf10182d4
 
@@ -22,13 +24,18 @@
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-	ldr     r0, [pc, #4]
+ARM_BE8(setend	be)
+	adr     r0, 1f
+	ldr     r0, [r0]
 	ldr     r1, [r0]
+ARM_BE8(rev	r1, r1)
 	mov     pc, r1
+1:
 	.word   CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend	be)
 	bl	v7_invalidate_l1
 	b	secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index a1d407c0febe..25aa8237d668 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
 	/* Test the CR_C bit and set it if it was cleared */
 	asm volatile(
-	"mrc	p15, 0, %0, c1, c0, 0 \n\t"
-	"tst	%0, #(1 << 2) \n\t"
-	"orreq	%0, %0, #(1 << 2) \n\t"
-	"mcreq	p15, 0, %0, c1, c0, 0 \n\t"
+	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
+	"tst	r0, #(1 << 2) \n\t"
+	"orreq	r0, r0, #(1 << 2) \n\t"
+	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
 	"isb "
-	: : "r" (0));
+	: : : "r0");
 
 	pr_warn("Failed to suspend the system\n");
 
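The pmsu change replaces a dummy input operand that the asm body was actually writing (undefined behaviour for an input) with a hard-coded register plus a clobber. A compile-only sketch of the corrected constraint style, guarded so it only builds for ARM; the extra "cc"/"memory" clobbers are my own conservative additions for a standalone snippet, not part of the kernel code, and the CP15 access needs privileged mode, so this is meant to be read rather than run.

/* Assumed pattern: write only registers named in the clobber list. */
static inline void set_cr_c_bit(void)
{
#ifdef __arm__
	asm volatile(
	"mrc	p15, 0, r0, c1, c0, 0\n\t"
	"tst	r0, #(1 << 2)\n\t"
	"orreq	r0, r0, #(1 << 2)\n\t"
	"mcreq	p15, 0, r0, c1, c0, 0\n\t"
	"isb"
	: : : "r0", "cc", "memory");	/* r0 is scratch, flags are touched */
#endif
}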
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 332af927f4d3..67fd26a18441 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -76,7 +76,7 @@
  * (assuming that it is counting N upwards), or -2 if the enclosing loop
  * should skip to the next iteration (again assuming N is increasing).
  */
-static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
 {
 	struct dpll_data *dd;
 	long fint, fint_min, fint_max;
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 04dab2fcf862..ee6c784cd6b7 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -26,11 +26,14 @@
 #define OMAP3430_EN_WDT3_SHIFT				12
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK		(1 << 0)
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT		0
+#define OMAP3430_IVA2_DPLL_FREQSEL_SHIFT		4
 #define OMAP3430_IVA2_DPLL_FREQSEL_MASK			(0xf << 4)
 #define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT		3
+#define OMAP3430_EN_IVA2_DPLL_SHIFT			0
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 #define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_CLK_MASK			(1 << 0)
+#define OMAP3430_AUTO_IVA2_DPLL_SHIFT			0
 #define OMAP3430_AUTO_IVA2_DPLL_MASK			(0x7 << 0)
 #define OMAP3430_IVA2_CLK_SRC_SHIFT			19
 #define OMAP3430_IVA2_CLK_SRC_WIDTH			3
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index b2d252bf4a53..dc571f1d3b8a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -162,7 +162,8 @@ static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
 }
 #endif
 
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
 void omap44xx_restart(enum reboot_mode mode, const char *cmd);
 #else
 static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 592ba0a0ecf3..b6f8f348296e 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -297,33 +297,6 @@ static void omap_init_audio(void)
 static inline void omap_init_audio(void) {}
 #endif
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
-	defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
-
-static struct platform_device omap_hdmi_audio = {
-	.name	= "omap-hdmi-audio",
-	.id	= -1,
-};
-
-static void __init omap_init_hdmi_audio(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("dss_hdmi");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev),
-	     "Can't build omap_device for omap-hdmi-audio-dai.\n");
-
-	platform_device_register(&omap_hdmi_audio);
-}
-#else
-static inline void omap_init_hdmi_audio(void) {}
-#endif
-
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
@@ -459,7 +432,6 @@ static int __init omap2_init_devices(void)
 	 */
 	omap_init_audio();
 	omap_init_camera();
-	omap_init_hdmi_audio();
 	omap_init_mbox();
 	/* If dtb is there, the devices will be created dynamically */
 	if (!of_have_populated_dt()) {
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index b8208b4b1bd9..f7492df1cbba 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -29,6 +29,7 @@
 #ifdef CONFIG_TIDSPBRIDGE_DVFS
 #include "omap-pm.h"
 #endif
+#include "soc.h"
 
 #include <linux/platform_data/dsp-omap.h>
 
@@ -59,6 +60,9 @@ void __init omap_dsp_reserve_sdram_memblock(void)
 	phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
 	phys_addr_t paddr;
 
+	if (!cpu_is_omap34xx())
+		return;
+
 	if (!size)
 		return;
 
@@ -83,6 +87,9 @@ static int __init omap_dsp_init(void)
 	int err = -ENOMEM;
 	struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
 
+	if (!cpu_is_omap34xx())
+		return 0;
+
 	pdata->phys_mempool_base = omap_dsp_get_mempool_base();
 
 	if (pdata->phys_mempool_base) {
@@ -115,6 +122,9 @@ module_init(omap_dsp_init);
 
 static void __exit omap_dsp_exit(void)
 {
+	if (!cpu_is_omap34xx())
+		return;
+
 	platform_device_unregister(omap_dsp_pdev);
 }
 module_exit(omap_dsp_exit);
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd39360afe..93914d220069 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    soc_is_omap54xx() || soc_is_dra7xx())
 		return 1;
 
+	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+	    ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+		if (cpu_is_omap24xx())
+			return 0;
+		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+			return 0;
+		else
+			return 1;
+	}
+
 	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
 	 * which require H/W based ECC error detection */
 	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
 		return 0;
 
-	/*
-	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-	 * and AM33xx derivates. Other chips may be added if confirmed to work.
-	 */
-	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-		return 0;
-
 	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
 	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
 		return 1;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 2c0c2816900f..8bc13380f0a0 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1615,7 +1615,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
 		return ret;
 	}
 
-	for_each_child_of_node(pdev->dev.of_node, child) {
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
 
 		if (!child->name)
 			continue;
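for_each_available_child_of_node() differs from the plain child iterator only in that it skips children whose status property disables them, which is what lets a board DTS turn a GPMC chip-select off. A small self-contained model of that filter (the structs and sample nodes below are made up for illustration):

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	const char *status;	/* NULL means "no status property" */
};

static int is_available(const struct node *n)
{
	return !n->status || !strcmp(n->status, "okay") || !strcmp(n->status, "ok");
}

int main(void)
{
	const struct node children[] = {
		{ "nand@0,0", "okay" },
		{ "nor@1,0",  "disabled" },
		{ "eth@2,0",  NULL },
	};

	for (unsigned int i = 0; i < sizeof(children) / sizeof(children[0]); i++) {
		if (!is_available(&children[i]))
			continue;	/* what the "available" iterator adds */
		printf("probing %s\n", children[i].name);
	}
	return 0;
}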
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e8106eb96..a0fe747634c1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
 		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
 		break;
 
+	case L310_POWER_CTRL:
+		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+		return;
+
 	default:
 		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
 		return;
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 20b4398cec05..284324f2b98a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1268,9 +1268,6 @@ static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
 };
 
 /* sata */
-static struct omap_hwmod_opt_clk sata_opt_clks[] = {
-	{ .role = "ref_clk", .clk = "sata_ref_clk" },
-};
 
 static struct omap_hwmod dra7xx_sata_hwmod = {
 	.name		= "sata",
@@ -1278,6 +1275,7 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
 	.clkdm_name	= "l3init_clkdm",
 	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
 	.main_clk	= "func_48m_fclk",
+	.mpu_rt_idx	= 1,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
@@ -1285,8 +1283,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
 			.modulemode   = MODULEMODE_SWCTRL,
 		},
 	},
-	.opt_clks	= sata_opt_clks,
-	.opt_clks_cnt	= ARRAY_SIZE(sata_opt_clks),
 };
 
 /*
@@ -1731,8 +1727,20 @@ static struct omap_hwmod dra7xx_uart6_hwmod = {
 *
 */
 
+static struct omap_hwmod_class_sysconfig dra7xx_usb_otg_ss_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SIDLEMODE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+			   MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
 static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
 	.name	= "usb_otg_ss",
+	.sysc	= &dra7xx_usb_otg_ss_sysc,
 };
 
 /* usb_otg_ss1 */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 106132db532b..cbefbd7cfdb5 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -35,6 +35,8 @@
 #define OMAP3430_LOGICSTATEST_MASK			(1 << 2)
 #define OMAP3430_LASTLOGICSTATEENTERED_MASK		(1 << 2)
 #define OMAP3430_LASTPOWERSTATEENTERED_MASK		(0x3 << 0)
+#define OMAP3430_GRPSEL_MCBSP5_MASK			(1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK			(1 << 9)
 #define OMAP3630_GRPSEL_UART4_MASK			(1 << 18)
 #define OMAP3430_GRPSEL_GPIO6_MASK			(1 << 17)
 #define OMAP3430_GRPSEL_GPIO5_MASK			(1 << 16)
@@ -42,6 +44,10 @@
 #define OMAP3430_GRPSEL_GPIO3_MASK			(1 << 14)
 #define OMAP3430_GRPSEL_GPIO2_MASK			(1 << 13)
 #define OMAP3430_GRPSEL_UART3_MASK			(1 << 11)
+#define OMAP3430_GRPSEL_GPT8_MASK			(1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK			(1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK			(1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK			(1 << 6)
 #define OMAP3430_GRPSEL_MCBSP4_MASK			(1 << 2)
 #define OMAP3430_GRPSEL_MCBSP3_MASK			(1 << 1)
 #define OMAP3430_GRPSEL_MCBSP2_MASK			(1 << 0)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 076172b69422..7c3fb41a462e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -664,7 +664,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
 
 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 {
-	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
+	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
 	bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
 
 	if (rev >= L310_CACHE_ID_RTL_R2P0) {
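The cache-l2x0 fix reads the RTL revision field instead of the part-number field out of the same ID register before comparing against an RTL threshold. A toy bitfield example of why the two masks select different information; the mask layout follows my reading of the L2C code and should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

#define L2X0_CACHE_ID_PART_MASK	(0xf << 6)	/* bits 9..6: part number */
#define L2X0_CACHE_ID_RTL_MASK	0x3f		/* bits 5..0: RTL revision */

int main(void)
{
	uint32_t cache_id = 0x410000c8;	/* made-up register value */

	printf("part 0x%x, rtl 0x%x\n",
	       (cache_id & L2X0_CACHE_ID_PART_MASK) >> 6,
	       cache_id & L2X0_CACHE_ID_RTL_MASK);
	return 0;
}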
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de346be6..839f48c26ef0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
 	select ARCH_HAS_OPP
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
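Every hunk in aes-glue.c makes the same change: report the bytes that were not consumed (walk.nbytes % AES_BLOCK_SIZE) back to the walk instead of claiming zero residue, so a partial trailing chunk is carried into the next pass rather than silently dropped. A stand-alone sketch of that bookkeeping, not the crypto API itself:

#include <stdio.h>

#define BLOCK_SIZE 16

/* Process only whole blocks and return how many bytes are left over,
 * which is the quantity the walk needs to be told about. */
static unsigned int process_chunk(unsigned int nbytes)
{
	unsigned int blocks = nbytes / BLOCK_SIZE;
	unsigned int residue = nbytes % BLOCK_SIZE;

	printf("handled %u block(s), %u byte(s) left for the next pass\n",
	       blocks, residue);
	return residue;
}

int main(void)
{
	process_chunk(100);	/* 6 blocks, 4 bytes of residue */
	return 0;
}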
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 993bce527b85..902eb708804a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -56,6 +56,8 @@
 #define TASK_SIZE_32		UL(0x100000000)
 #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
 				TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+				TASK_SIZE_32 : TASK_SIZE_64)
 #else
 #define TASK_SIZE		TASK_SIZE_64
 #endif /* CONFIG_COMPAT */
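TASK_SIZE_OF(tsk) mirrors TASK_SIZE but inspects an arbitrary task's compat flag instead of the current one, which is what generic code looking at another process needs. A userspace model with invented types and purely illustrative limits (the real arm64 TASK_SIZE_64 depends on the configured VA range):

#include <stdio.h>
#include <stdint.h>

struct task {
	int is_32bit;	/* stand-in for the TIF_32BIT thread flag */
};

#define TASK_SIZE_32	UINT64_C(0x100000000)
#define TASK_SIZE_64	(UINT64_C(1) << 48)	/* example value only */
#define TASK_SIZE_OF(t)	((t)->is_32bit ? TASK_SIZE_32 : TASK_SIZE_64)

int main(void)
{
	struct task compat = { 1 }, native = { 0 };

	printf("compat limit 0x%llx, native limit 0x%llx\n",
	       (unsigned long long)TASK_SIZE_OF(&compat),
	       (unsigned long long)TASK_SIZE_OF(&native));
	return 0;
}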
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a639ac5..e786e6cdc400 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	copy_page(kto, kfrom);
 	__flush_dcache_area(kto, PAGE_SIZE);
 }
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
 
 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
 {
 	clear_page(kaddr);
 }
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		unsigned long max_dma_phys =
-			(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+		max_dma = PFN_DOWN(max_zone_dma_phys());
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+		dma_phys_limit = max_zone_dma_phys();
 	dma_contiguous_reserve(dma_phys_limit);
 
 	memblock_allow_resize();
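The new max_zone_dma_phys() clamps ZONE_DMA to 4 GiB above the start of DRAM instead of assuming DRAM begins at physical address 0. Reworked stand-alone with example inputs; the DRAM boundaries below are made up:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* With RAM at 0x80_0000_0000..0x88_0000_0000, the ZONE_DMA limit becomes
 * the upper bits of start-of-DRAM plus 4 GiB, i.e. 0x81_0000_0000. */
static uint64_t max_zone_dma_phys(uint64_t dram_start, uint64_t dram_end)
{
	uint64_t offset = dram_start & GENMASK_ULL(63, 32);
	uint64_t limit = offset + (1ULL << 32);

	return limit < dram_end ? limit : dram_end;
}

int main(void)
{
	uint64_t limit = max_zone_dma_phys(0x8000000000ULL, 0x8800000000ULL);

	printf("ZONE_DMA ends at 0x%llx\n", (unsigned long long)limit);
	return 0;
}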
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd84183..fcec5ce71392 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864b2b74..c9eec84aa258 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
 
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-	.init.data : AT(__data_lma + __data_len)
+	.init.data : AT(__data_lma + __data_len + 32)
 	{
 		__sinitdata = .;
 		INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4fe760c..0ccf0cf4daaf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6dbda3da..1e7290ef3525 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454b4bff..c7495dc74690 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a0211225748d..6b988ad653d8 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6112c1..1fe7ff286619 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
 	PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d5ccb1..6ab951534d79 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df4cacc..e862f7823e68 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43e7abe..2de71e8c104b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600de69f..e2c0b024ce88 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL	0x00000010
 
-	if (!devm_pinctrl_get_select_default(&pdev->dev))
-		return -EBUSY;
 	bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
 	bfin_write32(SMC_B0CTL, 0x01002011);
 	bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-	devm_pinctrl_put(pdev->dev.pins->p);
 	bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb965636..a1efd936dd30 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd6955c7be..b1bfcf434d16 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-	bf609_nor_flash_exit();
+	bf609_nor_flash_exit(NULL);
 	return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-	bf609_nor_flash_init();
+	bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7cef204c..1f94784eab6d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
 	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
 	/* Enable interrupts IVG7-15 */
 	bfin_irq_flags |= IMASK_IVG15 |
 		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index dbb118e1a4e0..a54788458ca3 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -921,7 +921,8 @@ L(nocon):
 	jls	1f
 	lsrl	#1,%d1
 1:
-	movel	%d1,m68k_init_mapped_size
+	lea	%pc@(m68k_init_mapped_size),%a0
+	movel	%d1,%a0@
 	mmu_map	#PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
 		%pc@(m68k_supervisor_cachemode)
 
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 958f1adb9d0c..3857737e3958 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -30,6 +31,7 @@
 
 
 unsigned long	(*mach_random_get_entropy)(void);
+EXPORT_SYMBOL_GPL(mach_random_get_entropy);
 
 
 /*
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
 
-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 608716f8496b..af3bc359dc70 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = {
 	{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
 	{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
 	{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
-	{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
+	{HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+	{HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
 	{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
 	{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
 	{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index bb9f3b64de55..93c1963d76fe 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2000-2001 Hewlett Packard Company
  * Copyright (C) 2000 John Marvin
  * Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <deller@gmx.de>
  *
  * These routines maintain argument size conversion between 32bit and 64bit
  * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
@@ -11,44 +12,8 @@
 
 #include <linux/compat.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/file.h>
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/time.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/ncp_fs.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
-#include <linux/mman.h>
-#include <linux/binfmts.h>
-#include <linux/namei.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/swap.h>
 #include <linux/syscalls.h>
 
-#include <asm/types.h>
-#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x)	printk x
-#else
-#define DBG(x)
-#endif
 
 asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
 	int r22, int r21, int r20)
@@ -57,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
 		current->comm, current->pid, r20);
 	return -ENOSYS;
 }
+
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+	compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+	const char  __user * pathname)
+{
+	return sys_fanotify_mark(fanotify_fd, flags,
+			((__u64)mask1 << 32) | mask0,
+			 dfd, pathname);
+}
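The new sys32_fanotify_mark() exists because a 64-bit mask argument arrives from 32-bit userspace as two 32-bit register halves; the wrapper glues them back together before calling the native syscall. The reassembly in isolation, as a tiny runnable check:

#include <stdio.h>
#include <stdint.h>

/* mask0 holds the low 32 bits, mask1 the high 32 bits, matching the
 * ((__u64)mask1 << 32) | mask0 expression in the wrapper above. */
static uint64_t join_mask(uint32_t mask0, uint32_t mask1)
{
	return ((uint64_t)mask1 << 32) | mask0;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)join_mask(0x2aU, 0x1U));
	/* prints 0x10000002a */
	return 0;
}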
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index c5fa7a697fba..84c5d3a58fa1 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -418,7 +418,7 @@
 	ENTRY_SAME(accept4)		/* 320 */
 	ENTRY_SAME(prlimit64)
 	ENTRY_SAME(fanotify_init)
-	ENTRY_COMP(fanotify_mark)
+	ENTRY_DIFF(fanotify_mark)
 	ENTRY_COMP(clock_adjtime)
 	ENTRY_SAME(name_to_handle_at)	/* 325 */
 	ENTRY_COMP(open_by_handle_at)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
 	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-	memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bd6dd6ed3a9f..80b94b0add1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
 	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -414,7 +415,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-	select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1018,7 @@ endmenu
 if PPC64
 config RELOCATABLE
 	bool "Build a relocatable kernel"
+	depends on !COMPILE_TEST
 	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running anywhere
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
 	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
+	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
 	unsigned int *low_prot[4];
 };
 
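Sizing protptrs[] as TASK_SIZE_USER64 >> 43 keeps the table proportional to the user address space: each slot appears to cover 1 << 43 bytes (8 TB), so the old hard-coded 2 entries only reached 16 TB. A quick arithmetic check; the 64 TB figure is an example, since the real TASK_SIZE_USER64 is configuration dependent:

#include <stdio.h>

int main(void)
{
	unsigned long long task_size_user64 = 1ULL << 46;	/* 64 TB example */

	/* 64 TB / 8 TB per slot = 8 entries */
	printf("protptrs entries: %llu\n", task_size_user64 >> 43);
	return 0;
}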
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f8d1d6dcf7db..e61f24ed4e65 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -106,13 +105,6 @@
 			       MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 			       MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2		MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-				MMU_FTR_USE_TLBIVAX_BCAST | \
-				MMU_FTR_LOCK_BCAST_INVAL | \
-				MMU_FTR_USE_TLBRSRV | \
-				MMU_FTR_USE_PAIRED_MAS | \
-				MMU_FTR_TLBIEL | \
-				MMU_FTR_16M_PAGE
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9ed737146dbb..b3e936027b26 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB		0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8 DD1: Does not support doorbell IPIs */
+		.pvr_mask		= 0xffffff00,
+		.pvr_value		= 0x004d0100,
+		.cpu_name		= "POWER8 (raw)",
+		.cpu_features		= CPU_FTRS_POWER8_DD1,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004d0000,
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d4..5cf3d367190d 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
 	li	r3,1
-	li	r4,0
+	li	r4,1
 	b	power7_powersave_common
 	/* No return */
 
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025b..db2b482af658 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
 		for (i = 0; i < f->num_blocks; i++) {
-			f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+			f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
 			image_size += f->blocks[i].length;
+			f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
 		}
 		next = f->next;
 		/* Don't translate NULL pointer for last entry */
 		if (f->next)
-			f->next = (struct flash_block_list *)__pa(f->next);
+			f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
 		else
 			f->next = NULL;
 		/* make num_blocks into the version/length field */
 		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+		f->num_blocks = cpu_to_be64(f->num_blocks);
 	}
 
 	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
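The rtas_flash change converts every field that firmware reads (block pointers, lengths, the version/length word) to big-endian before the list is handed over, which only matters on a little-endian kernel. A userspace sketch of the byte-swap step; the struct layout and values are invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Store x most-significant byte first, regardless of host endianness
 * (what cpu_to_be64 does on a little-endian kernel). */
static uint64_t to_be64(uint64_t x)
{
	uint8_t out[8];
	uint64_t v;

	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(x >> (56 - 8 * i));
	memcpy(&v, out, 8);
	return v;
}

struct flash_block {
	uint64_t data;		/* firmware-visible: big-endian physical addr */
	uint64_t length;	/* firmware-visible: big-endian length */
};

int main(void)
{
	struct flash_block b = { .data = to_be64(0x1000), .length = to_be64(4096) };

	printf("stored data %016llx length %016llx\n",
	       (unsigned long long)b.data, (unsigned long long)b.length);
	return 0;
}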
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 51a3ff78838a..1007fb802e6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 8c86422a1e37..731be7478b27 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-	mfspr	r9, SPRN_SIER
-	std	r8, HSTATE_MMCR + 40(r13)
-	std	r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 	/*
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
 	stb	r4,0(r6)
 	blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	b	memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1198 sh = regs->gpr[rb] & 0x3f; 1198 sh = regs->gpr[rb] & 0x3f;
1199 ival = (signed int) regs->gpr[rd]; 1199 ival = (signed int) regs->gpr[rd];
1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); 1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1201 if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) 1201 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1202 regs->xer |= XER_CA; 1202 regs->xer |= XER_CA;
1203 else 1203 else
1204 regs->xer &= ~XER_CA; 1204 regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1208 sh = rb; 1208 sh = rb;
1209 ival = (signed int) regs->gpr[rd]; 1209 ival = (signed int) regs->gpr[rd];
1210 regs->gpr[ra] = ival >> sh; 1210 regs->gpr[ra] = ival >> sh;
1211 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1211 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1212 regs->xer |= XER_CA; 1212 regs->xer |= XER_CA;
1213 else 1213 else
1214 regs->xer &= ~XER_CA; 1214 regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1216 1216
1217#ifdef __powerpc64__ 1217#ifdef __powerpc64__
1218 case 27: /* sld */ 1218 case 27: /* sld */
1219 sh = regs->gpr[rd] & 0x7f; 1219 sh = regs->gpr[rb] & 0x7f;
1220 if (sh < 64) 1220 if (sh < 64)
1221 regs->gpr[ra] = regs->gpr[rd] << sh; 1221 regs->gpr[ra] = regs->gpr[rd] << sh;
1222 else 1222 else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1235 sh = regs->gpr[rb] & 0x7f; 1235 sh = regs->gpr[rb] & 0x7f;
1236 ival = (signed long int) regs->gpr[rd]; 1236 ival = (signed long int) regs->gpr[rd];
1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); 1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1238 if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) 1238 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1239 regs->xer |= XER_CA; 1239 regs->xer |= XER_CA;
1240 else 1240 else
1241 regs->xer &= ~XER_CA; 1241 regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1246 sh = rb | ((instr & 2) << 4); 1246 sh = rb | ((instr & 2) << 4);
1247 ival = (signed long int) regs->gpr[rd]; 1247 ival = (signed long int) regs->gpr[rd];
1248 regs->gpr[ra] = ival >> sh; 1248 regs->gpr[ra] = ival >> sh;
1249 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1249 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1250 regs->xer |= XER_CA; 1250 regs->xer |= XER_CA;
1251 else 1251 else
1252 regs->xer &= ~XER_CA; 1252 regs->xer &= ~XER_CA;
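
All of the sstep.c hunks above are the same fix: the carry-out mask must be as wide as the value being shifted, because a plain int `1 << sh` is truncated (strictly, undefined) once sh reaches 32, so the XER[CA] test silently broke for the 64-bit shift cases. A stand-alone illustration of the corrected computation (using unsigned long long so the sketch stays portable to 32-bit hosts; sh is assumed < 64, as the emulation code already guarantees):

#include <stdio.h>

static int sra_carry(long long ival, unsigned int sh)
{
	/* the bits shifted out are the low "sh" bits of the original value */
	unsigned long long lost = (unsigned long long)ival & ((1ull << sh) - 1);

	return ival < 0 && lost != 0;
}

int main(void)
{
	/* -1 >> 40 shifts out forty one-bits, so carry must be set; with a
	 * 32-bit mask constant this would wrongly report 0 */
	printf("carry = %d\n", sra_carry(-1LL, 40));
	return 0;
}
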
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index af3d78e19302..928ebe79668b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
410 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { 410 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
411 first_context = 1; 411 first_context = 1;
412 last_context = 65535; 412 last_context = 65535;
413 } else 413 } else {
414#ifdef CONFIG_PPC_BOOK3E_MMU
415 if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
416 u32 mmucfg = mfspr(SPRN_MMUCFG);
417 u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
418 >> MMUCFG_PIDSIZE_SHIFT;
419 first_context = 1;
420 last_context = (1UL << (pid_bits + 1)) - 1;
421 } else
422#endif
423 {
424 first_context = 1; 414 first_context = 1;
425 last_context = 255; 415 last_context = 255;
426 } 416 }
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6dcdadefd8d0..82e82cadcde5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
390 case BPF_ANC | SKF_AD_VLAN_TAG: 390 case BPF_ANC | SKF_AD_VLAN_TAG:
391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: 391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
393 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
394
393 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 395 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
394 vlan_tci)); 396 vlan_tci));
395 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 397 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
396 PPC_ANDI(r_A, r_A, VLAN_VID_MASK); 398 PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
397 else 399 } else {
398 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); 400 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
401 PPC_SRWI(r_A, r_A, 12);
402 }
399 break; 403 break;
400 case BPF_ANC | SKF_AD_QUEUE: 404 case BPF_ANC | SKF_AD_QUEUE:
401 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 405 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b54..fe52db2eea6a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
485 * check that the PMU supports EBB, meaning those that don't can still 485 * check that the PMU supports EBB, meaning those that don't can still
486 * use bit 63 of the event code for something else if they wish. 486 * use bit 63 of the event code for something else if they wish.
487 */ 487 */
488 return (ppmu->flags & PPMU_EBB) && 488 return (ppmu->flags & PPMU_ARCH_207S) &&
489 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); 489 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
490} 490}
491 491
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
777 if (ppmu->flags & PPMU_HAS_SIER) 777 if (ppmu->flags & PPMU_HAS_SIER)
778 sier = mfspr(SPRN_SIER); 778 sier = mfspr(SPRN_SIER);
779 779
780 if (ppmu->flags & PPMU_EBB) { 780 if (ppmu->flags & PPMU_ARCH_207S) {
781 pr_info("MMCR2: %016lx EBBHR: %016lx\n", 781 pr_info("MMCR2: %016lx EBBHR: %016lx\n",
782 mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR)); 782 mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
783 pr_info("EBBRR: %016lx BESCR: %016lx\n", 783 pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
996 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); 996 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
997 997
998 local64_add(delta, &event->count); 998 local64_add(delta, &event->count);
999 local64_sub(delta, &event->hw.period_left); 999
1000 /*
1001 * A number of places program the PMC with (0x80000000 - period_left).
1002 * We never want period_left to be less than 1 because we will program
1003 * the PMC with a value >= 0x800000000 and an edge detected PMC will
1004 * roll around to 0 before taking an exception. We have seen this
1005 * on POWER8.
1006 *
1007 * To fix this, clamp the minimum value of period_left to 1.
1008 */
1009 do {
1010 prev = local64_read(&event->hw.period_left);
1011 val = prev - delta;
1012 if (val < 1)
1013 val = 1;
1014 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
1000} 1015}
1001 1016
1002/* 1017/*
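
The period_left update added above is a lock-free read-modify-write: take a snapshot, compute the clamped result, and retry with cmpxchg until no concurrent update raced in, so the floor of 1 is enforced without taking a lock. A user-space sketch of the same loop using C11 atomics (names are made up):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void sub_and_clamp(_Atomic int64_t *period_left, int64_t delta)
{
	int64_t prev, val;

	prev = atomic_load(period_left);
	do {
		val = prev - delta;
		if (val < 1)
			val = 1;	/* never let the next PMC program wrap past zero */
	} while (!atomic_compare_exchange_weak(period_left, &prev, val));
}

int main(void)
{
	_Atomic int64_t left = 10;

	sub_and_clamp(&left, 100);
	printf("period_left = %lld\n", (long long)atomic_load(&left));
	return 0;
}
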
@@ -1292,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
1292 out_enable: 1307 out_enable:
1293 pmao_restore_workaround(ebb); 1308 pmao_restore_workaround(ebb);
1294 1309
1310 if (ppmu->flags & PPMU_ARCH_207S)
1311 mtspr(SPRN_MMCR2, 0);
1312
1295 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); 1313 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
1296 1314
1297 mb(); 1315 mb();
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
1696 1714
1697 if (has_branch_stack(event)) { 1715 if (has_branch_stack(event)) {
1698 /* PMU has BHRB enabled */ 1716 /* PMU has BHRB enabled */
1699 if (!(ppmu->flags & PPMU_BHRB)) 1717 if (!(ppmu->flags & PPMU_ARCH_207S))
1700 return -EOPNOTSUPP; 1718 return -EOPNOTSUPP;
1701 } 1719 }
1702 1720
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index fe2763b6e039..639cd9156585 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
792 .get_constraint = power8_get_constraint, 792 .get_constraint = power8_get_constraint,
793 .get_alternatives = power8_get_alternatives, 793 .get_alternatives = power8_get_alternatives,
794 .disable_pmc = power8_disable_pmc, 794 .disable_pmc = power8_disable_pmc,
795 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, 795 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
796 .n_generic = ARRAY_SIZE(power8_generic_events), 796 .n_generic = ARRAY_SIZE(power8_generic_events),
797 .generic_events = power8_generic_events, 797 .generic_events = power8_generic_events,
798 .cache_events = &power8_cache_events, 798 .cache_events = &power8_cache_events,
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 38e0a1a5cec3..5e6e0bad6db6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
111 return ret; 111 return ret;
112} 112}
113 113
114#ifdef CONFIG_COREDUMP
114int elf_coredump_extra_notes_size(void) 115int elf_coredump_extra_notes_size(void)
115{ 116{
116 struct spufs_calls *calls; 117 struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
142 143
143 return ret; 144 return ret;
144} 145}
146#endif
145 147
146void notify_spus_active(void) 148void notify_spus_active(void)
147{ 149{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b9d5d678aa44..52a7d2596d30 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,9 @@
1 1
2obj-$(CONFIG_SPU_FS) += spufs.o 2obj-$(CONFIG_SPU_FS) += spufs.o
3spufs-y += inode.o file.o context.o syscalls.o coredump.o 3spufs-y += inode.o file.o context.o syscalls.o
4spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o 4spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
5spufs-y += switch.o fault.o lscsa_alloc.o 5spufs-y += switch.o fault.o lscsa_alloc.o
6spufs-$(CONFIG_COREDUMP) += coredump.o
6 7
7# magic for the trace events 8# magic for the trace events
8CFLAGS_sched.o := -I$(src) 9CFLAGS_sched.o := -I$(src)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index b045fdda4845..a87200a535fa 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
79struct spufs_calls spufs_calls = { 79struct spufs_calls spufs_calls = {
80 .create_thread = do_spu_create, 80 .create_thread = do_spu_create,
81 .spu_run = do_spu_run, 81 .spu_run = do_spu_run,
82 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
83 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
84 .notify_spus_active = do_notify_spus_active, 82 .notify_spus_active = do_notify_spus_active,
85 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84#ifdef CONFIG_COREDUMP
85 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
86 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
87#endif
86}; 88};
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c41d830..0ad533b617f7 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
249 249
250 rc = opal_get_elog_size(&id, &size, &type); 250 rc = opal_get_elog_size(&id, &size, &type);
251 if (rc != OPAL_SUCCESS) { 251 if (rc != OPAL_SUCCESS) {
252 pr_err("ELOG: Opal log read failed\n"); 252 pr_err("ELOG: OPAL log info read failed\n");
253 return; 253 return;
254 } 254 }
255 255
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
257 log_id = be64_to_cpu(id); 257 log_id = be64_to_cpu(id);
258 elog_type = be64_to_cpu(type); 258 elog_type = be64_to_cpu(type);
259 259
260 BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); 260 WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
261 261
262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE) 262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
263 elog_size = OPAL_MAX_ERRLOG_SIZE; 263 elog_size = OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
86 } 86 }
87 87
88 of_node_set_flag(dn, OF_DYNAMIC); 88 of_node_set_flag(dn, OF_DYNAMIC);
89 of_node_init(dn);
89 90
90 return dn; 91 return dn;
91} 92}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
69 69
70 np->properties = proplist; 70 np->properties = proplist;
71 of_node_set_flag(np, OF_DYNAMIC); 71 of_node_set_flag(np, OF_DYNAMIC);
72 of_node_init(np);
72 73
73 np->parent = derive_parent(path); 74 np->parent = derive_parent(path);
74 if (IS_ERR(np->parent)) { 75 if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
51 return 0; 51 return 0;
52 52
53 asm volatile( 53 asm volatile(
54 "0: lfpc %1\n" 54 " lfpc %1\n"
55 " la %0,0\n" 55 "0: la %0,0\n"
56 "1:\n" 56 "1:\n"
57 EX_TABLE(0b,1b) 57 EX_TABLE(0b,1b)
58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); 58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
437 437
438#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 439#if defined(CONFIG_MARCH_ZEC12)
440 .long 3, 0xc100efea, 0xf46ce800, 0x00400000 440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_Z196) 441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efea, 0xf46c0000 442 .long 2, 0xc100eff2, 0xf46c0000
443#elif defined(CONFIG_MARCH_Z10) 443#elif defined(CONFIG_MARCH_Z10)
444 .long 2, 0xc100efea, 0xf0680000 444 .long 2, 0xc100eff2, 0xf0680000
445#elif defined(CONFIG_MARCH_Z9_109) 445#elif defined(CONFIG_MARCH_Z9_109)
446 .long 1, 0xc100efc2 446 .long 1, 0xc100efc2
447#elif defined(CONFIG_MARCH_Z990) 447#elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
334 unsigned long mask = PSW_MASK_USER; 334 unsigned long mask = PSW_MASK_USER;
335 335
336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0; 336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
337 if ((data & ~mask) != PSW_USER_BITS) 337 if ((data ^ PSW_USER_BITS) & ~mask)
338 /* Invalid psw mask. */
339 return -EINVAL;
340 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
341 /* Invalid address-space-control bits */
338 return -EINVAL; 342 return -EINVAL;
339 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) 343 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
344 /* Invalid addressing mode bits */
340 return -EINVAL; 345 return -EINVAL;
341 } 346 }
342 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 347 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
672 677
673 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; 678 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
674 /* Build a 64 bit psw mask from 31 bit mask. */ 679 /* Build a 64 bit psw mask from 31 bit mask. */
675 if ((tmp & ~mask) != PSW32_USER_BITS) 680 if ((tmp ^ PSW32_USER_BITS) & ~mask)
676 /* Invalid psw mask. */ 681 /* Invalid psw mask. */
677 return -EINVAL; 682 return -EINVAL;
683 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
684 /* Invalid address-space-control bits */
685 return -EINVAL;
678 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 686 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
679 (regs->psw.mask & PSW_MASK_BA) | 687 (regs->psw.mask & PSW_MASK_BA) |
680 (__u64)(tmp & mask) << 32; 688 (__u64)(tmp & mask) << 32;
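
The PSW checks above switch to an XOR-and-mask test: XOR the submitted word with the expected template, clear the bits userspace is allowed to change, and reject the poke if anything remains, then additionally refuse the home address-space-control value. A toy version of the idiom with invented constants:

#include <stdint.h>
#include <stdio.h>

#define TEMPLATE  0x0000000180000000ull	/* bits that must look exactly like this */
#define USER_MASK 0x00000000000000ffull	/* bits the caller may change freely */

static int mask_valid(uint64_t data)
{
	/* any difference outside USER_MASK means a fixed bit was touched */
	return ((data ^ TEMPLATE) & ~USER_MASK) == 0;
}

int main(void)
{
	printf("%d %d\n",
	       mask_valid(TEMPLATE | 0x3),		/* ok: only user bits differ */
	       mask_valid(TEMPLATE ^ (1ull << 40)));	/* rejected: fixed bit flipped */
	return 0;
}
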
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
48static LIST_HEAD(zpci_list); 48static LIST_HEAD(zpci_list);
49static DEFINE_SPINLOCK(zpci_list_lock); 49static DEFINE_SPINLOCK(zpci_list_lock);
50 50
51static void zpci_enable_irq(struct irq_data *data);
52static void zpci_disable_irq(struct irq_data *data);
53
54static struct irq_chip zpci_irq_chip = { 51static struct irq_chip zpci_irq_chip = {
55 .name = "zPCI", 52 .name = "zPCI",
56 .irq_unmask = zpci_enable_irq, 53 .irq_unmask = unmask_msi_irq,
57 .irq_mask = zpci_disable_irq, 54 .irq_mask = mask_msi_irq,
58}; 55};
59 56
60static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
244 return rc; 241 return rc;
245} 242}
246 243
247static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
248{
249 int offset, pos;
250 u32 mask_bits;
251
252 if (msi->msi_attrib.is_msix) {
253 offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
254 PCI_MSIX_ENTRY_VECTOR_CTRL;
255 msi->masked = readl(msi->mask_base + offset);
256 writel(flag, msi->mask_base + offset);
257 } else if (msi->msi_attrib.maskbit) {
258 pos = (long) msi->mask_base;
259 pci_read_config_dword(msi->dev, pos, &mask_bits);
260 mask_bits &= ~(mask);
261 mask_bits |= flag & mask;
262 pci_write_config_dword(msi->dev, pos, mask_bits);
263 } else
264 return 0;
265
266 msi->msi_attrib.maskbit = !!flag;
267 return 1;
268}
269
270static void zpci_enable_irq(struct irq_data *data)
271{
272 struct msi_desc *msi = irq_get_msi_desc(data->irq);
273
274 zpci_msi_set_mask_bits(msi, 1, 0);
275}
276
277static void zpci_disable_irq(struct irq_data *data)
278{
279 struct msi_desc *msi = irq_get_msi_desc(data->irq);
280
281 zpci_msi_set_mask_bits(msi, 1, 1);
282}
283
284void pcibios_fixup_bus(struct pci_bus *bus) 244void pcibios_fixup_bus(struct pci_bus *bus)
285{ 245{
286} 246}
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
487 447
488 /* Release MSI interrupts */ 448 /* Release MSI interrupts */
489 list_for_each_entry(msi, &pdev->msi_list, list) { 449 list_for_each_entry(msi, &pdev->msi_list, list) {
490 zpci_msi_set_mask_bits(msi, 1, 1); 450 if (msi->msi_attrib.is_msix)
451 default_msix_mask_irq(msi, 1);
452 else
453 default_msi_mask_irq(msi, 1, 1);
491 irq_set_msi_desc(msi->irq, NULL); 454 irq_set_msi_desc(msi->irq, NULL);
492 irq_free_desc(msi->irq); 455 irq_free_desc(msi->irq);
493 msi->msg.address_lo = 0; 456 msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
32 32
33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) 33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ 34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
35 $(call cc-option,-m2a-nofpu,) 35 $(call cc-option,-m2a-nofpu,) \
36 $(call cc-option,-m4-nofpu,)
36cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) 37cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
37cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ 38cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
38 $(call cc-option,-mno-implicit-fp,-m4-nofpu) 39 $(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e988c56a..407c87d9879a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@ config SPARC64
78 select HAVE_C_RECORDMCOUNT 78 select HAVE_C_RECORDMCOUNT
79 select NO_BOOTMEM 79 select NO_BOOTMEM
80 select HAVE_ARCH_AUDITSYSCALL 80 select HAVE_ARCH_AUDITSYSCALL
81 select ARCH_SUPPORTS_ATOMIC_RMW
81 82
82config ARCH_DEFCONFIG 83config ARCH_DEFCONFIG
83 string 84 string
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
410#define __NR_finit_module 342 410#define __NR_finit_module 342
411#define __NR_sched_setattr 343 411#define __NR_sched_setattr 343
412#define __NR_sched_getattr 344 412#define __NR_sched_getattr 344
413#define __NR_renameat2 345
413 414
414#define NR_syscalls 345 415#define NR_syscalls 346
415 416
416/* Bitmask values returned from kern_features system call. */ 417/* Bitmask values returned from kern_features system call. */
417#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 418#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) 48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) 49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) 50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
51SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
51 52
52 .globl sys32_mmap2 53 .globl sys32_mmap2
53sys32_mmap2: 54sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2
90 91
91#endif /* CONFIG_COMPAT */ 92#endif /* CONFIG_COMPAT */
92 93
@@ -165,3 +166,4 @@ sys_call_table:
165/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
166 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 167 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
167/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 168/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
169 .word sys_renameat2
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079471bb..f1b3eb14b855 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
12#include <mem_user.h> 12#include <mem_user.h>
13#include <os.h> 13#include <os.h>
14#include <skas.h> 14#include <skas.h>
15#include <kern_util.h>
15 16
16struct host_vm_change { 17struct host_vm_change {
17 struct host_vm_op { 18 struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
124 struct host_vm_op *last; 125 struct host_vm_op *last;
125 int ret = 0; 126 int ret = 0;
126 127
128 if ((addr >= STUB_START) && (addr < STUB_END))
129 return -EINVAL;
130
127 if (hvc->index != 0) { 131 if (hvc->index != 0) {
128 last = &hvc->ops[hvc->index - 1]; 132 last = &hvc->ops[hvc->index - 1];
129 if ((last->type == MUNMAP) && 133 if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
283 /* This is not an else because ret is modified above */ 287 /* This is not an else because ret is modified above */
284 if (ret) { 288 if (ret) {
285 printk(KERN_ERR "fix_range_common: failed, killing current " 289 printk(KERN_ERR "fix_range_common: failed, killing current "
286 "process\n"); 290 "process: %d\n", task_tgid_vnr(current));
291 /* We are under mmap_sem, release it such that current can terminate */
292 up_write(&current->mm->mmap_sem);
287 force_sig(SIGKILL, current); 293 force_sig(SIGKILL, current);
294 do_signal();
288 } 295 }
289} 296}
290 297
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b87474a99..5678c3571e7c 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
206 int is_write = FAULT_WRITE(fi); 206 int is_write = FAULT_WRITE(fi);
207 unsigned long address = FAULT_ADDRESS(fi); 207 unsigned long address = FAULT_ADDRESS(fi);
208 208
209 if (regs) 209 if (!is_user && regs)
210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs); 210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
211 211
212 if (!is_user && (address >= start_vm) && (address < end_vm)) { 212 if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879a4617..908579f2b0ab 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
54 54
55void wait_stub_done(int pid) 55void wait_stub_done(int pid)
56{ 56{
57 int n, status, err, bad_stop = 0; 57 int n, status, err;
58 58
59 while (1) { 59 while (1) {
60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); 60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
74 74
75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) 75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
76 return; 76 return;
77 else
78 bad_stop = 1;
79 77
80bad_wait: 78bad_wait:
81 err = ptrace_dump_regs(pid); 79 err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
85 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " 83 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
86 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, 84 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
87 status); 85 status);
88 if (bad_stop) 86 fatal_sigsegv();
89 kill(pid, SIGKILL);
90 else
91 fatal_sigsegv();
92} 87}
93 88
94extern unsigned long current_stub_stack(void); 89extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..d24887b645dc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
131 select HAVE_CC_STACKPROTECTOR 131 select HAVE_CC_STACKPROTECTOR
132 select GENERIC_CPU_AUTOPROBE 132 select GENERIC_CPU_AUTOPROBE
133 select HAVE_ARCH_AUDITSYSCALL 133 select HAVE_ARCH_AUDITSYSCALL
134 select ARCH_SUPPORTS_ATOMIC_RMW
134 135
135config INSTRUCTION_DECODER 136config INSTRUCTION_DECODER
136 def_bool y 137 def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c223479e3c..7a6d43a554d7 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
91 91
92 .section ".bsdata", "a" 92 .section ".bsdata", "a"
93bugger_off_msg: 93bugger_off_msg:
94 .ascii "Direct floppy boot is not supported. " 94 .ascii "Use a boot loader.\r\n"
95 .ascii "Use a boot loader program instead.\r\n"
96 .ascii "\n" 95 .ascii "\n"
97 .ascii "Remove disk and press any key to reboot ...\r\n" 96 .ascii "Remove disk and press any key to reboot...\r\n"
98 .byte 0 97 .byte 0
99 98
100#ifdef CONFIG_EFI_STUB 99#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
108#else 107#else
109 .word 0x8664 # x86-64 108 .word 0x8664 # x86-64
110#endif 109#endif
111 .word 3 # nr_sections 110 .word 4 # nr_sections
112 .long 0 # TimeDateStamp 111 .long 0 # TimeDateStamp
113 .long 0 # PointerToSymbolTable 112 .long 0 # PointerToSymbolTable
114 .long 1 # NumberOfSymbols 113 .long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
250 .word 0 # NumberOfLineNumbers 249 .word 0 # NumberOfLineNumbers
251 .long 0x60500020 # Characteristics (section flags) 250 .long 0x60500020 # Characteristics (section flags)
252 251
252 #
253 # The offset & size fields are filled in by build.c.
254 #
255 .ascii ".bss"
256 .byte 0
257 .byte 0
258 .byte 0
259 .byte 0
260 .long 0
261 .long 0x0
262 .long 0 # Size of initialized data
263 # on disk
264 .long 0x0
265 .long 0 # PointerToRelocations
266 .long 0 # PointerToLineNumbers
267 .word 0 # NumberOfRelocations
268 .word 0 # NumberOfLineNumbers
269 .long 0xc8000080 # Characteristics (section flags)
270
253#endif /* CONFIG_EFI_STUB */ 271#endif /* CONFIG_EFI_STUB */
254 272
255 # Kernel attributes; used by setup. This is part 1 of the 273 # Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f2121cada..a7661c430cd9 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@ static void usage(void)
143 143
144#ifdef CONFIG_EFI_STUB 144#ifdef CONFIG_EFI_STUB
145 145
146static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) 146static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
147{ 147{
148 unsigned int pe_header; 148 unsigned int pe_header;
149 unsigned short num_sections; 149 unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
164 put_unaligned_le32(size, section + 0x8); 164 put_unaligned_le32(size, section + 0x8);
165 165
166 /* section header vma field */ 166 /* section header vma field */
167 put_unaligned_le32(offset, section + 0xc); 167 put_unaligned_le32(vma, section + 0xc);
168 168
169 /* section header 'size of initialised data' field */ 169 /* section header 'size of initialised data' field */
170 put_unaligned_le32(size, section + 0x10); 170 put_unaligned_le32(datasz, section + 0x10);
171 171
172 /* section header 'file offset' field */ 172 /* section header 'file offset' field */
173 put_unaligned_le32(offset, section + 0x14); 173 put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
179 } 179 }
180} 180}
181 181
182static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
183{
184 update_pecoff_section_header_fields(section_name, offset, size, size, offset);
185}
186
182static void update_pecoff_setup_and_reloc(unsigned int size) 187static void update_pecoff_setup_and_reloc(unsigned int size)
183{ 188{
184 u32 setup_offset = 0x200; 189 u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
203 208
204 pe_header = get_unaligned_le32(&buf[0x3c]); 209 pe_header = get_unaligned_le32(&buf[0x3c]);
205 210
206 /* Size of image */
207 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
208
209 /* 211 /*
210 * Size of code: Subtract the size of the first sector (512 bytes) 212 * Size of code: Subtract the size of the first sector (512 bytes)
211 * which includes the header. 213 * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
220 update_pecoff_section_header(".text", text_start, text_sz); 222 update_pecoff_section_header(".text", text_start, text_sz);
221} 223}
222 224
225static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
226{
227 unsigned int pe_header;
228 unsigned int bss_sz = init_sz - file_sz;
229
230 pe_header = get_unaligned_le32(&buf[0x3c]);
231
232 /* Size of uninitialized data */
233 put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
234
235 /* Size of image */
236 put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
237
238 update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
239}
240
223static int reserve_pecoff_reloc_section(int c) 241static int reserve_pecoff_reloc_section(int c)
224{ 242{
225 /* Reserve 0x20 bytes for .reloc section */ 243 /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
259static inline void update_pecoff_setup_and_reloc(unsigned int size) {} 277static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
260static inline void update_pecoff_text(unsigned int text_start, 278static inline void update_pecoff_text(unsigned int text_start,
261 unsigned int file_sz) {} 279 unsigned int file_sz) {}
280static inline void update_pecoff_bss(unsigned int file_sz,
281 unsigned int init_sz) {}
262static inline void efi_stub_defaults(void) {} 282static inline void efi_stub_defaults(void) {}
263static inline void efi_stub_entry_update(void) {} 283static inline void efi_stub_entry_update(void) {}
264 284
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
310 330
311int main(int argc, char ** argv) 331int main(int argc, char ** argv)
312{ 332{
313 unsigned int i, sz, setup_sectors; 333 unsigned int i, sz, setup_sectors, init_sz;
314 int c; 334 int c;
315 u32 sys_size; 335 u32 sys_size;
316 struct stat sb; 336 struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
376 buf[0x1f1] = setup_sectors-1; 396 buf[0x1f1] = setup_sectors-1;
377 put_unaligned_le32(sys_size, &buf[0x1f4]); 397 put_unaligned_le32(sys_size, &buf[0x1f4]);
378 398
379 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 399 update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
400 init_sz = get_unaligned_le32(&buf[0x260]);
401 update_pecoff_bss(i + (sys_size * 16), init_sz);
380 402
381 efi_stub_entry_update(); 403 efi_stub_entry_update();
382 404
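
The build.c changes reduce to a bit of arithmetic: everything actually written to the bzImage (the setup sectors plus sys_size 16-byte paragraphs) is the initialized portion, and whatever init_size in the setup header promises beyond that becomes .bss / SizeOfUninitializedData, with SizeOfImage now set to init_size rather than the file size. A quick restatement with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int setup_bytes = 4 * 512;	/* "i": setup sectors on disk */
	unsigned int sys_size    = 0x20000;	/* protected-mode kernel, 16-byte units */
	unsigned int init_sz     = 0x01000000;	/* init_size from the setup header */

	unsigned int file_sz = setup_bytes + sys_size * 16;	/* initialized image */
	unsigned int bss_sz  = init_sz - file_sz;		/* uninitialized tail */

	printf("SizeOfImage=%#x SizeOfUninitializedData=%#x\n", init_sz, bss_sz);
	return 0;
}
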
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f30cd10293f0..8626b03e83b7 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
141 141
142 /* save number of bits */ 142 /* save number of bits */
143 bits[1] = cpu_to_be64(sctx->count[0] << 3); 143 bits[1] = cpu_to_be64(sctx->count[0] << 3);
144 bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; 144 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
145 145
146 /* Pad out to 112 mod 128 and append length */ 146 /* Pad out to 112 mod 128 and append length */
147 index = sctx->count[0] & 0x7f; 147 index = sctx->count[0] & 0x7f;
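
The one-character sha512 fix above matters because the padded message length is a 128-bit bit count: the high word is count[1] << 3 with the three bits carried out of count[0] ORed in, and that OR has to happen before the big-endian conversion, not after it. A host-side sketch with htobe64() standing in for cpu_to_be64():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void message_bit_length(uint64_t count_lo, uint64_t count_hi,
			       uint64_t bits_be[2])
{
	bits_be[1] = htobe64(count_lo << 3);
	bits_be[0] = htobe64(count_hi << 3 | count_lo >> 61);
}

int main(void)
{
	uint64_t bits[2];

	/* 2^61 bytes hashed: the bit count overflows into the high word */
	message_bit_length(1ull << 61, 0, bits);
	printf("%016llx%016llx\n",
	       (unsigned long long)be64toh(bits[0]),
	       (unsigned long long)be64toh(bits[1]));
	return 0;
}
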
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
841 u32 eax; 841 u32 eax;
842 u8 ret = 0; 842 u8 ret = 0;
843 int idled = 0; 843 int idled = 0;
844 int polling;
845 int err = 0; 844 int err = 0;
846 845
847 if (!need_resched()) { 846 if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
370 */ 370 */
371 detect_extended_topology(c); 371 detect_extended_topology(c);
372 372
373 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
374 /*
375 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
376 * detection.
377 */
378 c->x86_max_cores = intel_num_cpu_cores(c);
379#ifdef CONFIG_X86_32
380 detect_ht(c);
381#endif
382 }
383
373 l2 = init_intel_cacheinfo(c); 384 l2 = init_intel_cacheinfo(c);
374 if (c->cpuid_level > 9) { 385 if (c->cpuid_level > 9) {
375 unsigned eax = cpuid_eax(10); 386 unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
438 set_cpu_cap(c, X86_FEATURE_P3); 449 set_cpu_cap(c, X86_FEATURE_P3);
439#endif 450#endif
440 451
441 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
442 /*
443 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
444 * detection.
445 */
446 c->x86_max_cores = intel_num_cpu_cores(c);
447#ifdef CONFIG_X86_32
448 detect_ht(c);
449#endif
450 }
451
452 /* Work around errata */ 452 /* Work around errata */
453 srat_detect_node(c); 453 srat_detect_node(c);
454 454
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
730#endif 730#endif
731 } 731 }
732 732
733#ifdef CONFIG_X86_HT
734 /*
735 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
736 * turns means that the only possibility is SMT (as indicated in
737 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
738 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
739 * c->phys_proc_id.
740 */
741 if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
742 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
743#endif
744
733 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); 745 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
734 746
735 return l2; 747 return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
2451 for_each_online_cpu(i) { 2451 for_each_online_cpu(i) {
2452 err = mce_device_create(i); 2452 err = mce_device_create(i);
2453 if (err) { 2453 if (err) {
2454 /*
2455 * Register notifier anyway (and do not unreg it) so
2456 * that we don't leave undeleted timers, see notifier
2457 * callback above.
2458 */
2459 __register_hotcpu_notifier(&mce_cpu_notifier);
2454 cpu_notifier_register_done(); 2460 cpu_notifier_register_done();
2455 goto err_device_create; 2461 goto err_device_create;
2456 } 2462 }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
2471err_register: 2477err_register:
2472 unregister_syscore_ops(&mce_syscore_ops); 2478 unregister_syscore_ops(&mce_syscore_ops);
2473 2479
2474 cpu_notifier_register_begin();
2475 __unregister_hotcpu_notifier(&mce_cpu_notifier);
2476 cpu_notifier_register_done();
2477
2478err_device_create: 2480err_device_create:
2479 /* 2481 /*
2480 * We didn't keep track of which devices were created above, but 2482 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
118 continue; 118 continue;
119 if (event->attr.config1 & ~er->valid_mask) 119 if (event->attr.config1 & ~er->valid_mask)
120 return -EINVAL; 120 return -EINVAL;
 121 /* Check if the extra msrs can be safely accessed */
122 if (!er->extra_msr_access)
123 return -ENXIO;
121 124
122 reg->idx = er->idx; 125 reg->idx = er->idx;
123 reg->config = event->attr.config1; 126 reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
295 u64 config_mask; 295 u64 config_mask;
296 u64 valid_mask; 296 u64 valid_mask;
297 int idx; /* per_xxx->regs[] reg index */ 297 int idx; /* per_xxx->regs[] reg index */
298 bool extra_msr_access;
298}; 299};
299 300
300#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ 301#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
301 .event = (e), \ 302 .event = (e), \
302 .msr = (ms), \ 303 .msr = (ms), \
303 .config_mask = (m), \ 304 .config_mask = (m), \
304 .valid_mask = (vm), \ 305 .valid_mask = (vm), \
305 .idx = EXTRA_REG_##i, \ 306 .idx = EXTRA_REG_##i, \
307 .extra_msr_access = true, \
306 } 308 }
307 309
308#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ 310#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
1382 intel_pmu_lbr_read(); 1382 intel_pmu_lbr_read();
1383 1383
1384 /* 1384 /*
1385 * CondChgd bit 63 doesn't mean any overflow status. Ignore
1386 * and clear the bit.
1387 */
1388 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1389 if (!status)
1390 goto done;
1391 }
1392
1393 /*
1385 * PEBS overflow sets bit 62 in the global status register 1394 * PEBS overflow sets bit 62 in the global status register
1386 */ 1395 */
1387 if (__test_and_clear_bit(62, (unsigned long *)&status)) { 1396 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
2173 } 2182 }
2174} 2183}
2175 2184
2185/*
 2186 * Under certain circumstances, accessing certain MSRs may cause a #GP.
2187 * The function tests if the input MSR can be safely accessed.
2188 */
2189static bool check_msr(unsigned long msr, u64 mask)
2190{
2191 u64 val_old, val_new, val_tmp;
2192
2193 /*
2194 * Read the current value, change it and read it back to see if it
2195 * matches, this is needed to detect certain hardware emulators
2196 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2197 */
2198 if (rdmsrl_safe(msr, &val_old))
2199 return false;
2200
2201 /*
2202 * Only change the bits which can be updated by wrmsrl.
2203 */
2204 val_tmp = val_old ^ mask;
2205 if (wrmsrl_safe(msr, val_tmp) ||
2206 rdmsrl_safe(msr, &val_new))
2207 return false;
2208
2209 if (val_new != val_tmp)
2210 return false;
2211
 2212 /* At this point the MSR is known to be safely accessible.
2213 * Restore the old value and return.
2214 */
2215 wrmsrl(msr, val_old);
2216
2217 return true;
2218}
2219
2176static __init void intel_sandybridge_quirk(void) 2220static __init void intel_sandybridge_quirk(void)
2177{ 2221{
2178 x86_pmu.check_microcode = intel_snb_check_microcode; 2222 x86_pmu.check_microcode = intel_snb_check_microcode;
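
check_msr() above probes a register by reading it, flipping a couple of writable bits, reading it back, and only trusting the MSR if the change stuck, which filters out emulators that silently swallow the access; the original value is restored afterwards. The same read-toggle-verify-restore idiom can be tried from user space through the msr device (root and the msr module required; the MSR number and mask below are just an example):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int msr_usable(int fd, off_t msr, uint64_t mask)
{
	uint64_t old, tmp, cur;

	if (pread(fd, &old, 8, msr) != 8)
		return 0;
	tmp = old ^ mask;			/* flip only the probe bits */
	if (pwrite(fd, &tmp, 8, msr) != 8 || pread(fd, &cur, 8, msr) != 8)
		return 0;
	if (cur != tmp)
		return 0;			/* the write was silently ignored */
	if (pwrite(fd, &old, 8, msr) != 8)	/* put the original value back */
		return 0;
	return 1;
}

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDWR);

	if (fd < 0)
		return 1;
	printf("usable: %d\n", msr_usable(fd, 0x1c9 /* e.g. LBR TOS */, 0x3));
	close(fd);
	return 0;
}
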
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
2262 union cpuid10_ebx ebx; 2306 union cpuid10_ebx ebx;
2263 struct event_constraint *c; 2307 struct event_constraint *c;
2264 unsigned int unused; 2308 unsigned int unused;
2265 int version; 2309 struct extra_reg *er;
2310 int version, i;
2266 2311
2267 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 2312 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2268 switch (boot_cpu_data.x86) { 2313 switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
2465 case 62: /* IvyBridge EP */ 2510 case 62: /* IvyBridge EP */
2466 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2511 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2467 sizeof(hw_cache_event_ids)); 2512 sizeof(hw_cache_event_ids));
2513 /* dTLB-load-misses on IVB is different than SNB */
2514 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2515
2468 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2516 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2469 sizeof(hw_cache_extra_regs)); 2517 sizeof(hw_cache_extra_regs));
2470 2518
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
2565 } 2613 }
2566 } 2614 }
2567 2615
2616 /*
 2617 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
 2618 * e.g. KVM doesn't support the LBR MSRs.
 2619 * Check all LBR MSRs here.
 2620 * Disable LBR access if any LBR MSR cannot be accessed.
2621 */
2622 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2623 x86_pmu.lbr_nr = 0;
2624 for (i = 0; i < x86_pmu.lbr_nr; i++) {
2625 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2626 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2627 x86_pmu.lbr_nr = 0;
2628 }
2629
2630 /*
2631 * Access extra MSR may cause #GP under certain circumstances.
2632 * E.g. KVM doesn't support offcore event
2633 * Check all extra_regs here.
2634 */
2635 if (x86_pmu.extra_regs) {
2636 for (er = x86_pmu.extra_regs; er->msr; er++) {
2637 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2638 /* Disable LBR select mapping */
2639 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2640 x86_pmu.lbr_sel_map = NULL;
2641 }
2642 }
2643
2568 /* Support full width counters using alternative MSR range */ 2644 /* Support full width counters using alternative MSR range */
2569 if (x86_pmu.intel_cap.full_width_write) { 2645 if (x86_pmu.intel_cap.full_width_write) {
2570 x86_pmu.max_period = x86_pmu.cntval_mask; 2646 x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
311 if (!x86_pmu.bts) 311 if (!x86_pmu.bts)
312 return 0; 312 return 0;
313 313
314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); 314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
315 if (unlikely(!buffer)) 315 if (unlikely(!buffer)) {
316 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
316 return -ENOMEM; 317 return -ENOMEM;
318 }
317 319
318 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 320 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
319 thresh = max / 16; 321 thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), 553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), 554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), 555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), 556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), 557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), 558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), 559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), 560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), 561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), 562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), 563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), 564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), 565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1226 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1227 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1228 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 1246 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 1248 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1248 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1250 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e78b36..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@ sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae sysenter_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp)
429sysenter_after_call: 428sysenter_after_call:
429 movl %eax,PT_EAX(%esp)
430 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
431 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
432 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
502 jae syscall_badsys 502 jae syscall_badsys
503syscall_call: 503syscall_call:
504 call *sys_call_table(,%eax,4) 504 call *sys_call_table(,%eax,4)
505syscall_after_call:
505 movl %eax,PT_EAX(%esp) # store the return value 506 movl %eax,PT_EAX(%esp) # store the return value
506syscall_exit: 507syscall_exit:
507 LOCKDEP_SYS_EXIT 508 LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
675END(syscall_fault) 676END(syscall_fault)
676 677
677syscall_badsys: 678syscall_badsys:
678 movl $-ENOSYS,PT_EAX(%esp) 679 movl $-ENOSYS,%eax
679 jmp syscall_exit 680 jmp syscall_after_call
680END(syscall_badsys) 681END(syscall_badsys)
681 682
682sysenter_badsys: 683sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp) 684 movl $-ENOSYS,%eax
684 jmp sysenter_after_call 685 jmp sysenter_after_call
685END(syscall_badsys) 686END(syscall_badsys)
686 CFI_ENDPROC 687 CFI_ENDPROC
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
175 if (!pud_present(pud)) { 175 if (!pud_present(pud)) {
176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); 176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); 177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
178 paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); 178 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
179 for (n = 0; n < ESPFIX_PUD_CLONES; n++) 179 for (n = 0; n < ESPFIX_PUD_CLONES; n++)
180 set_pud(&pud_p[n], pud); 180 set_pud(&pud_p[n], pud);
181 } 181 }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
185 if (!pmd_present(pmd)) { 185 if (!pmd_present(pmd)) {
186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); 186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); 187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
188 paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT); 188 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
189 for (n = 0; n < ESPFIX_PMD_CLONES; n++) 189 for (n = 0; n < ESPFIX_PMD_CLONES; n++)
190 set_pmd(&pmd_p[n], pmd); 190 set_pmd(&pmd_p[n], pmd);
191 } 191 }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
193 pte_p = pte_offset_kernel(&pmd, addr); 193 pte_p = pte_offset_kernel(&pmd, addr);
194 stack_page = (void *)__get_free_page(GFP_KERNEL); 194 stack_page = (void *)__get_free_page(GFP_KERNEL);
195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); 195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
196 paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
197 for (n = 0; n < ESPFIX_PTE_CLONES; n++) 196 for (n = 0; n < ESPFIX_PTE_CLONES; n++)
198 set_pte(&pte_p[n*PTE_STRIDE], pte); 197 set_pte(&pte_p[n*PTE_STRIDE], pte);
199 198
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
574 struct kprobe *p; 574 struct kprobe *p;
575 struct kprobe_ctlblk *kcb; 575 struct kprobe_ctlblk *kcb;
576 576
577 if (user_mode_vm(regs))
578 return 0;
579
577 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); 580 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
578 /* 581 /*
579 * We don't want to be preempted for the entire 582 * We don't want to be preempted for the entire
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); 920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
921 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 921 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
922 mark_tsc_unstable("cpufreq changes"); 922 mark_tsc_unstable("cpufreq changes");
923 }
924 923
925 set_cyc2ns_scale(tsc_khz, freq->cpu); 924 set_cyc2ns_scale(tsc_khz, freq->cpu);
925 }
926 926
927 return 0; 927 return 0;
928} 928}
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index df95a2fdff73..11b65d4f9414 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -93,6 +93,9 @@ static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
93 uint64_t flags = GET_LE(&in->sh_flags); 93 uint64_t flags = GET_LE(&in->sh_flags);
94 94
95 bool copy = flags & SHF_ALLOC && 95 bool copy = flags & SHF_ALLOC &&
96 (GET_LE(&in->sh_size) ||
97 (GET_LE(&in->sh_type) != SHT_RELA &&
98 GET_LE(&in->sh_type) != SHT_REL)) &&
96 strcmp(name, ".altinstructions") && 99 strcmp(name, ".altinstructions") &&
97 strcmp(name, ".altinstr_replacement"); 100 strcmp(name, ".altinstr_replacement");
98 101
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index e1513c47872a..5a5176de8d0a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -62,6 +62,9 @@ struct linux_binprm;
62 Only used for the 64-bit and x32 vdsos. */ 62 Only used for the 64-bit and x32 vdsos. */
63static unsigned long vdso_addr(unsigned long start, unsigned len) 63static unsigned long vdso_addr(unsigned long start, unsigned len)
64{ 64{
65#ifdef CONFIG_X86_32
66 return 0;
67#else
65 unsigned long addr, end; 68 unsigned long addr, end;
66 unsigned offset; 69 unsigned offset;
67 end = (start + PMD_SIZE - 1) & PMD_MASK; 70 end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
83 addr = align_vdso_addr(addr); 86 addr = align_vdso_addr(addr);
84 87
85 return addr; 88 return addr;
89#endif
86} 90}
87 91
88static int map_vdso(const struct vdso_image *image, bool calculate_addr) 92static int map_vdso(const struct vdso_image *image, bool calculate_addr)
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
376 beqz a2, 1f # if at start of vector, don't restore 376 beqz a2, 1f # if at start of vector, don't restore
377 377
378 addi a0, a0, -128 378 addi a0, a0, -128
379 bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 379 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
380 bbsi a0, 7, 2f 380
381 /*
382 * This fixup handler is for the extremely unlikely case where the
383 * overflow handler's reference thru a0 gets a hardware TLB refill
384 * that bumps out the (distinct, aliasing) TLB entry that mapped its
385 * prior references thru a9/a13, and where our reference now thru
386 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
387 */
388 movi a2, window_overflow_restore_a0_fixup
389 s32i a2, a3, EXC_TABLE_FIXUP
390 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
391 xsr a3, excsave1
392
393 bbsi.l a0, 7, 2f
381 394
382 /* 395 /*
383 * Restore a0 as saved by _WindowOverflow8(). 396 * Restore a0 as saved by _WindowOverflow8().
384 *
385 * FIXME: we really need a fixup handler for this L32E,
386 * for the extremely unlikely case where the overflow handler's
387 * reference thru a0 gets a hardware TLB refill that bumps out
388 * the (distinct, aliasing) TLB entry that mapped its prior
389 * references thru a9, and where our reference now thru a9
390 * gets a 2nd-level miss exception (not hardware TLB refill).
391 */ 397 */
392 398
393 l32e a2, a9, -16 399 l32e a0, a9, -16
394 wsr a2, depc # replace the saved a0 400 wsr a0, depc # replace the saved a0
395 j 1f 401 j 3f
396 402
3972: 4032:
398 /* 404 /*
399 * Restore a0 as saved by _WindowOverflow12(). 405 * Restore a0 as saved by _WindowOverflow12().
400 *
401 * FIXME: we really need a fixup handler for this L32E,
402 * for the extremely unlikely case where the overflow handler's
403 * reference thru a0 gets a hardware TLB refill that bumps out
404 * the (distinct, aliasing) TLB entry that mapped its prior
405 * references thru a13, and where our reference now thru a13
406 * gets a 2nd-level miss exception (not hardware TLB refill).
407 */ 406 */
408 407
409 l32e a2, a13, -16 408 l32e a0, a13, -16
410 wsr a2, depc # replace the saved a0 409 wsr a0, depc # replace the saved a0
4103:
411 xsr a3, excsave1
412 movi a0, 0
413 s32i a0, a3, EXC_TABLE_FIXUP
414 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
4111: 4151:
412 /* 416 /*
413 * Restore WindowBase while leaving all address registers restored. 417 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
449 453
450 s32i a0, a2, PT_DEPC 454 s32i a0, a2, PT_DEPC
451 455
456_DoubleExceptionVector_handle_exception:
452 addx4 a0, a0, a3 457 addx4 a0, a0, a3
453 l32i a0, a0, EXC_TABLE_FAST_USER 458 l32i a0, a0, EXC_TABLE_FAST_USER
454 xsr a3, excsave1 459 xsr a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
464 rotw -3 469 rotw -3
465 j 1b 470 j 1b
466 471
467 .end literal_prefix
468 472
469ENDPROC(_DoubleExceptionVector) 473ENDPROC(_DoubleExceptionVector)
470 474
471/* 475/*
 476 * Fixup handler for TLB miss in double exception handler for window overflow.
477 * We get here with windowbase set to the window that was being spilled and
478 * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
479 * (bit set) window.
480 *
481 * We do the following here:
482 * - go to the original window retaining a0 value;
483 * - set up exception stack to return back to appropriate a0 restore code
484 * (we'll need to rotate window back and there's no place to save this
485 * information, use different return address for that);
486 * - handle the exception;
487 * - go to the window that was being spilled;
488 * - set up window_overflow_restore_a0_fixup as a fixup routine;
489 * - reload a0;
490 * - restore the original window;
491 * - reset the default fixup routine;
492 * - return to user. By the time we get to this fixup handler all information
493 * about the conditions of the original double exception that happened in
494 * the window overflow handler is lost, so we just return to userspace to
495 * retry overflow from start.
496 *
497 * a0: value of depc, original value in depc
498 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
499 * a3: exctable, original value in excsave1
500 */
501
502ENTRY(window_overflow_restore_a0_fixup)
503
504 rsr a0, ps
505 extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
506 rsr a2, windowbase
507 sub a0, a2, a0
508 extui a0, a0, 0, 3
509 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
510 xsr a3, excsave1
511
512 _beqi a0, 1, .Lhandle_1
513 _beqi a0, 3, .Lhandle_3
514
515 .macro overflow_fixup_handle_exception_pane n
516
517 rsr a0, depc
518 rotw -\n
519
520 xsr a3, excsave1
521 wsr a2, depc
522 l32i a2, a3, EXC_TABLE_KSTK
523 s32i a0, a2, PT_AREG0
524
525 movi a0, .Lrestore_\n
526 s32i a0, a2, PT_DEPC
527 rsr a0, exccause
528 j _DoubleExceptionVector_handle_exception
529
530 .endm
531
532 overflow_fixup_handle_exception_pane 2
533.Lhandle_1:
534 overflow_fixup_handle_exception_pane 1
535.Lhandle_3:
536 overflow_fixup_handle_exception_pane 3
537
538 .macro overflow_fixup_restore_a0_pane n
539
540 rotw \n
541 /* Need to preserve a0 value here to be able to handle exception
542 * that may occur on a0 reload from stack. It may occur because
543 * TLB miss handler may not be atomic and pointer to page table
544 * may be lost before we get here. There are no free registers,
545 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
546 */
547 xsr a3, excsave1
548 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
549 movi a2, window_overflow_restore_a0_fixup
550 s32i a2, a3, EXC_TABLE_FIXUP
551 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
552 xsr a3, excsave1
553 bbsi.l a0, 7, 1f
554 l32e a0, a9, -16
555 j 2f
5561:
557 l32e a0, a13, -16
5582:
559 rotw -\n
560
561 .endm
562
563.Lrestore_2:
564 overflow_fixup_restore_a0_pane 2
565
566.Lset_default_fixup:
567 xsr a3, excsave1
568 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
569 movi a2, 0
570 s32i a2, a3, EXC_TABLE_FIXUP
571 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
572 xsr a3, excsave1
573 rfe
574
575.Lrestore_1:
576 overflow_fixup_restore_a0_pane 1
577 j .Lset_default_fixup
578.Lrestore_3:
579 overflow_fixup_restore_a0_pane 3
580 j .Lset_default_fixup
581
582ENDPROC(window_overflow_restore_a0_fixup)
583
584 .end literal_prefix
585/*
472 * Debug interrupt vector 586 * Debug interrupt vector
473 * 587 *
474 * There is not much space here, so simply jump to another handler. 588 * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
269 .UserExceptionVector.literal) 269 .UserExceptionVector.literal)
270 SECTION_VECTOR (_DoubleExceptionVector_literal, 270 SECTION_VECTOR (_DoubleExceptionVector_literal,
271 .DoubleExceptionVector.literal, 271 .DoubleExceptionVector.literal,
272 DOUBLEEXC_VECTOR_VADDR - 16, 272 DOUBLEEXC_VECTOR_VADDR - 40,
273 SIZEOF(.UserExceptionVector.text), 273 SIZEOF(.UserExceptionVector.text),
274 .UserExceptionVector.text) 274 .UserExceptionVector.text)
275 SECTION_VECTOR (_DoubleExceptionVector_text, 275 SECTION_VECTOR (_DoubleExceptionVector_text,
276 .DoubleExceptionVector.text, 276 .DoubleExceptionVector.text,
277 DOUBLEEXC_VECTOR_VADDR, 277 DOUBLEEXC_VECTOR_VADDR,
278 32, 278 40,
279 .DoubleExceptionVector.literal) 279 .DoubleExceptionVector.literal)
280 280
281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
191 return -EINVAL; 191 return -EINVAL;
192 } 192 }
193 193
194 if (it && start - it->start < bank_sz) { 194 if (it && start - it->start <= bank_sz) {
195 if (start == it->start) { 195 if (start == it->start) {
196 if (end - it->start < bank_sz) { 196 if (end - it->start < bank_sz) {
197 it->start = end; 197 it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
872{ 872{
873 lockdep_assert_held(q->queue_lock); 873 lockdep_assert_held(q->queue_lock);
874 874
875 /*
876 * @q could be exiting and already have destroyed all blkgs as
877 * indicated by NULL root_blkg. If so, don't confuse policies.
878 */
879 if (!q->root_blkg)
880 return;
881
875 blk_throtl_drain(q); 882 blk_throtl_drain(q);
876} 883}
877 884
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
27EXPORT_SYMBOL(blk_queue_find_tag); 27EXPORT_SYMBOL(blk_queue_find_tag);
28 28
29/** 29/**
30 * __blk_free_tags - release a given set of tag maintenance info 30 * blk_free_tags - release a given set of tag maintenance info
31 * @bqt: the tag map to free 31 * @bqt: the tag map to free
32 * 32 *
33 * Tries to free the specified @bqt. Returns true if it was 33 * Drop the reference count on @bqt and frees it when the last reference
34 * actually freed and false if there are still references using it 34 * is dropped.
35 */ 35 */
36static int __blk_free_tags(struct blk_queue_tag *bqt) 36void blk_free_tags(struct blk_queue_tag *bqt)
37{ 37{
38 int retval; 38 if (atomic_dec_and_test(&bqt->refcnt)) {
39
40 retval = atomic_dec_and_test(&bqt->refcnt);
41 if (retval) {
42 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < 39 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
43 bqt->max_depth); 40 bqt->max_depth);
44 41
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
50 47
51 kfree(bqt); 48 kfree(bqt);
52 } 49 }
53
54 return retval;
55} 50}
51EXPORT_SYMBOL(blk_free_tags);
56 52
57/** 53/**
58 * __blk_queue_free_tags - release tag maintenance info 54 * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
69 if (!bqt) 65 if (!bqt)
70 return; 66 return;
71 67
72 __blk_free_tags(bqt); 68 blk_free_tags(bqt);
73 69
74 q->queue_tags = NULL; 70 q->queue_tags = NULL;
75 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); 71 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
76} 72}
77 73
78/** 74/**
79 * blk_free_tags - release a given set of tag maintenance info
80 * @bqt: the tag map to free
81 *
82 * For externally managed @bqt frees the map. Callers of this
83 * function must guarantee to have released all the queues that
84 * might have been using this tag map.
85 */
86void blk_free_tags(struct blk_queue_tag *bqt)
87{
88 if (unlikely(!__blk_free_tags(bqt)))
89 BUG();
90}
91EXPORT_SYMBOL(blk_free_tags);
92
93/**
94 * blk_queue_free_tags - release tag maintenance info 75 * blk_queue_free_tags - release tag maintenance info
95 * @q: the request queue for the device 76 * @q: the request queue for the device
96 * 77 *
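
Editor's note: the blk_free_tags() rework above folds __blk_free_tags() into a single
"drop a reference, free on the last put" helper. The snippet below is a minimal
userspace sketch of that pattern, assuming C11 atomics; the struct and function names
are illustrative stand-ins, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tag_map {
	atomic_int refcnt;
	unsigned long *bits;		/* stand-in for bqt->tag_map */
};

static struct tag_map *tag_map_alloc(void)
{
	struct tag_map *map = calloc(1, sizeof(*map));

	if (map)
		atomic_init(&map->refcnt, 1);
	return map;
}

static void tag_map_put(struct tag_map *map)
{
	/* Free only when the last reference is dropped. */
	if (atomic_fetch_sub(&map->refcnt, 1) == 1) {
		free(map->bits);
		free(map);
	}
}

int main(void)
{
	struct tag_map *map = tag_map_alloc();

	if (!map)
		return 1;
	atomic_fetch_add(&map->refcnt, 1);	/* a second user takes a reference */
	tag_map_put(map);			/* still alive: one reference left */
	tag_map_put(map);			/* last put frees the map */
	printf("map released on last put\n");
	return 0;
}
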
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
690 case BLKROSET: 690 case BLKROSET:
691 case BLKDISCARD: 691 case BLKDISCARD:
692 case BLKSECDISCARD: 692 case BLKSECDISCARD:
693 case BLKZEROOUT:
693 /* 694 /*
694 * the ones below are implemented in blkdev_locked_ioctl, 695 * the ones below are implemented in blkdev_locked_ioctl,
695 * but we call blkdev_ioctl, which gets the lock for us 696 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c67f6f5ad611..36b0e61f9c09 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,6 +30,10 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#ifdef CONFIG_ACPI_PROCFS_POWER
34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
36#endif
33#include <linux/platform_device.h> 37#include <linux/platform_device.h>
34#include <linux/power_supply.h> 38#include <linux/power_supply.h>
35#include <linux/acpi.h> 39#include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
52MODULE_DESCRIPTION("ACPI AC Adapter Driver"); 56MODULE_DESCRIPTION("ACPI AC Adapter Driver");
53MODULE_LICENSE("GPL"); 57MODULE_LICENSE("GPL");
54 58
59
55static int acpi_ac_add(struct acpi_device *device); 60static int acpi_ac_add(struct acpi_device *device);
56static int acpi_ac_remove(struct acpi_device *device); 61static int acpi_ac_remove(struct acpi_device *device);
57static void acpi_ac_notify(struct acpi_device *device, u32 event); 62static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev);
67#endif 72#endif
68static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); 73static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
69 74
75#ifdef CONFIG_ACPI_PROCFS_POWER
76extern struct proc_dir_entry *acpi_lock_ac_dir(void);
77extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
78static int acpi_ac_open_fs(struct inode *inode, struct file *file);
79#endif
80
81
70static int ac_sleep_before_get_state_ms; 82static int ac_sleep_before_get_state_ms;
71 83
72static struct acpi_driver acpi_ac_driver = { 84static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
91 103
92#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) 104#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
93 105
106#ifdef CONFIG_ACPI_PROCFS_POWER
107static const struct file_operations acpi_ac_fops = {
108 .owner = THIS_MODULE,
109 .open = acpi_ac_open_fs,
110 .read = seq_read,
111 .llseek = seq_lseek,
112 .release = single_release,
113};
114#endif
115
94/* -------------------------------------------------------------------------- 116/* --------------------------------------------------------------------------
95 AC Adapter Management 117 AC Adapter Management
96 -------------------------------------------------------------------------- */ 118 -------------------------------------------------------------------------- */
@@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = {
143 POWER_SUPPLY_PROP_ONLINE, 165 POWER_SUPPLY_PROP_ONLINE,
144}; 166};
145 167
168#ifdef CONFIG_ACPI_PROCFS_POWER
169/* --------------------------------------------------------------------------
170 FS Interface (/proc)
171 -------------------------------------------------------------------------- */
172
173static struct proc_dir_entry *acpi_ac_dir;
174
175static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
176{
177 struct acpi_ac *ac = seq->private;
178
179
180 if (!ac)
181 return 0;
182
183 if (acpi_ac_get_state(ac)) {
184 seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
185 return 0;
186 }
187
188 seq_puts(seq, "state: ");
189 switch (ac->state) {
190 case ACPI_AC_STATUS_OFFLINE:
191 seq_puts(seq, "off-line\n");
192 break;
193 case ACPI_AC_STATUS_ONLINE:
194 seq_puts(seq, "on-line\n");
195 break;
196 default:
197 seq_puts(seq, "unknown\n");
198 break;
199 }
200
201 return 0;
202}
203
204static int acpi_ac_open_fs(struct inode *inode, struct file *file)
205{
206 return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
207}
208
209static int acpi_ac_add_fs(struct acpi_ac *ac)
210{
211 struct proc_dir_entry *entry = NULL;
212
213 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
214 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
215 if (!acpi_device_dir(ac->device)) {
216 acpi_device_dir(ac->device) =
217 proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
218 if (!acpi_device_dir(ac->device))
219 return -ENODEV;
220 }
221
222 /* 'state' [R] */
223 entry = proc_create_data(ACPI_AC_FILE_STATE,
224 S_IRUGO, acpi_device_dir(ac->device),
225 &acpi_ac_fops, ac);
226 if (!entry)
227 return -ENODEV;
228 return 0;
229}
230
231static int acpi_ac_remove_fs(struct acpi_ac *ac)
232{
233
234 if (acpi_device_dir(ac->device)) {
235 remove_proc_entry(ACPI_AC_FILE_STATE,
236 acpi_device_dir(ac->device));
237 remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
238 acpi_device_dir(ac->device) = NULL;
239 }
240
241 return 0;
242}
243#endif
244
146/* -------------------------------------------------------------------------- 245/* --------------------------------------------------------------------------
147 Driver Model 246 Driver Model
148 -------------------------------------------------------------------------- */ 247 -------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
243 goto end; 342 goto end;
244 343
245 ac->charger.name = acpi_device_bid(device); 344 ac->charger.name = acpi_device_bid(device);
345#ifdef CONFIG_ACPI_PROCFS_POWER
346 result = acpi_ac_add_fs(ac);
347 if (result)
348 goto end;
349#endif
246 ac->charger.type = POWER_SUPPLY_TYPE_MAINS; 350 ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
247 ac->charger.properties = ac_props; 351 ac->charger.properties = ac_props;
248 ac->charger.num_properties = ARRAY_SIZE(ac_props); 352 ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
258 ac->battery_nb.notifier_call = acpi_ac_battery_notify; 362 ac->battery_nb.notifier_call = acpi_ac_battery_notify;
259 register_acpi_notifier(&ac->battery_nb); 363 register_acpi_notifier(&ac->battery_nb);
260end: 364end:
261 if (result) 365 if (result) {
366#ifdef CONFIG_ACPI_PROCFS_POWER
367 acpi_ac_remove_fs(ac);
368#endif
262 kfree(ac); 369 kfree(ac);
370 }
263 371
264 dmi_check_system(ac_dmi_table); 372 dmi_check_system(ac_dmi_table);
265 return result; 373 return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
303 power_supply_unregister(&ac->charger); 411 power_supply_unregister(&ac->charger);
304 unregister_acpi_notifier(&ac->battery_nb); 412 unregister_acpi_notifier(&ac->battery_nb);
305 413
414#ifdef CONFIG_ACPI_PROCFS_POWER
415 acpi_ac_remove_fs(ac);
416#endif
417
306 kfree(ac); 418 kfree(ac);
307 419
308 return 0; 420 return 0;
@@ -315,9 +427,20 @@ static int __init acpi_ac_init(void)
315 if (acpi_disabled) 427 if (acpi_disabled)
316 return -ENODEV; 428 return -ENODEV;
317 429
430#ifdef CONFIG_ACPI_PROCFS_POWER
431 acpi_ac_dir = acpi_lock_ac_dir();
432 if (!acpi_ac_dir)
433 return -ENODEV;
434#endif
435
436
318 result = acpi_bus_register_driver(&acpi_ac_driver); 437 result = acpi_bus_register_driver(&acpi_ac_driver);
319 if (result < 0) 438 if (result < 0) {
439#ifdef CONFIG_ACPI_PROCFS_POWER
440 acpi_unlock_ac_dir(acpi_ac_dir);
441#endif
320 return -ENODEV; 442 return -ENODEV;
443 }
321 444
322 return 0; 445 return 0;
323} 446}
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
325static void __exit acpi_ac_exit(void) 448static void __exit acpi_ac_exit(void)
326{ 449{
327 acpi_bus_unregister_driver(&acpi_ac_driver); 450 acpi_bus_unregister_driver(&acpi_ac_driver);
451#ifdef CONFIG_ACPI_PROCFS_POWER
452 acpi_unlock_ac_dir(acpi_ac_dir);
453#endif
328} 454}
329module_init(acpi_ac_init); 455module_init(acpi_ac_init);
330module_exit(acpi_ac_exit); 456module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 6703c1fd993a..4ddb0dca56f6 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -14,6 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16static const struct acpi_device_id acpi_pnp_device_ids[] = { 16static const struct acpi_device_id acpi_pnp_device_ids[] = {
17 /* soc_button_array */
18 {"PNP0C40"},
17 /* pata_isapnp */ 19 /* pata_isapnp */
18 {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ 20 {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
19 /* floppy */ 21 /* floppy */
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 0d7116f34b95..130f513e08c9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/suspend.h> 37#include <linux/suspend.h>
38#include <linux/delay.h>
38#include <asm/unaligned.h> 39#include <asm/unaligned.h>
39 40
40#ifdef CONFIG_ACPI_PROCFS_POWER 41#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -534,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
534 " invalid.\n"); 535 " invalid.\n");
535 } 536 }
536 537
538 /*
539 * When fully charged, some batteries wrongly report
540 * capacity_now = design_capacity instead of = full_charge_capacity
541 */
542 if (battery->capacity_now > battery->full_charge_capacity
543 && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
544 battery->capacity_now = battery->full_charge_capacity;
545 if (battery->capacity_now != battery->design_capacity)
546 printk_once(KERN_WARNING FW_BUG
547 "battery: reported current charge level (%d) "
548 "is higher than reported maximum charge level (%d).\n",
549 battery->capacity_now, battery->full_charge_capacity);
550 }
551
537 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) 552 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
538 && battery->capacity_now >= 0 && battery->capacity_now <= 100) 553 && battery->capacity_now >= 0 && battery->capacity_now <= 100)
539 battery->capacity_now = (battery->capacity_now * 554 battery->capacity_now = (battery->capacity_now *
@@ -1151,6 +1166,28 @@ static struct dmi_system_id bat_dmi_table[] = {
1151 {}, 1166 {},
1152}; 1167};
1153 1168
1169/*
 1170 * Some machines' (e.g. Lenovo Z480) ECs are not stable
 1171 * during boot, which sometimes causes the battery driver
 1172 * to fail probing because battery information cannot be
 1173 * read from the EC. After several retries the operation
 1174 * may succeed, so retry here with a 20ms sleep between
 1175 * attempts.
1176 */
1177static int acpi_battery_update_retry(struct acpi_battery *battery)
1178{
1179 int retry, ret;
1180
1181 for (retry = 5; retry; retry--) {
1182 ret = acpi_battery_update(battery, false);
1183 if (!ret)
1184 break;
1185
1186 msleep(20);
1187 }
1188 return ret;
1189}
1190
1154static int acpi_battery_add(struct acpi_device *device) 1191static int acpi_battery_add(struct acpi_device *device)
1155{ 1192{
1156 int result = 0; 1193 int result = 0;
@@ -1169,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
1169 mutex_init(&battery->sysfs_lock); 1206 mutex_init(&battery->sysfs_lock);
1170 if (acpi_has_method(battery->device->handle, "_BIX")) 1207 if (acpi_has_method(battery->device->handle, "_BIX"))
1171 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); 1208 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
1172 result = acpi_battery_update(battery, false); 1209
1210 result = acpi_battery_update_retry(battery);
1173 if (result) 1211 if (result)
1174 goto fail; 1212 goto fail;
1213
1175#ifdef CONFIG_ACPI_PROCFS_POWER 1214#ifdef CONFIG_ACPI_PROCFS_POWER
1176 result = acpi_battery_add_fs(device); 1215 result = acpi_battery_add_fs(device);
1177#endif 1216#endif
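
Editor's note: the acpi_battery_update_retry() helper added above is a plain bounded
retry-with-delay loop. Below is a small userspace sketch of the same idiom, assuming
POSIX usleep(); try_read_battery() is a hypothetical stand-in for the real update call.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int try_read_battery(int attempt)
{
	/* Pretend the EC only answers on the third attempt. */
	return attempt < 3 ? -EAGAIN : 0;
}

static int read_battery_with_retry(void)
{
	int retry, ret = -EIO;

	for (retry = 1; retry <= 5; retry++) {
		ret = try_read_battery(retry);
		if (!ret)
			break;
		usleep(20 * 1000);	/* 20 ms between attempts, like msleep(20) */
	}
	return ret;			/* last error if every attempt failed */
}

int main(void)
{
	printf("battery read %s\n",
	       read_battery_with_retry() ? "failed" : "succeeded");
	return 0;
}
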
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ad11ba4a412d..a66ab658abbc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,11 +1,14 @@
1/* 1/*
2 * ec.c - ACPI Embedded Controller Driver (v2.1) 2 * ec.c - ACPI Embedded Controller Driver (v2.2)
3 * 3 *
4 * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de> 4 * Copyright (C) 2001-2014 Intel Corporation
5 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> 5 * Author: 2014 Lv Zheng <lv.zheng@intel.com>
6 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> 6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 8 * 2004 Luming Yu <luming.yu@intel.com>
9 * 2001, 2002 Andy Grover <andrew.grover@intel.com>
10 * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
11 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
9 * 12 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 * 14 *
@@ -52,6 +55,7 @@
52/* EC status register */ 55/* EC status register */
53#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ 56#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
54#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ 57#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
58#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
55#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ 59#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
56#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ 60#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
57 61
@@ -78,6 +82,9 @@ enum {
78 EC_FLAGS_BLOCKED, /* Transactions are blocked */ 82 EC_FLAGS_BLOCKED, /* Transactions are blocked */
79}; 83};
80 84
85#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
86#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
87
81/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ 88/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
82static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; 89static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
83module_param(ec_delay, uint, 0644); 90module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
109 u8 ri; 116 u8 ri;
110 u8 wlen; 117 u8 wlen;
111 u8 rlen; 118 u8 rlen;
112 bool done; 119 u8 flags;
113}; 120};
114 121
115struct acpi_ec *boot_ec, *first_ec; 122struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
127static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 134static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
128{ 135{
129 u8 x = inb(ec->command_addr); 136 u8 x = inb(ec->command_addr);
130 pr_debug("---> status = 0x%2.2x\n", x); 137 pr_debug("EC_SC(R) = 0x%2.2x "
138 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
139 x,
140 !!(x & ACPI_EC_FLAG_SCI),
141 !!(x & ACPI_EC_FLAG_BURST),
142 !!(x & ACPI_EC_FLAG_CMD),
143 !!(x & ACPI_EC_FLAG_IBF),
144 !!(x & ACPI_EC_FLAG_OBF));
131 return x; 145 return x;
132} 146}
133 147
134static inline u8 acpi_ec_read_data(struct acpi_ec *ec) 148static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
135{ 149{
136 u8 x = inb(ec->data_addr); 150 u8 x = inb(ec->data_addr);
137 pr_debug("---> data = 0x%2.2x\n", x); 151 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
138 return x; 152 return x;
139} 153}
140 154
141static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) 155static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
142{ 156{
143 pr_debug("<--- command = 0x%2.2x\n", command); 157 pr_debug("EC_SC(W) = 0x%2.2x\n", command);
144 outb(command, ec->command_addr); 158 outb(command, ec->command_addr);
145} 159}
146 160
147static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) 161static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
148{ 162{
149 pr_debug("<--- data = 0x%2.2x\n", data); 163 pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
150 outb(data, ec->data_addr); 164 outb(data, ec->data_addr);
151} 165}
152 166
153static int ec_transaction_done(struct acpi_ec *ec) 167static int ec_transaction_completed(struct acpi_ec *ec)
154{ 168{
155 unsigned long flags; 169 unsigned long flags;
156 int ret = 0; 170 int ret = 0;
157 spin_lock_irqsave(&ec->lock, flags); 171 spin_lock_irqsave(&ec->lock, flags);
158 if (!ec->curr || ec->curr->done) 172 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
159 ret = 1; 173 ret = 1;
160 spin_unlock_irqrestore(&ec->lock, flags); 174 spin_unlock_irqrestore(&ec->lock, flags);
161 return ret; 175 return ret;
162} 176}
163 177
164static void start_transaction(struct acpi_ec *ec) 178static bool advance_transaction(struct acpi_ec *ec)
165{ 179{
166 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
167 ec->curr->done = false;
168 acpi_ec_write_cmd(ec, ec->curr->command);
169}
170
171static void advance_transaction(struct acpi_ec *ec, u8 status)
172{
173 unsigned long flags;
174 struct transaction *t; 180 struct transaction *t;
181 u8 status;
182 bool wakeup = false;
175 183
176 spin_lock_irqsave(&ec->lock, flags); 184 pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
185 status = acpi_ec_read_status(ec);
177 t = ec->curr; 186 t = ec->curr;
178 if (!t) 187 if (!t)
179 goto unlock; 188 goto err;
180 if (t->wlen > t->wi) { 189 if (t->flags & ACPI_EC_COMMAND_POLL) {
181 if ((status & ACPI_EC_FLAG_IBF) == 0) 190 if (t->wlen > t->wi) {
182 acpi_ec_write_data(ec, 191 if ((status & ACPI_EC_FLAG_IBF) == 0)
183 t->wdata[t->wi++]); 192 acpi_ec_write_data(ec, t->wdata[t->wi++]);
184 else 193 else
185 goto err; 194 goto err;
186 } else if (t->rlen > t->ri) { 195 } else if (t->rlen > t->ri) {
187 if ((status & ACPI_EC_FLAG_OBF) == 1) { 196 if ((status & ACPI_EC_FLAG_OBF) == 1) {
188 t->rdata[t->ri++] = acpi_ec_read_data(ec); 197 t->rdata[t->ri++] = acpi_ec_read_data(ec);
189 if (t->rlen == t->ri) 198 if (t->rlen == t->ri) {
190 t->done = true; 199 t->flags |= ACPI_EC_COMMAND_COMPLETE;
200 wakeup = true;
201 }
202 } else
203 goto err;
204 } else if (t->wlen == t->wi &&
205 (status & ACPI_EC_FLAG_IBF) == 0) {
206 t->flags |= ACPI_EC_COMMAND_COMPLETE;
207 wakeup = true;
208 }
209 return wakeup;
210 } else {
211 if ((status & ACPI_EC_FLAG_IBF) == 0) {
212 acpi_ec_write_cmd(ec, t->command);
213 t->flags |= ACPI_EC_COMMAND_POLL;
191 } else 214 } else
192 goto err; 215 goto err;
193 } else if (t->wlen == t->wi && 216 return wakeup;
194 (status & ACPI_EC_FLAG_IBF) == 0) 217 }
195 t->done = true;
196 goto unlock;
197err: 218err:
198 /* 219 /*
 199 * If the SCI bit is set, don't treat this as a false IRQ; 220 * If the SCI bit is set, don't treat this as a false IRQ;
 200 * otherwise an unhandled IRQ would be counted as a false one. 221 * otherwise an unhandled IRQ would be counted as a false one.
201 */ 222 */
202 if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) 223 if (!(status & ACPI_EC_FLAG_SCI)) {
203 ++t->irq_count; 224 if (in_interrupt() && t)
225 ++t->irq_count;
226 }
227 return wakeup;
228}
204 229
205unlock: 230static void start_transaction(struct acpi_ec *ec)
206 spin_unlock_irqrestore(&ec->lock, flags); 231{
232 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
233 ec->curr->flags = 0;
234 (void)advance_transaction(ec);
207} 235}
208 236
209static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); 237static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
228 /* don't sleep with disabled interrupts */ 256 /* don't sleep with disabled interrupts */
229 if (EC_FLAGS_MSI || irqs_disabled()) { 257 if (EC_FLAGS_MSI || irqs_disabled()) {
230 udelay(ACPI_EC_MSI_UDELAY); 258 udelay(ACPI_EC_MSI_UDELAY);
231 if (ec_transaction_done(ec)) 259 if (ec_transaction_completed(ec))
232 return 0; 260 return 0;
233 } else { 261 } else {
234 if (wait_event_timeout(ec->wait, 262 if (wait_event_timeout(ec->wait,
235 ec_transaction_done(ec), 263 ec_transaction_completed(ec),
236 msecs_to_jiffies(1))) 264 msecs_to_jiffies(1)))
237 return 0; 265 return 0;
238 } 266 }
239 advance_transaction(ec, acpi_ec_read_status(ec)); 267 spin_lock_irqsave(&ec->lock, flags);
268 (void)advance_transaction(ec);
269 spin_unlock_irqrestore(&ec->lock, flags);
240 } while (time_before(jiffies, delay)); 270 } while (time_before(jiffies, delay));
241 pr_debug("controller reset, restart transaction\n"); 271 pr_debug("controller reset, restart transaction\n");
242 spin_lock_irqsave(&ec->lock, flags); 272 spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
268 return ret; 298 return ret;
269} 299}
270 300
271static int ec_check_ibf0(struct acpi_ec *ec)
272{
273 u8 status = acpi_ec_read_status(ec);
274 return (status & ACPI_EC_FLAG_IBF) == 0;
275}
276
277static int ec_wait_ibf0(struct acpi_ec *ec)
278{
279 unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
280 /* interrupt wait manually if GPE mode is not active */
281 while (time_before(jiffies, delay))
282 if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
283 msecs_to_jiffies(1)))
284 return 0;
285 return -ETIME;
286}
287
288static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) 301static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
289{ 302{
290 int status; 303 int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
305 goto unlock; 318 goto unlock;
306 } 319 }
307 } 320 }
308 if (ec_wait_ibf0(ec)) {
309 pr_err("input buffer is not empty, "
310 "aborting transaction\n");
311 status = -ETIME;
312 goto end;
313 }
314 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", 321 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
315 t->command, t->wdata ? t->wdata[0] : 0); 322 t->command, t->wdata ? t->wdata[0] : 0);
316 /* disable GPE during transaction if storm is detected */ 323 /* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
334 set_bit(EC_FLAGS_GPE_STORM, &ec->flags); 341 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
335 } 342 }
336 pr_debug("transaction end\n"); 343 pr_debug("transaction end\n");
337end:
338 if (ec->global_lock) 344 if (ec->global_lock)
339 acpi_release_global_lock(glk); 345 acpi_release_global_lock(glk);
340unlock: 346unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
634static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, 640static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
635 u32 gpe_number, void *data) 641 u32 gpe_number, void *data)
636{ 642{
643 unsigned long flags;
637 struct acpi_ec *ec = data; 644 struct acpi_ec *ec = data;
638 u8 status = acpi_ec_read_status(ec);
639 645
640 pr_debug("~~~> interrupt, status:0x%02x\n", status); 646 spin_lock_irqsave(&ec->lock, flags);
641 647 if (advance_transaction(ec))
642 advance_transaction(ec, status);
643 if (ec_transaction_done(ec) &&
644 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
645 wake_up(&ec->wait); 648 wake_up(&ec->wait);
646 ec_check_sci(ec, acpi_ec_read_status(ec)); 649 spin_unlock_irqrestore(&ec->lock, flags);
647 } 650 ec_check_sci(ec, acpi_ec_read_status(ec));
648 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; 651 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
649} 652}
650 653
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
1066 /* fall through */ 1069 /* fall through */
1067 } 1070 }
1068 1071
1069 if (EC_FLAGS_SKIP_DSDT_SCAN) 1072 if (EC_FLAGS_SKIP_DSDT_SCAN) {
1073 kfree(saved_ec);
1070 return -ENODEV; 1074 return -ENODEV;
1075 }
1071 1076
1072 /* This workaround is needed only on some broken machines, 1077 /* This workaround is needed only on some broken machines,
1073 * which require early EC, but fail to provide ECDT */ 1078 * which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
1105 } 1110 }
1106error: 1111error:
1107 kfree(boot_ec); 1112 kfree(boot_ec);
1113 kfree(saved_ec);
1108 boot_ec = NULL; 1114 boot_ec = NULL;
1109 return -ENODEV; 1115 return -ENODEV;
1110} 1116}
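
Editor's note: part of the ec.c rework above is richer debug output that decodes every
flag bit of the EC status register on each read. The sketch below shows just that
decode step on a plain byte, using the same bit definitions the patch introduces; it is
an illustration only, not the driver's I/O path.

#include <stdint.h>
#include <stdio.h>

#define ACPI_EC_FLAG_OBF	0x01	/* output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

static void decode_ec_status(uint8_t x)
{
	printf("EC_SC(R) = 0x%2.2x SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
	       x,
	       !!(x & ACPI_EC_FLAG_SCI),
	       !!(x & ACPI_EC_FLAG_BURST),
	       !!(x & ACPI_EC_FLAG_CMD),
	       !!(x & ACPI_EC_FLAG_IBF),
	       !!(x & ACPI_EC_FLAG_OBF));
}

int main(void)
{
	decode_ec_status(0x28);	/* SCI pending, command byte still in the input buffer */
	decode_ec_status(0x01);	/* data waiting in the output buffer */
	return 0;
}
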
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 0bdacc5e26a3..2ba8f02ced36 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
77 switch (ares->type) { 77 switch (ares->type) {
78 case ACPI_RESOURCE_TYPE_MEMORY24: 78 case ACPI_RESOURCE_TYPE_MEMORY24:
79 memory24 = &ares->data.memory24; 79 memory24 = &ares->data.memory24;
80 if (!memory24->address_length) 80 if (!memory24->minimum && !memory24->address_length)
81 return false; 81 return false;
82 acpi_dev_get_memresource(res, memory24->minimum, 82 acpi_dev_get_memresource(res, memory24->minimum,
83 memory24->address_length, 83 memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
85 break; 85 break;
86 case ACPI_RESOURCE_TYPE_MEMORY32: 86 case ACPI_RESOURCE_TYPE_MEMORY32:
87 memory32 = &ares->data.memory32; 87 memory32 = &ares->data.memory32;
88 if (!memory32->address_length) 88 if (!memory32->minimum && !memory32->address_length)
89 return false; 89 return false;
90 acpi_dev_get_memresource(res, memory32->minimum, 90 acpi_dev_get_memresource(res, memory32->minimum,
91 memory32->address_length, 91 memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
93 break; 93 break;
94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
95 fixed_memory32 = &ares->data.fixed_memory32; 95 fixed_memory32 = &ares->data.fixed_memory32;
96 if (!fixed_memory32->address_length) 96 if (!fixed_memory32->address && !fixed_memory32->address_length)
97 return false; 97 return false;
98 acpi_dev_get_memresource(res, fixed_memory32->address, 98 acpi_dev_get_memresource(res, fixed_memory32->address,
99 fixed_memory32->address_length, 99 fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
150 switch (ares->type) { 150 switch (ares->type) {
151 case ACPI_RESOURCE_TYPE_IO: 151 case ACPI_RESOURCE_TYPE_IO:
152 io = &ares->data.io; 152 io = &ares->data.io;
153 if (!io->address_length) 153 if (!io->minimum && !io->address_length)
154 return false; 154 return false;
155 acpi_dev_get_ioresource(res, io->minimum, 155 acpi_dev_get_ioresource(res, io->minimum,
156 io->address_length, 156 io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
158 break; 158 break;
159 case ACPI_RESOURCE_TYPE_FIXED_IO: 159 case ACPI_RESOURCE_TYPE_FIXED_IO:
160 fixed_io = &ares->data.fixed_io; 160 fixed_io = &ares->data.fixed_io;
161 if (!fixed_io->address_length) 161 if (!fixed_io->address && !fixed_io->address_length)
162 return false; 162 return false;
163 acpi_dev_get_ioresource(res, fixed_io->address, 163 acpi_dev_get_ioresource(res, fixed_io->address,
164 fixed_io->address_length, 164 fixed_io->address_length,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index fb9ffe9adc64..350d52a8f781 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
68MODULE_DESCRIPTION("ACPI Video Driver"); 68MODULE_DESCRIPTION("ACPI Video Driver");
69MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
70 70
71static bool brightness_switch_enabled; 71static bool brightness_switch_enabled = 1;
72module_param(brightness_switch_enabled, bool, 0644); 72module_param(brightness_switch_enabled, bool, 0644);
73 73
74/* 74/*
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
241 return use_native_backlight_dmi; 241 return use_native_backlight_dmi;
242} 242}
243 243
244static bool acpi_video_verify_backlight_support(void) 244bool acpi_video_verify_backlight_support(void)
245{ 245{
246 if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && 246 if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
247 backlight_device_registered(BACKLIGHT_RAW)) 247 backlight_device_registered(BACKLIGHT_RAW))
248 return false; 248 return false;
249 return acpi_video_backlight_support(); 249 return acpi_video_backlight_support();
250} 250}
251EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
251 252
252/* backlight device sysfs support */ 253/* backlight device sysfs support */
253static int acpi_video_get_brightness(struct backlight_device *bd) 254static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -563,6 +564,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
563 }, 564 },
564 }, 565 },
565 { 566 {
567 .callback = video_set_use_native_backlight,
568 .ident = "Acer TravelMate B113",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
571 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
572 },
573 },
574 {
566 .callback = video_set_use_native_backlight, 575 .callback = video_set_use_native_backlight,
567 .ident = "HP ProBook 4340s", 576 .ident = "HP ProBook 4340s",
568 .matches = { 577 .matches = {
@@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
572 }, 581 },
573 { 582 {
574 .callback = video_set_use_native_backlight, 583 .callback = video_set_use_native_backlight,
584 .ident = "HP ProBook 4540s",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
587 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
588 },
589 },
590 {
591 .callback = video_set_use_native_backlight,
575 .ident = "HP ProBook 2013 models", 592 .ident = "HP ProBook 2013 models",
576 .matches = { 593 .matches = {
577 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 594 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 33e3db548a29..c42feb2bacd0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
166 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 166 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
167 }, 167 },
168 }, 168 },
169 {
170 .callback = video_detect_force_vendor,
171 .ident = "Dell Inspiron 5737",
172 .matches = {
173 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
175 },
176 },
169 { }, 177 { },
170}; 178};
171 179
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
456 456
457 /* Promise */ 457 /* Promise */
458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
459 { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
459 460
460 /* Asmedia */ 461 /* Asmedia */
461 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ 462 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 05882e4445a6..5513296e5e2e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -371,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
371 int pmp, unsigned long deadline, 371 int pmp, unsigned long deadline,
372 int (*check_ready)(struct ata_link *link)); 372 int (*check_ready)(struct ata_link *link));
373 373
374unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
374int ahci_stop_engine(struct ata_port *ap); 375int ahci_stop_engine(struct ata_port *ap);
376void ahci_start_fis_rx(struct ata_port *ap);
375void ahci_start_engine(struct ata_port *ap); 377void ahci_start_engine(struct ata_port *ap);
376int ahci_check_ready(struct ata_link *link); 378int ahci_check_ready(struct ata_link *link);
377int ahci_kick_engine(struct ata_port *ap); 379int ahci_kick_engine(struct ata_port *ap);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 3a901520c62b..cac4360f272a 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -58,6 +58,8 @@ enum ahci_imx_type {
58struct imx_ahci_priv { 58struct imx_ahci_priv {
59 struct platform_device *ahci_pdev; 59 struct platform_device *ahci_pdev;
60 enum ahci_imx_type type; 60 enum ahci_imx_type type;
61 struct clk *sata_clk;
62 struct clk *sata_ref_clk;
61 struct clk *ahb_clk; 63 struct clk *ahb_clk;
62 struct regmap *gpr; 64 struct regmap *gpr;
63 bool no_device; 65 bool no_device;
@@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
224 return ret; 226 return ret;
225 } 227 }
226 228
227 ret = ahci_platform_enable_clks(hpriv); 229 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
228 if (ret < 0) 230 if (ret < 0)
229 goto disable_regulator; 231 goto disable_regulator;
230 232
@@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
291 !IMX6Q_GPR13_SATA_MPLL_CLK_EN); 293 !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
292 } 294 }
293 295
294 ahci_platform_disable_clks(hpriv); 296 clk_disable_unprepare(imxpriv->sata_ref_clk);
295 297
296 if (hpriv->target_pwr) 298 if (hpriv->target_pwr)
297 regulator_disable(hpriv->target_pwr); 299 regulator_disable(hpriv->target_pwr);
@@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
324 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); 326 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
325 imx_sata_disable(hpriv); 327 imx_sata_disable(hpriv);
326 imxpriv->no_device = true; 328 imxpriv->no_device = true;
329
330 dev_info(ap->dev, "no device found, disabling link.\n");
331 dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
327} 332}
328 333
329static int ahci_imx_softreset(struct ata_link *link, unsigned int *class, 334static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
@@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
385 imxpriv->no_device = false; 390 imxpriv->no_device = false;
386 imxpriv->first_time = true; 391 imxpriv->first_time = true;
387 imxpriv->type = (enum ahci_imx_type)of_id->data; 392 imxpriv->type = (enum ahci_imx_type)of_id->data;
393
394 imxpriv->sata_clk = devm_clk_get(dev, "sata");
395 if (IS_ERR(imxpriv->sata_clk)) {
396 dev_err(dev, "can't get sata clock.\n");
397 return PTR_ERR(imxpriv->sata_clk);
398 }
399
400 imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
401 if (IS_ERR(imxpriv->sata_ref_clk)) {
402 dev_err(dev, "can't get sata_ref clock.\n");
403 return PTR_ERR(imxpriv->sata_ref_clk);
404 }
405
388 imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); 406 imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
389 if (IS_ERR(imxpriv->ahb_clk)) { 407 if (IS_ERR(imxpriv->ahb_clk)) {
390 dev_err(dev, "can't get ahb clock.\n"); 408 dev_err(dev, "can't get ahb clock.\n");
@@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
407 425
408 hpriv->plat_data = imxpriv; 426 hpriv->plat_data = imxpriv;
409 427
410 ret = imx_sata_enable(hpriv); 428 ret = clk_prepare_enable(imxpriv->sata_clk);
411 if (ret) 429 if (ret)
412 return ret; 430 return ret;
413 431
432 ret = imx_sata_enable(hpriv);
433 if (ret)
434 goto disable_clk;
435
414 /* 436 /*
415 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, 437 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
416 * and IP vendor specific register IMX_TIMER1MS. 438 * and IP vendor specific register IMX_TIMER1MS.
@@ -435,16 +457,24 @@ static int imx_ahci_probe(struct platform_device *pdev)
435 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 457 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
436 0, 0, 0); 458 0, 0, 0);
437 if (ret) 459 if (ret)
438 imx_sata_disable(hpriv); 460 goto disable_sata;
439 461
462 return 0;
463
464disable_sata:
465 imx_sata_disable(hpriv);
466disable_clk:
467 clk_disable_unprepare(imxpriv->sata_clk);
440 return ret; 468 return ret;
441} 469}
442 470
443static void ahci_imx_host_stop(struct ata_host *host) 471static void ahci_imx_host_stop(struct ata_host *host)
444{ 472{
445 struct ahci_host_priv *hpriv = host->private_data; 473 struct ahci_host_priv *hpriv = host->private_data;
474 struct imx_ahci_priv *imxpriv = hpriv->plat_data;
446 475
447 imx_sata_disable(hpriv); 476 imx_sata_disable(hpriv);
477 clk_disable_unprepare(imxpriv->sata_clk);
448} 478}
449 479
450#ifdef CONFIG_PM_SLEEP 480#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ebe505c17763..b10d81ddb528 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -58,7 +58,7 @@ static int ahci_probe(struct platform_device *pdev)
58 } 58 }
59 59
60 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) 60 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
61 hflags |= AHCI_HFLAG_NO_FBS; 61 hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
62 62
63 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 63 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
64 hflags, 0, 0); 64 hflags, 0, 0);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 042a9bb45c86..ee3a3659bd9e 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -78,6 +78,7 @@
78struct xgene_ahci_context { 78struct xgene_ahci_context {
79 struct ahci_host_priv *hpriv; 79 struct ahci_host_priv *hpriv;
80 struct device *dev; 80 struct device *dev;
81 u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
81 void __iomem *csr_core; /* Core CSR address of IP */ 82 void __iomem *csr_core; /* Core CSR address of IP */
82 void __iomem *csr_diag; /* Diag CSR address of IP */ 83 void __iomem *csr_diag; /* Diag CSR address of IP */
83 void __iomem *csr_axi; /* AXI CSR address of IP */ 84 void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -98,20 +99,62 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
98} 99}
99 100
100/** 101/**
102 * xgene_ahci_restart_engine - Restart the dma engine.
103 * @ap : ATA port of interest
104 *
105 * Restarts the dma engine inside the controller.
106 */
107static int xgene_ahci_restart_engine(struct ata_port *ap)
108{
109 struct ahci_host_priv *hpriv = ap->host->private_data;
110
111 ahci_stop_engine(ap);
112 ahci_start_fis_rx(ap);
113 hpriv->start_engine(ap);
114
115 return 0;
116}
117
118/**
119 * xgene_ahci_qc_issue - Issue commands to the device
120 * @qc: Command to issue
121 *
 122 * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
 123 * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
 124 * dma state machine to enter the CMFatalErrorUpdate state and lock up.
 125 * Restarting the dma engine brings the controller out of the locked-up state.
126 */
127static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
128{
129 struct ata_port *ap = qc->ap;
130 struct ahci_host_priv *hpriv = ap->host->private_data;
131 struct xgene_ahci_context *ctx = hpriv->plat_data;
132 int rc = 0;
133
134 if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
135 xgene_ahci_restart_engine(ap);
136
137 rc = ahci_qc_issue(qc);
138
139 /* Save the last command issued */
140 ctx->last_cmd[ap->port_no] = qc->tf.command;
141
142 return rc;
143}
144
145/**
101 * xgene_ahci_read_id - Read ID data from the specified device 146 * xgene_ahci_read_id - Read ID data from the specified device
102 * @dev: device 147 * @dev: device
103 * @tf: proposed taskfile 148 * @tf: proposed taskfile
104 * @id: data buffer 149 * @id: data buffer
105 * 150 *
106 * This custom read ID function is required due to the fact that the HW 151 * This custom read ID function is required due to the fact that the HW
107 * does not support DEVSLP and the controller state machine may get stuck 152 * does not support DEVSLP.
108 * after processing the ID query command.
109 */ 153 */
110static unsigned int xgene_ahci_read_id(struct ata_device *dev, 154static unsigned int xgene_ahci_read_id(struct ata_device *dev,
111 struct ata_taskfile *tf, u16 *id) 155 struct ata_taskfile *tf, u16 *id)
112{ 156{
113 u32 err_mask; 157 u32 err_mask;
114 void __iomem *port_mmio = ahci_port_base(dev->link->ap);
115 158
116 err_mask = ata_do_dev_read_id(dev, tf, id); 159 err_mask = ata_do_dev_read_id(dev, tf, id);
117 if (err_mask) 160 if (err_mask)
@@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
133 */ 176 */
134 id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); 177 id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
135 178
136 /*
137 * Due to HW errata, restart the port if no other command active.
138 * Otherwise the controller may get stuck.
139 */
140 if (!readl(port_mmio + PORT_CMD_ISSUE)) {
141 writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
142 readl(port_mmio + PORT_CMD); /* Force a barrier */
143 writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
144 readl(port_mmio + PORT_CMD); /* Force a barrier */
145 }
146 return 0; 179 return 0;
147} 180}
148 181
@@ -300,6 +333,7 @@ static struct ata_port_operations xgene_ahci_ops = {
300 .host_stop = xgene_ahci_host_stop, 333 .host_stop = xgene_ahci_host_stop,
301 .hardreset = xgene_ahci_hardreset, 334 .hardreset = xgene_ahci_hardreset,
302 .read_id = xgene_ahci_read_id, 335 .read_id = xgene_ahci_read_id,
336 .qc_issue = xgene_ahci_qc_issue,
303}; 337};
304 338
305static const struct ata_port_info xgene_ahci_port_info = { 339static const struct ata_port_info xgene_ahci_port_info = {
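
Editor's note: the xgene_ahci_qc_issue() hook above works around the IDENTIFY DEVICE
erratum by remembering the last command issued per port and restarting the engine
before the next command when that last command was IDENTIFY DEVICE. A minimal sketch
of that guard, with hypothetical issue()/restart_engine() stand-ins:

#include <stdint.h>
#include <stdio.h>

#define ATA_CMD_ID_ATA	0xEC	/* IDENTIFY DEVICE */
#define MAX_PORTS	8

static uint8_t last_cmd[MAX_PORTS];

static void restart_engine(int port)
{
	printf("port %d: engine restarted after IDENTIFY DEVICE\n", port);
}

static int issue(int port, uint8_t command)
{
	if (last_cmd[port] == ATA_CMD_ID_ATA)
		restart_engine(port);

	/* ... hand the command to the controller here ... */

	last_cmd[port] = command;	/* remember it for the next issue */
	return 0;
}

int main(void)
{
	issue(0, 0xEC);	/* IDENTIFY DEVICE */
	issue(0, 0x25);	/* READ DMA EXT: triggers the restart first */
	return 0;
}
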
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 40ea583d3610..d72ce0470309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
68 68
69static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 69static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
70static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 70static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
71static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
72static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 71static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
73static int ahci_port_start(struct ata_port *ap); 72static int ahci_port_start(struct ata_port *ap);
74static void ahci_port_stop(struct ata_port *ap); 73static void ahci_port_stop(struct ata_port *ap);
@@ -620,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap)
620} 619}
621EXPORT_SYMBOL_GPL(ahci_stop_engine); 620EXPORT_SYMBOL_GPL(ahci_stop_engine);
622 621
623static void ahci_start_fis_rx(struct ata_port *ap) 622void ahci_start_fis_rx(struct ata_port *ap)
624{ 623{
625 void __iomem *port_mmio = ahci_port_base(ap); 624 void __iomem *port_mmio = ahci_port_base(ap);
626 struct ahci_host_priv *hpriv = ap->host->private_data; 625 struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -646,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
646 /* flush */ 645 /* flush */
647 readl(port_mmio + PORT_CMD); 646 readl(port_mmio + PORT_CMD);
648} 647}
648EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
649 649
650static int ahci_stop_fis_rx(struct ata_port *ap) 650static int ahci_stop_fis_rx(struct ata_port *ap)
651{ 651{
@@ -1945,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1945} 1945}
1946EXPORT_SYMBOL_GPL(ahci_interrupt); 1946EXPORT_SYMBOL_GPL(ahci_interrupt);
1947 1947
1948static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1948unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1949{ 1949{
1950 struct ata_port *ap = qc->ap; 1950 struct ata_port *ap = qc->ap;
1951 void __iomem *port_mmio = ahci_port_base(ap); 1951 void __iomem *port_mmio = ahci_port_base(ap);
@@ -1974,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1974 1974
1975 return 0; 1975 return 0;
1976} 1976}
1977EXPORT_SYMBOL_GPL(ahci_qc_issue);
1977 1978
1978static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 1979static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1979{ 1980{
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 3a5b4ed25a4f..b0077589f065 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
250 if (IS_ERR(hpriv->phy)) { 250 if (IS_ERR(hpriv->phy)) {
251 rc = PTR_ERR(hpriv->phy); 251 rc = PTR_ERR(hpriv->phy);
252 switch (rc) { 252 switch (rc) {
253 case -ENODEV:
254 case -ENOSYS: 253 case -ENOSYS:
254 /* No PHY support. Check if PHY is required. */
255 if (of_find_property(dev->of_node, "phys", NULL)) {
256 dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
257 goto err_out;
258 }
259 case -ENODEV:
255 /* continue normally */ 260 /* continue normally */
256 hpriv->phy = NULL; 261 hpriv->phy = NULL;
257 break; 262 break;
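
Aside, not part of the patch: the libahci_platform hunk above makes a missing SATA PHY fatal only when the device tree actually declares one via a "phys" property. A hedged kernel-style sketch of that optional-resource pattern, with a made-up helper name (not buildable on its own, shown only to illustrate the flow):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

/* Illustrative helper: returns 0 with *phy == NULL when no PHY is needed. */
static int demo_get_optional_sata_phy(struct device *dev, struct phy **phy)
{
	*phy = devm_phy_get(dev, "sata-phy");
	if (!IS_ERR(*phy))
		return 0;

	switch (PTR_ERR(*phy)) {
	case -ENOSYS:
		/* PHY framework disabled: fatal only if DT asks for a PHY. */
		if (of_find_property(dev->of_node, "phys", NULL))
			return -ENOSYS;
		/* fall through */
	case -ENODEV:
		*phy = NULL;	/* no PHY present, continue normally */
		return 0;
	default:
		return PTR_ERR(*phy);
	}
}
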
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4787 * ata_qc_new - Request an available ATA command, for queueing 4787 * ata_qc_new - Request an available ATA command, for queueing
4788 * @ap: target port 4788 * @ap: target port
4789 * 4789 *
4790 * Some ATA host controllers may implement a queue depth which is less
4791 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4792 * the hardware limitation.
4793 *
4790 * LOCKING: 4794 * LOCKING:
4791 * None. 4795 * None.
4792 */ 4796 */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4794static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4795{ 4799{
4796 struct ata_queued_cmd *qc = NULL; 4800 struct ata_queued_cmd *qc = NULL;
4801 unsigned int max_queue = ap->host->n_tags;
4797 unsigned int i, tag; 4802 unsigned int i, tag;
4798 4803
4799 /* no command while frozen */ 4804 /* no command while frozen */
4800 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4805 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4801 return NULL; 4806 return NULL;
4802 4807
4803 for (i = 0; i < ATA_MAX_QUEUE; i++) { 4808 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4804 tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; 4809 tag = tag < max_queue ? tag : 0;
4805 4810
4806 /* the last tag is reserved for internal command. */ 4811 /* the last tag is reserved for internal command. */
4807 if (tag == ATA_TAG_INTERNAL) 4812 if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
6088{ 6093{
6089 spin_lock_init(&host->lock); 6094 spin_lock_init(&host->lock);
6090 mutex_init(&host->eh_mutex); 6095 mutex_init(&host->eh_mutex);
6096 host->n_tags = ATA_MAX_QUEUE - 1;
6091 host->dev = dev; 6097 host->dev = dev;
6092 host->ops = ops; 6098 host->ops = ops;
6093} 6099}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6169{ 6175{
6170 int i, rc; 6176 int i, rc;
6171 6177
6178 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6179
6172 /* host must have been started */ 6180 /* host must have been started */
6173 if (!(host->flags & ATA_HOST_STARTED)) { 6181 if (!(host->flags & ATA_HOST_STARTED)) {
6174 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6182 dev_err(host->dev, "BUG: trying to register unstarted host\n");
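
Aside, not part of the patch: the ata_qc_new() rework above bounds the round-robin tag search by the host's real queue depth (host->n_tags, clamped from sht->can_queue) instead of always scanning ATA_MAX_QUEUE. A standalone model of the same search; constants and names are illustrative, not libata's:

#include <stdio.h>

#define MAX_QUEUE    32
#define TAG_INTERNAL (MAX_QUEUE - 1)   /* reserved, like ATA_TAG_INTERNAL */

struct host {
    unsigned int n_tags;    /* advertised queue depth, <= MAX_QUEUE - 1 */
    unsigned int last_tag;
    unsigned long in_use;   /* bitmap of allocated tags */
};

/* Round-robin search starting after last_tag, wrapping at n_tags. */
static int alloc_tag(struct host *h)
{
    unsigned int i, tag;

    for (i = 0, tag = h->last_tag + 1; i < h->n_tags; i++, tag++) {
        tag = tag < h->n_tags ? tag : 0;
        if (tag == TAG_INTERNAL)
            continue;
        if (!(h->in_use & (1UL << tag))) {
            h->in_use |= 1UL << tag;
            h->last_tag = tag;
            return tag;
        }
    }
    return -1;  /* every usable tag is busy */
}

int main(void)
{
    struct host h = { .n_tags = 4, .last_tag = 0, .in_use = 0 };

    for (int i = 0; i < 6; i++)
        printf("tag %d\n", alloc_tag(&h));  /* prints 1 2 3 0 -1 -1 */
    return 0;
}
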
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1811 case ATA_DEV_ATA: 1811 case ATA_DEV_ATA:
1812 if (err & ATA_ICRC) 1812 if (err & ATA_ICRC)
1813 qc->err_mask |= AC_ERR_ATA_BUS; 1813 qc->err_mask |= AC_ERR_ATA_BUS;
1814 if (err & ATA_UNC) 1814 if (err & (ATA_UNC | ATA_AMNF))
1815 qc->err_mask |= AC_ERR_MEDIA; 1815 qc->err_mask |= AC_ERR_MEDIA;
1816 if (err & ATA_IDNF) 1816 if (err & ATA_IDNF)
1817 qc->err_mask |= AC_ERR_INVALID; 1817 qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
2556 } 2556 }
2557 2557
2558 if (cmd->command != ATA_CMD_PACKET && 2558 if (cmd->command != ATA_CMD_PACKET &&
2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2560 ATA_ABORTED))) 2560 ATA_IDNF | ATA_ABORTED)))
2561 ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 2561 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2562 res->feature & ATA_ICRC ? "ICRC " : "", 2562 res->feature & ATA_ICRC ? "ICRC " : "",
2563 res->feature & ATA_UNC ? "UNC " : "", 2563 res->feature & ATA_UNC ? "UNC " : "",
2564 res->feature & ATA_AMNF ? "AMNF " : "",
2564 res->feature & ATA_IDNF ? "IDNF " : "", 2565 res->feature & ATA_IDNF ? "IDNF " : "",
2565 res->feature & ATA_ABORTED ? "ABRT " : ""); 2566 res->feature & ATA_ABORTED ? "ABRT " : "");
2566#endif 2567#endif
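
Aside, not part of the patch: the two libata-eh hunks teach both the error classifier and the log message about ATA_AMNF (address mark not found). A tiny standalone decoder using the standard ATA error-register bit values:

#include <stdio.h>

#define ATA_ICRC    0x80   /* interface CRC error */
#define ATA_UNC     0x40   /* uncorrectable media error */
#define ATA_IDNF    0x10   /* ID not found */
#define ATA_ABORTED 0x04   /* command aborted */
#define ATA_AMNF    0x01   /* address mark not found */

static void print_err(unsigned char err)
{
    printf("error: { %s%s%s%s%s}\n",
           err & ATA_ICRC    ? "ICRC " : "",
           err & ATA_UNC     ? "UNC "  : "",
           err & ATA_AMNF    ? "AMNF " : "",
           err & ATA_IDNF    ? "IDNF " : "",
           err & ATA_ABORTED ? "ABRT " : "");
}

int main(void)
{
    print_err(ATA_UNC | ATA_AMNF);   /* error: { UNC AMNF } */
    return 0;
}
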
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
915 struct ep93xx_pata_data *drv_data; 915 struct ep93xx_pata_data *drv_data;
916 struct ata_host *host; 916 struct ata_host *host;
917 struct ata_port *ap; 917 struct ata_port *ap;
918 unsigned int irq; 918 int irq;
919 struct resource *mem_res; 919 struct resource *mem_res;
920 void __iomem *ide_base; 920 void __iomem *ide_base;
921 int err; 921 int err;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e1762d..eee48c49f5de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
89 return dev->archdata.irqs[num]; 89 return dev->archdata.irqs[num];
90#else 90#else
91 struct resource *r; 91 struct resource *r;
92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
93 return of_irq_get(dev->dev.of_node, num); 93 int ret;
94
95 ret = of_irq_get(dev->dev.of_node, num);
96 if (ret >= 0 || ret == -EPROBE_DEFER)
97 return ret;
98 }
94 99
95 r = platform_get_resource(dev, IORESOURCE_IRQ, num); 100 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
96 101
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
133{ 138{
134 struct resource *r; 139 struct resource *r;
135 140
136 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 141 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
137 return of_irq_get_byname(dev->dev.of_node, name); 142 int ret;
143
144 ret = of_irq_get_byname(dev->dev.of_node, name);
145 if (ret >= 0 || ret == -EPROBE_DEFER)
146 return ret;
147 }
138 148
139 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 149 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
140 return r ? r->start : -ENXIO; 150 return r ? r->start : -ENXIO;
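
Aside, not part of the patch: with the change above, platform_get_irq() and platform_get_irq_byname() can now return -EPROBE_DEFER from the OF interrupt parser instead of silently falling back to the static resource table. A hedged sketch of how a consumer probe is expected to treat that value; the driver name and handler are made up, and the snippet is not a complete module:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_drv_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* interrupt controller not ready, retry later */
	if (irq < 0)
		return irq;	/* the IRQ really is missing */

	return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
				dev_name(&pdev->dev), pdev);
}
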
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
544 struct task_struct *opa; 544 struct task_struct *opa;
545 545
546 kref_get(&connection->kref); 546 kref_get(&connection->kref);
547 /* We may just have force_sig()'ed this thread
548 * to get it out of some blocking network function.
549 * Clear signals; otherwise kthread_run(), which internally uses
 550 * wait_for_completion_killable(), will mistake our pending signal
551 * for a new fatal signal and fail. */
552 flush_signals(current);
547 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); 553 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
548 if (IS_ERR(opa)) { 554 if (IS_ERR(opa)) {
549 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); 555 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622 memset(&zram->stats, 0, sizeof(zram->stats)); 622 memset(&zram->stats, 0, sizeof(zram->stats));
623 623
624 zram->disksize = 0; 624 zram->disksize = 0;
625 if (reset_capacity) { 625 if (reset_capacity)
626 set_capacity(zram->disk, 0); 626 set_capacity(zram->disk, 0);
627 revalidate_disk(zram->disk); 627
628 }
629 up_write(&zram->init_lock); 628 up_write(&zram->init_lock);
629
630 /*
631 * Revalidate disk out of the init_lock to avoid lockdep splat.
632 * It's okay because disk's capacity is protected by init_lock
633 * so that revalidate_disk always sees up-to-date capacity.
634 */
635 if (reset_capacity)
636 revalidate_disk(zram->disk);
630} 637}
631 638
632static ssize_t disksize_store(struct device *dev, 639static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
666 zram->comp = comp; 673 zram->comp = comp;
667 zram->disksize = disksize; 674 zram->disksize = disksize;
668 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 675 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669 revalidate_disk(zram->disk);
670 up_write(&zram->init_lock); 676 up_write(&zram->init_lock);
677
678 /*
679 * Revalidate disk out of the init_lock to avoid lockdep splat.
680 * It's okay because disk's capacity is protected by init_lock
681 * so that revalidate_disk always sees up-to-date capacity.
682 */
683 revalidate_disk(zram->disk);
684
671 return len; 685 return len;
672 686
673out_destroy_comp: 687out_destroy_comp:
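
Aside, not part of the patch: both zram hunks apply the same rule: finish every state update under init_lock, drop the lock, and only then call revalidate_disk(), which takes its own locks. A generic pthread model of that ordering, with illustrative names only:

#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long disksize;

/* Stand-in for revalidate_disk(): may acquire other subsystem locks. */
static void revalidate(void)
{
}

static void set_size(unsigned long long new_size)
{
    pthread_mutex_lock(&init_lock);
    disksize = new_size;             /* all state updates under the lock */
    pthread_mutex_unlock(&init_lock);

    /*
     * Call into the other subsystem only after dropping init_lock, so
     * the two locks are never held together and lockdep stays quiet.
     */
    revalidate();
}

int main(void)
{
    set_size(1 << 20);
    return 0;
}
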
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index f98380648cb3..f50dffc0374f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
90 { USB_DEVICE(0x0b05, 0x17d0) }, 90 { USB_DEVICE(0x0b05, 0x17d0) },
91 { USB_DEVICE(0x0CF3, 0x0036) }, 91 { USB_DEVICE(0x0CF3, 0x0036) },
92 { USB_DEVICE(0x0CF3, 0x3004) }, 92 { USB_DEVICE(0x0CF3, 0x3004) },
93 { USB_DEVICE(0x0CF3, 0x3005) },
94 { USB_DEVICE(0x0CF3, 0x3008) }, 93 { USB_DEVICE(0x0CF3, 0x3008) },
95 { USB_DEVICE(0x0CF3, 0x311D) }, 94 { USB_DEVICE(0x0CF3, 0x311D) },
96 { USB_DEVICE(0x0CF3, 0x311E) }, 95 { USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
140 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, 140 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
142 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
143 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
144 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 142 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
145 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 143 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
146 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, 144 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1c80b0c7663..6250fc2fb93a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, 163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
165 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
166 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 165 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
167 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 166 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
168 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, 167 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 04680ead9275..fede8ca7147c 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { 406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
407 BT_ERR("Non-link packet received in non-active state"); 407 BT_ERR("Non-link packet received in non-active state");
408 h5_reset_rx(h5); 408 h5_reset_rx(h5);
409 return 0;
409 } 410 }
410 411
411 h5->rx_func = h5_rx_payload; 412 h5->rx_func = h5_rx_payload;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601cc81cf..c4419ea1ab07 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
55static int data_avail; 55static int data_avail;
56static u8 *rng_buffer; 56static u8 *rng_buffer;
57 57
58static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
59 int wait);
60
58static size_t rng_buffer_size(void) 61static size_t rng_buffer_size(void)
59{ 62{
60 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; 63 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
61} 64}
62 65
66static void add_early_randomness(struct hwrng *rng)
67{
68 unsigned char bytes[16];
69 int bytes_read;
70
71 /*
72 * Currently only virtio-rng cannot return data during device
73 * probe, and that's handled in virtio-rng.c itself. If there
74 * are more such devices, this call to rng_get_data can be
75 * made conditional here instead of doing it per-device.
76 */
77 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
78 if (bytes_read > 0)
79 add_device_randomness(bytes, bytes_read);
80}
81
63static inline int hwrng_init(struct hwrng *rng) 82static inline int hwrng_init(struct hwrng *rng)
64{ 83{
65 if (!rng->init) 84 if (rng->init) {
66 return 0; 85 int ret;
67 return rng->init(rng); 86
87 ret = rng->init(rng);
88 if (ret)
89 return ret;
90 }
91 add_early_randomness(rng);
92 return 0;
68} 93}
69 94
70static inline void hwrng_cleanup(struct hwrng *rng) 95static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
304{ 329{
305 int err = -EINVAL; 330 int err = -EINVAL;
306 struct hwrng *old_rng, *tmp; 331 struct hwrng *old_rng, *tmp;
307 unsigned char bytes[16];
308 int bytes_read;
309 332
310 if (rng->name == NULL || 333 if (rng->name == NULL ||
311 (rng->data_read == NULL && rng->read == NULL)) 334 (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
347 INIT_LIST_HEAD(&rng->list); 370 INIT_LIST_HEAD(&rng->list);
348 list_add_tail(&rng->list, &rng_list); 371 list_add_tail(&rng->list, &rng_list);
349 372
350 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 373 if (old_rng && !rng->init) {
351 if (bytes_read > 0) 374 /*
352 add_device_randomness(bytes, bytes_read); 375 * Use a new device's input to add some randomness to
376 * the system. If this rng device isn't going to be
377 * used right away, its init function hasn't been
378 * called yet; so only use the randomness from devices
379 * that don't need an init callback.
380 */
381 add_early_randomness(rng);
382 }
383
353out_unlock: 384out_unlock:
354 mutex_unlock(&rng_mutex); 385 mutex_unlock(&rng_mutex);
355out: 386out:
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e71501de54..e9b15bc18b4d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@ struct virtrng_info {
38 int index; 38 int index;
39}; 39};
40 40
41static bool probe_done;
42
41static void random_recv_done(struct virtqueue *vq) 43static void random_recv_done(struct virtqueue *vq)
42{ 44{
43 struct virtrng_info *vi = vq->vdev->priv; 45 struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
67 int ret; 69 int ret;
68 struct virtrng_info *vi = (struct virtrng_info *)rng->priv; 70 struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
69 71
72 /*
73 * Don't ask host for data till we're setup. This call can
74 * happen during hwrng_register(), after commit d9e7972619.
75 */
76 if (unlikely(!probe_done))
77 return 0;
78
70 if (!vi->busy) { 79 if (!vi->busy) {
71 vi->busy = true; 80 vi->busy = true;
72 init_completion(&vi->have_data); 81 init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
137 return err; 146 return err;
138 } 147 }
139 148
149 probe_done = true;
140 return 0; 150 return 0;
141} 151}
142 152
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index d915707d2ba1..93dcad0c1cbe 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
138 if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) 138 if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
139 return -ENOMEM; 139 return -ENOMEM;
140 cpumask_copy(old_mask, &current->cpus_allowed); 140 cpumask_copy(old_mask, &current->cpus_allowed);
141 set_cpus_allowed_ptr(current, cpumask_of(0)); 141 rc = set_cpus_allowed_ptr(current, cpumask_of(0));
142 if (rc)
143 goto out;
142 if (smp_processor_id() != 0) { 144 if (smp_processor_id() != 0) {
143 rc = -EBUSY; 145 rc = -EBUSY;
144 goto out; 146 goto out;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a7b252..71529e196b84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ retry:
641 } while (unlikely(entropy_count < pool_size-2 && pnfrac)); 641 } while (unlikely(entropy_count < pool_size-2 && pnfrac));
642 } 642 }
643 643
644 if (entropy_count < 0) { 644 if (unlikely(entropy_count < 0)) {
645 pr_warn("random: negative entropy/overflow: pool %s count %d\n", 645 pr_warn("random: negative entropy/overflow: pool %s count %d\n",
646 r->name, entropy_count); 646 r->name, entropy_count);
647 WARN_ON(1); 647 WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
981 int reserved) 981 int reserved)
982{ 982{
983 int entropy_count, orig; 983 int entropy_count, orig;
984 size_t ibytes; 984 size_t ibytes, nfrac;
985 985
986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
987 987
@@ -999,7 +999,17 @@ retry:
999 } 999 }
1000 if (ibytes < min) 1000 if (ibytes < min)
1001 ibytes = 0; 1001 ibytes = 0;
1002 if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) 1002
1003 if (unlikely(entropy_count < 0)) {
1004 pr_warn("random: negative entropy count: pool %s count %d\n",
1005 r->name, entropy_count);
1006 WARN_ON(1);
1007 entropy_count = 0;
1008 }
1009 nfrac = ibytes << (ENTROPY_SHIFT + 3);
1010 if ((size_t) entropy_count > nfrac)
1011 entropy_count -= nfrac;
1012 else
1003 entropy_count = 0; 1013 entropy_count = 0;
1004 1014
1005 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1015 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1376 "with %d bits of entropy available\n", 1386 "with %d bits of entropy available\n",
1377 current->comm, nonblocking_pool.entropy_total); 1387 current->comm, nonblocking_pool.entropy_total);
1378 1388
1389 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
1379 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); 1390 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
1380 1391
1381 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), 1392 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
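
Aside, not part of the patch: the account() change above replaces a raw `entropy_count -= ibytes << (ENTROPY_SHIFT + 3)` with a guarded subtraction so the fractional-bit counter cannot go negative. A standalone model of that guard; ENTROPY_SHIFT matches random.c, the values in main() are arbitrary:

#include <stdio.h>
#include <stddef.h>

#define ENTROPY_SHIFT 3

/* Debit the fractional bits for 'ibytes' extracted bytes, clamping at 0. */
static int debit_entropy(int entropy_count, size_t ibytes)
{
    size_t nfrac = ibytes << (ENTROPY_SHIFT + 3);   /* bytes -> fractional bits */

    if ((size_t)entropy_count > nfrac)
        return entropy_count - nfrac;
    return 0;
}

int main(void)
{
    printf("%d\n", debit_entropy(4096, 16));   /* 4096 - 1024 = 3072 */
    printf("%d\n", debit_entropy(512, 16));    /* would underflow -> 0 */
    return 0;
}
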
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b7b5859a420..3757e9e72d37 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
230 goto err_reg; 230 goto err_reg;
231 } 231 }
232 232
233 s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, 233 s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
234 sizeof(struct clk_lookup), GFP_KERNEL); 234 s2mps11_name(s2mps11_clk), NULL);
235 if (!s2mps11_clk->lookup) { 235 if (!s2mps11_clk->lookup) {
236 ret = -ENOMEM; 236 ret = -ENOMEM;
237 goto err_lup; 237 goto err_lup;
238 } 238 }
239 239
240 s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
241 s2mps11_clk->lookup->clk = s2mps11_clk->clk;
242
243 clkdev_add(s2mps11_clk->lookup); 240 clkdev_add(s2mps11_clk->lookup);
244 } 241 }
245 242
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 12f3c0b64fcd..4c449b3170f6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = {
1209 1209
1210static u8 mmcc_pxo_hdmi_map[] = { 1210static u8 mmcc_pxo_hdmi_map[] = {
1211 [P_PXO] = 0, 1211 [P_PXO] = 0,
1212 [P_HDMI_PLL] = 2, 1212 [P_HDMI_PLL] = 3,
1213}; 1213};
1214 1214
1215static const char *mmcc_pxo_hdmi[] = { 1215static const char *mmcc_pxo_hdmi[] = {
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4f150c9dd38c..7f4a473a7ad7 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
925 GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 925 GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
926 0, 0), 926 0, 0),
927 GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0), 927 GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
928 GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp", 928 GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
929 E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
930 GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
931 E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
932 GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
933 E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
934 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
935 E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
936 GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
937 E4X12_GATE_IP_ISP, 0, 0, 0), 929 E4X12_GATE_IP_ISP, 0, 0, 0),
938 GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp", 930 GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
939 E4X12_GATE_IP_ISP, 1, 0, 0), 931 E4X12_GATE_IP_ISP, 1, 0, 0),
940 GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp", 932 GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
941 E4X12_GATE_IP_ISP, 2, 0, 0), 933 E4X12_GATE_IP_ISP, 2, 0, 0),
942 GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp", 934 GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
943 E4X12_GATE_IP_ISP, 3, 0, 0), 935 E4X12_GATE_IP_ISP, 3, 0, 0),
944 GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0), 936 GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
945 GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2, 937 GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 1fad4c5e3f5d..184f64293b26 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
661 GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0), 661 GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
662 GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0), 662 GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
663 GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub", 663 GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
664 GATE_IP_DISP1, 2, 0, 0), 664 GATE_IP_DISP1, 9, 0, 0),
665 GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub", 665 GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
666 GATE_IP_DISP1, 8, 0, 0), 666 GATE_IP_DISP1, 8, 0, 0),
667 GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0), 667 GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 9d7d7eed03fd..a4e6cc782e5c 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
631 SRC_TOP4, 16, 1), 631 SRC_TOP4, 16, 1),
632 MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1), 632 MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
633 MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1), 633 MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
634 MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1), 634 MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
635 SRC_TOP4, 28, 1),
635 636
636 MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p, 637 MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
637 SRC_TOP5, 0, 1), 638 SRC_TOP5, 0, 1),
@@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
684 SRC_TOP11, 12, 1), 685 SRC_TOP11, 12, 1),
685 MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1), 686 MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
686 MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1), 687 MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
687 MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1), 688 MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
689 SRC_TOP11, 28, 1),
688 690
689 MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p, 691 MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
690 SRC_TOP12, 4, 1), 692 SRC_TOP12, 4, 1),
@@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
890 GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), 892 GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
891 GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", 893 GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
892 GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), 894 GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
893 GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
894 GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
895 GATE(0, "aclk266_isp", "mout_user_aclk266_isp", 895 GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
896 GATE_BUS_TOP, 13, 0, 0), 896 GATE_BUS_TOP, 13, 0, 0),
897 GATE(0, "aclk166", "mout_user_aclk166", 897 GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
994 SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), 994 SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
995 995
996 /* PERIC Block */ 996 /* PERIC Block */
997 GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0), 997 GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
998 GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0), 998 GATE_IP_PERIC, 0, 0, 0),
999 GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0), 999 GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
1000 GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0), 1000 GATE_IP_PERIC, 1, 0, 0),
1001 GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0), 1001 GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
1002 GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0), 1002 GATE_IP_PERIC, 2, 0, 0),
1003 GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0), 1003 GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
1004 GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0), 1004 GATE_IP_PERIC, 3, 0, 0),
1005 GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0), 1005 GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
1006 GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0), 1006 GATE_IP_PERIC, 6, 0, 0),
1007 GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0), 1007 GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
1008 GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0), 1008 GATE_IP_PERIC, 7, 0, 0),
1009 GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0), 1009 GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
1010 GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0), 1010 GATE_IP_PERIC, 8, 0, 0),
1011 GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0), 1011 GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
1012 GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0), 1012 GATE_IP_PERIC, 9, 0, 0),
1013 GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0), 1013 GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
1014 GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0), 1014 GATE_IP_PERIC, 10, 0, 0),
1015 GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0), 1015 GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
1016 GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0), 1016 GATE_IP_PERIC, 11, 0, 0),
1017 GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0), 1017 GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
1018 GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0), 1018 GATE_IP_PERIC, 12, 0, 0),
1019 GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0), 1019 GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
1020 GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0), 1020 GATE_IP_PERIC, 13, 0, 0),
1021 GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0), 1021 GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
1022 GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0), 1022 GATE_IP_PERIC, 14, 0, 0),
1023 1023 GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
1024 GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0), 1024 GATE_IP_PERIC, 15, 0, 0),
1025 GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
1026 GATE_IP_PERIC, 16, 0, 0),
1027 GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
1028 GATE_IP_PERIC, 17, 0, 0),
1029 GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
1030 GATE_IP_PERIC, 18, 0, 0),
1031 GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
1032 GATE_IP_PERIC, 20, 0, 0),
1033 GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
1034 GATE_IP_PERIC, 21, 0, 0),
1035 GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
1036 GATE_IP_PERIC, 22, 0, 0),
1037 GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
1038 GATE_IP_PERIC, 23, 0, 0),
1039 GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
1040 GATE_IP_PERIC, 24, 0, 0),
1041 GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
1042 GATE_IP_PERIC, 26, 0, 0),
1043 GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
1044 GATE_IP_PERIC, 28, 0, 0),
1045 GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
1046 GATE_IP_PERIC, 30, 0, 0),
1047 GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
1048 GATE_IP_PERIC, 31, 0, 0),
1049
1050 GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
1051 GATE_BUS_PERIC, 22, 0, 0),
1025 1052
1026 /* PERIS Block */ 1053 /* PERIS Block */
1027 GATE(CLK_CHIPID, "chipid", "aclk66_psgen", 1054 GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index ba0716801db2..140f4733c02e 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
152 ALIAS(HCLK, NULL, "hclk"), 152 ALIAS(HCLK, NULL, "hclk"),
153 ALIAS(MPLL, NULL, "mpll"), 153 ALIAS(MPLL, NULL, "mpll"),
154 ALIAS(FCLK, NULL, "fclk"), 154 ALIAS(FCLK, NULL, "fclk"),
155 ALIAS(PCLK, NULL, "watchdog"),
156 ALIAS(PCLK_SDI, NULL, "sdi"),
157 ALIAS(HCLK_NAND, NULL, "nand"),
158 ALIAS(PCLK_I2S, NULL, "iis"),
159 ALIAS(PCLK_I2C, NULL, "i2c"),
155}; 160};
156 161
157/* S3C2410 specific clocks */ 162/* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
378 if (!np) 383 if (!np)
379 s3c2410_common_clk_register_fixed_ext(ctx, xti_f); 384 s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
380 385
381 if (current_soc == 2410) { 386 if (current_soc == S3C2410) {
382 if (_get_rate("xti") == 12 * MHZ) { 387 if (_get_rate("xti") == 12 * MHZ) {
383 s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl; 388 s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
384 s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl; 389 s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
432 samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor, 437 samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
433 ARRAY_SIZE(s3c2410_ffactor)); 438 ARRAY_SIZE(s3c2410_ffactor));
434 samsung_clk_register_alias(ctx, s3c2410_aliases, 439 samsung_clk_register_alias(ctx, s3c2410_aliases,
435 ARRAY_SIZE(s3c2410_common_aliases)); 440 ARRAY_SIZE(s3c2410_aliases));
436 break; 441 break;
437 case S3C2440: 442 case S3C2440:
438 samsung_clk_register_mux(ctx, s3c2440_muxes, 443 samsung_clk_register_mux(ctx, s3c2440_muxes,
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index efa16ee592c8..8889ff1c10fc 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
418 ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"), 418 ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
419 ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"), 419 ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
420 ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"), 420 ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
421 ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"), 421 ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
422 ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"), 422 ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
423 ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
424 ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
423 ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"), 425 ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
424 ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"), 426 ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
425 ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"), 427 ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index c2d204315546..bb5f387774e2 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
211/* array of all spear 320 clock lookups */ 211/* array of all spear 320 clock lookups */
212#ifdef CONFIG_MACH_SPEAR320 212#ifdef CONFIG_MACH_SPEAR320
213 213
214#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) 214#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010)
215#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) 215#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018)
216 216
217 #define SPEAR320_UARTX_PCLK_MASK 0x1 217 #define SPEAR320_UARTX_PCLK_MASK 0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
245 "ras_syn0_gclk", }; 245 "ras_syn0_gclk", };
246static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; 246static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
247 247
248static void __init spear320_clk_init(void __iomem *soc_config_base) 248static void __init spear320_clk_init(void __iomem *soc_config_base,
249 struct clk *ras_apb_clk)
249{ 250{
250 struct clk *clk; 251 struct clk *clk;
251 252
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
342 SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK, 343 SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
343 0, &_lock); 344 0, &_lock);
344 clk_register_clkdev(clk, NULL, "a3000000.serial"); 345 clk_register_clkdev(clk, NULL, "a3000000.serial");
346 /* Enforce ras_apb_clk */
347 clk_set_parent(clk, ras_apb_clk);
345 348
346 clk = clk_register_mux(NULL, "uart2_clk", uartx_parents, 349 clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
347 ARRAY_SIZE(uartx_parents), 350 ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
349 SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT, 352 SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
350 SPEAR320_UARTX_PCLK_MASK, 0, &_lock); 353 SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
351 clk_register_clkdev(clk, NULL, "a4000000.serial"); 354 clk_register_clkdev(clk, NULL, "a4000000.serial");
355 /* Enforce ras_apb_clk */
356 clk_set_parent(clk, ras_apb_clk);
352 357
353 clk = clk_register_mux(NULL, "uart3_clk", uartx_parents, 358 clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
354 ARRAY_SIZE(uartx_parents), 359 ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
379 clk_register_clkdev(clk, NULL, "60100000.serial"); 384 clk_register_clkdev(clk, NULL, "60100000.serial");
380} 385}
381#else 386#else
382static inline void spear320_clk_init(void __iomem *soc_config_base) { } 387static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
383#endif 388#endif
384 389
385void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) 390void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
386{ 391{
387 struct clk *clk, *clk1; 392 struct clk *clk, *clk1, *ras_apb_clk;
388 393
389 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT, 394 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
390 32000); 395 32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
613 clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB, 618 clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
614 RAS_APB_CLK_ENB, 0, &_lock); 619 RAS_APB_CLK_ENB, 0, &_lock);
615 clk_register_clkdev(clk, "ras_apb_clk", NULL); 620 clk_register_clkdev(clk, "ras_apb_clk", NULL);
621 ras_apb_clk = clk;
616 622
617 clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0, 623 clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
618 RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock); 624 RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
659 else if (of_machine_is_compatible("st,spear310")) 665 else if (of_machine_is_compatible("st,spear310"))
660 spear310_clk_init(); 666 spear310_clk_init();
661 else if (of_machine_is_compatible("st,spear320")) 667 else if (of_machine_is_compatible("st,spear320"))
662 spear320_clk_init(soc_config_base); 668 spear320_clk_init(soc_config_base, ras_apb_clk);
663} 669}
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 44cd27c5c401..670f90d629d7 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -29,7 +29,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
29 29
30 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 30 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
31 reg = devm_ioremap_resource(&pdev->dev, r); 31 reg = devm_ioremap_resource(&pdev->dev, r);
32 if (!reg) 32 if (IS_ERR(reg))
33 return PTR_ERR(reg); 33 return PTR_ERR(reg);
34 34
35 clk_parent = of_clk_get_parent_name(np, 0); 35 clk_parent = of_clk_get_parent_name(np, 0);
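
Aside, not part of the patch: the sun6i fix above is the standard devm_ioremap_resource() idiom; the function returns ERR_PTR() on failure, never NULL, so a `!reg` test can never catch the error. A minimal kernel-style sketch of the correct check, with the probe body abbreviated and names made up:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_mmio_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *reg;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(reg))		/* never NULL on failure */
		return PTR_ERR(reg);

	/* ... program the hardware through 'reg' ... */
	return 0;
}
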
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 5428c9c547cd..72d97279eae1 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
77 if (i == MAX_APLL_WAIT_TRIES) { 77 if (i == MAX_APLL_WAIT_TRIES) {
78 pr_warn("clock: %s failed transition to '%s'\n", 78 pr_warn("clock: %s failed transition to '%s'\n",
79 clk_name, (state) ? "locked" : "bypassed"); 79 clk_name, (state) ? "locked" : "bypassed");
80 } else { 80 r = -EBUSY;
81 } else
81 pr_debug("clock: %s transition to '%s' in %d loops\n", 82 pr_debug("clock: %s transition to '%s' in %d loops\n",
82 clk_name, (state) ? "locked" : "bypassed", i); 83 clk_name, (state) ? "locked" : "bypassed", i);
83 84
84 r = 0;
85 }
86
87 return r; 85 return r;
88} 86}
89 87
@@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
338 const char *parent_name; 336 const char *parent_name;
339 u32 val; 337 u32 val;
340 338
341 ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 339 ad = kzalloc(sizeof(*ad), GFP_KERNEL);
342 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 340 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
343 init = kzalloc(sizeof(*init), GFP_KERNEL); 341 init = kzalloc(sizeof(*init), GFP_KERNEL);
344 342
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index abd956d5f838..79791e1bf282 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -161,7 +161,8 @@ cleanup:
161} 161}
162 162
163#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 163#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
164 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) 164 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
165 defined(CONFIG_SOC_AM43XX)
165/** 166/**
166 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock 167 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
167 * @node: device node for this clock 168 * @node: device node for this clock
@@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
322 of_ti_omap4_dpll_x2_setup); 323 of_ti_omap4_dpll_x2_setup);
323#endif 324#endif
324 325
325#ifdef CONFIG_SOC_AM33XX 326#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
326static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) 327static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
327{ 328{
328 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL); 329 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 0197a478720c..e9d650e51287 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
160 u8 clk_mux_flags = 0; 160 u8 clk_mux_flags = 0;
161 u32 mask = 0; 161 u32 mask = 0;
162 u32 shift = 0; 162 u32 shift = 0;
163 u32 flags = 0; 163 u32 flags = CLK_SET_RATE_NO_REPARENT;
164 164
165 num_parents = of_clk_get_parent_count(node); 165 num_parents = of_clk_get_parent_count(node);
166 if (num_parents < 2) { 166 if (num_parents < 2) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index f71d55f5e6e5..ab51bf20a3ed 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -162,7 +162,7 @@ static void exynos4_mct_frc_start(void)
162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); 162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
163} 163}
164 164
165static cycle_t exynos4_frc_read(struct clocksource *cs) 165static cycle_t notrace _exynos4_frc_read(void)
166{ 166{
167 unsigned int lo, hi; 167 unsigned int lo, hi;
168 u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); 168 u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -176,6 +176,11 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
176 return ((cycle_t)hi << 32) | lo; 176 return ((cycle_t)hi << 32) | lo;
177} 177}
178 178
179static cycle_t exynos4_frc_read(struct clocksource *cs)
180{
181 return _exynos4_frc_read();
182}
183
179static void exynos4_frc_resume(struct clocksource *cs) 184static void exynos4_frc_resume(struct clocksource *cs)
180{ 185{
181 exynos4_mct_frc_start(); 186 exynos4_mct_frc_start();
@@ -192,13 +197,24 @@ struct clocksource mct_frc = {
192 197
193static u64 notrace exynos4_read_sched_clock(void) 198static u64 notrace exynos4_read_sched_clock(void)
194{ 199{
195 return exynos4_frc_read(&mct_frc); 200 return _exynos4_frc_read();
201}
202
203static struct delay_timer exynos4_delay_timer;
204
205static cycles_t exynos4_read_current_timer(void)
206{
207 return _exynos4_frc_read();
196} 208}
197 209
198static void __init exynos4_clocksource_init(void) 210static void __init exynos4_clocksource_init(void)
199{ 211{
200 exynos4_mct_frc_start(); 212 exynos4_mct_frc_start();
201 213
214 exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
215 exynos4_delay_timer.freq = clk_rate;
216 register_current_timer_delay(&exynos4_delay_timer);
217
202 if (clocksource_register_hz(&mct_frc, clk_rate)) 218 if (clocksource_register_hz(&mct_frc, clk_rate))
203 panic("%s: can't register clocksource\n", mct_frc.name); 219 panic("%s: can't register clocksource\n", mct_frc.name);
204 220
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
104 tristate "Freescale i.MX6 cpufreq support" 104 tristate "Freescale i.MX6 cpufreq support"
105 depends on ARCH_MXC 105 depends on ARCH_MXC
106 depends on REGULATOR_ANATOP 106 depends on REGULATOR_ANATOP
107 select PM_OPP
107 help 108 help
108 This adds cpufreq driver support for Freescale i.MX6 series SoCs. 109 This adds cpufreq driver support for Freescale i.MX6 series SoCs.
109 110
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
118 If in doubt, say Y. 119 If in doubt, say Y.
119 120
120config ARM_KIRKWOOD_CPUFREQ 121config ARM_KIRKWOOD_CPUFREQ
121 def_bool MACH_KIRKWOOD 122 def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
122 help 123 help
123 This adds the CPUFreq driver for Marvell Kirkwood 124 This adds the CPUFreq driver for Marvell Kirkwood
124 SoCs. 125 SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 738c8b7b17dc..db6d9a2fea4d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
49# LITTLE drivers, so that it is probed last. 49# LITTLE drivers, so that it is probed last.
50obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o 50obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
51 51
52obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o 52obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
53obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 53obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
54obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o 54obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
55obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 55obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..86beda9f950b 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
152 goto out_put_reg; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 /* OPPs might be populated at runtime, don't check for error here */
156 if (ret) { 156 of_init_opp_table(cpu_dev);
157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_clk;
159 }
160 157
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 158 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 159 if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..6f024852c6fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1153 * the creation of a brand new one. So we need to perform this update 1153 * the creation of a brand new one. So we need to perform this update
1154 * by invoking update_policy_cpu(). 1154 * by invoking update_policy_cpu().
1155 */ 1155 */
1156 if (recover_policy && cpu != policy->cpu) 1156 if (recover_policy && cpu != policy->cpu) {
1157 update_policy_cpu(policy, cpu); 1157 update_policy_cpu(policy, cpu);
1158 else 1158 WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
1159 } else {
1159 policy->cpu = cpu; 1160 policy->cpu = cpu;
1161 }
1160 1162
1161 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1162 1164
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 924bb2d42b1c..86631cb6f7de 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
128 128
129struct perf_limits { 129struct perf_limits {
130 int no_turbo; 130 int no_turbo;
131 int turbo_disabled;
131 int max_perf_pct; 132 int max_perf_pct;
132 int min_perf_pct; 133 int min_perf_pct;
133 int32_t max_perf; 134 int32_t max_perf;
@@ -287,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
287 if (ret != 1) 288 if (ret != 1)
288 return -EINVAL; 289 return -EINVAL;
289 limits.no_turbo = clamp_t(int, input, 0 , 1); 290 limits.no_turbo = clamp_t(int, input, 0 , 1);
290 291 if (limits.turbo_disabled) {
292 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
293 limits.no_turbo = limits.turbo_disabled;
294 }
291 return count; 295 return count;
292} 296}
293 297
@@ -357,21 +361,21 @@ static int byt_get_min_pstate(void)
357{ 361{
358 u64 value; 362 u64 value;
359 rdmsrl(BYT_RATIOS, value); 363 rdmsrl(BYT_RATIOS, value);
360 return (value >> 8) & 0x3F; 364 return (value >> 8) & 0x7F;
361} 365}
362 366
363static int byt_get_max_pstate(void) 367static int byt_get_max_pstate(void)
364{ 368{
365 u64 value; 369 u64 value;
366 rdmsrl(BYT_RATIOS, value); 370 rdmsrl(BYT_RATIOS, value);
367 return (value >> 16) & 0x3F; 371 return (value >> 16) & 0x7F;
368} 372}
369 373
370static int byt_get_turbo_pstate(void) 374static int byt_get_turbo_pstate(void)
371{ 375{
372 u64 value; 376 u64 value;
373 rdmsrl(BYT_TURBO_RATIOS, value); 377 rdmsrl(BYT_TURBO_RATIOS, value);
374 return value & 0x3F; 378 return value & 0x7F;
375} 379}
376 380
377static void byt_set_pstate(struct cpudata *cpudata, int pstate) 381static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -381,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
381 u32 vid; 385 u32 vid;
382 386
383 val = pstate << 8; 387 val = pstate << 8;
384 if (limits.no_turbo) 388 if (limits.no_turbo && !limits.turbo_disabled)
385 val |= (u64)1 << 32; 389 val |= (u64)1 << 32;
386 390
387 vid_fp = cpudata->vid.min + mul_fp( 391 vid_fp = cpudata->vid.min + mul_fp(
@@ -405,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
405 409
406 410
407 rdmsrl(BYT_VIDS, value); 411 rdmsrl(BYT_VIDS, value);
408 cpudata->vid.min = int_tofp((value >> 8) & 0x3f); 412 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
409 cpudata->vid.max = int_tofp((value >> 16) & 0x3f); 413 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
410 cpudata->vid.ratio = div_fp( 414 cpudata->vid.ratio = div_fp(
411 cpudata->vid.max - cpudata->vid.min, 415 cpudata->vid.max - cpudata->vid.min,
412 int_tofp(cpudata->pstate.max_pstate - 416 int_tofp(cpudata->pstate.max_pstate -
@@ -448,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
448 u64 val; 452 u64 val;
449 453
450 val = pstate << 8; 454 val = pstate << 8;
451 if (limits.no_turbo) 455 if (limits.no_turbo && !limits.turbo_disabled)
452 val |= (u64)1 << 32; 456 val |= (u64)1 << 32;
453 457
454 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 458 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -696,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
696 700
697 cpu = all_cpu_data[cpunum]; 701 cpu = all_cpu_data[cpunum];
698 702
699 intel_pstate_get_cpu_pstates(cpu);
700
701 cpu->cpu = cpunum; 703 cpu->cpu = cpunum;
704 intel_pstate_get_cpu_pstates(cpu);
702 705
703 init_timer_deferrable(&cpu->timer); 706 init_timer_deferrable(&cpu->timer);
704 cpu->timer.function = intel_pstate_timer_func; 707 cpu->timer.function = intel_pstate_timer_func;
@@ -741,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
741 limits.min_perf = int_tofp(1); 744 limits.min_perf = int_tofp(1);
742 limits.max_perf_pct = 100; 745 limits.max_perf_pct = 100;
743 limits.max_perf = int_tofp(1); 746 limits.max_perf = int_tofp(1);
744 limits.no_turbo = 0; 747 limits.no_turbo = limits.turbo_disabled;
745 return 0; 748 return 0;
746 } 749 }
747 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 750 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -784,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
784{ 787{
785 struct cpudata *cpu; 788 struct cpudata *cpu;
786 int rc; 789 int rc;
790 u64 misc_en;
787 791
788 rc = intel_pstate_init_cpu(policy->cpu); 792 rc = intel_pstate_init_cpu(policy->cpu);
789 if (rc) 793 if (rc)
@@ -791,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
791 795
792 cpu = all_cpu_data[policy->cpu]; 796 cpu = all_cpu_data[policy->cpu];
793 797
794 if (!limits.no_turbo && 798 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
795 limits.min_perf_pct == 100 && limits.max_perf_pct == 100) 799 if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
800 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
801 limits.turbo_disabled = 1;
802 limits.no_turbo = 1;
803 }
804 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
796 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 805 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
797 else 806 else
798 policy->policy = CPUFREQ_POLICY_POWERSAVE; 807 policy->policy = CPUFREQ_POLICY_POWERSAVE;
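
Aside, not part of the patch: the intel_pstate hunks widen the BayTrail ratio and VID field masks from 6 bits (0x3F) to 7 bits (0x7F) and latch turbo_disabled from MSR_IA32_MISC_ENABLE. A standalone model of the field extraction with the corrected mask; the MSR value below is invented purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Extract a 7-bit ratio field from a BYT_RATIOS-style MSR value. */
static unsigned int ratio_field(uint64_t msr, unsigned int shift)
{
    return (msr >> shift) & 0x7F;   /* a 6-bit mask (0x3F) would drop the top bit */
}

int main(void)
{
    uint64_t byt_ratios = 0x0000000000402A00ULL;   /* example value only */

    printf("min pstate %u\n", ratio_field(byt_ratios, 8));    /* 42 */
    printf("max pstate %u\n", ratio_field(byt_ratios, 16));   /* 64; 0x3F would read 0 */
    return 0;
}
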
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
349 name = "K4S641632D"; 349 name = "K4S641632D";
350 if (machine_is_h3100()) 350 if (machine_is_h3100())
351 name = "KM416S4030CT"; 351 name = "KM416S4030CT";
352 if (machine_is_jornada720()) 352 if (machine_is_jornada720() || machine_is_h3600())
353 name = "K4S281632B-1H"; 353 name = "K4S281632B-1H";
354 if (machine_is_nanoengine()) 354 if (machine_is_nanoengine())
355 name = "MT48LC8M16A2TG-75"; 355 name = "MT48LC8M16A2TG-75";
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1d80bd3636c5..b512a4ba7569 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
453 int error; 453 int error;
454 454
455 jrdev = &pdev->dev; 455 jrdev = &pdev->dev;
456 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), 456 jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
457 GFP_KERNEL); 457 GFP_KERNEL);
458 if (!jrpriv) 458 if (!jrpriv)
459 return -ENOMEM; 459 return -ENOMEM;
460 460
@@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev)
487 487
488 /* Now do the platform independent part */ 488 /* Now do the platform independent part */
489 error = caam_jr_init(jrdev); /* now turn on hardware */ 489 error = caam_jr_init(jrdev); /* now turn on hardware */
490 if (error) { 490 if (error)
491 kfree(jrpriv);
492 return error; 491 return error;
493 }
494 492
495 jrpriv->dev = jrdev; 493 jrpriv->dev = jrdev;
496 spin_lock(&driver_data.jr_alloc_lock); 494 spin_lock(&driver_data.jr_alloc_lock);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d028f36ae655..8f8b0b608875 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -86,6 +86,9 @@
86 86
87#define USBSS_IRQ_PD_COMP (1 << 2) 87#define USBSS_IRQ_PD_COMP (1 << 2)
88 88
89/* Packet Descriptor */
90#define PD2_ZERO_LENGTH (1 << 19)
91
89struct cppi41_channel { 92struct cppi41_channel {
90 struct dma_chan chan; 93 struct dma_chan chan;
91 struct dma_async_tx_descriptor txd; 94 struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
307 __iormb(); 310 __iormb();
308 311
309 while (val) { 312 while (val) {
310 u32 desc; 313 u32 desc, len;
311 314
312 q_num = __fls(val); 315 q_num = __fls(val);
313 val &= ~(1 << q_num); 316 val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
319 q_num, desc); 322 q_num, desc);
320 continue; 323 continue;
321 } 324 }
322 c->residue = pd_trans_len(c->desc->pd6) -
323 pd_trans_len(c->desc->pd0);
324 325
326 if (c->desc->pd2 & PD2_ZERO_LENGTH)
327 len = 0;
328 else
329 len = pd_trans_len(c->desc->pd0);
330
331 c->residue = pd_trans_len(c->desc->pd6) - len;
325 dma_cookie_complete(&c->txd); 332 dma_cookie_complete(&c->txd);
326 c->txd.callback(c->txd.callback_param); 333 c->txd.callback(c->txd.callback_param);
327 } 334 }
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 128714622bf5..14867e3ac8ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -255,6 +255,7 @@ struct sdma_channel {
255 enum dma_slave_buswidth word_size; 255 enum dma_slave_buswidth word_size;
256 unsigned int buf_tail; 256 unsigned int buf_tail;
257 unsigned int num_bd; 257 unsigned int num_bd;
258 unsigned int period_len;
258 struct sdma_buffer_descriptor *bd; 259 struct sdma_buffer_descriptor *bd;
259 dma_addr_t bd_phys; 260 dma_addr_t bd_phys;
260 unsigned int pc_from_device, pc_to_device; 261 unsigned int pc_from_device, pc_to_device;
@@ -593,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
593 594
594static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 595static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
595{ 596{
597 if (sdmac->desc.callback)
598 sdmac->desc.callback(sdmac->desc.callback_param);
599}
600
601static void sdma_update_channel_loop(struct sdma_channel *sdmac)
602{
596 struct sdma_buffer_descriptor *bd; 603 struct sdma_buffer_descriptor *bd;
597 604
598 /* 605 /*
@@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
611 bd->mode.status |= BD_DONE; 618 bd->mode.status |= BD_DONE;
612 sdmac->buf_tail++; 619 sdmac->buf_tail++;
613 sdmac->buf_tail %= sdmac->num_bd; 620 sdmac->buf_tail %= sdmac->num_bd;
614
615 if (sdmac->desc.callback)
616 sdmac->desc.callback(sdmac->desc.callback_param);
617 } 621 }
618} 622}
619 623
@@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
669 int channel = fls(stat) - 1; 673 int channel = fls(stat) - 1;
670 struct sdma_channel *sdmac = &sdma->channel[channel]; 674 struct sdma_channel *sdmac = &sdma->channel[channel];
671 675
676 if (sdmac->flags & IMX_DMA_SG_LOOP)
677 sdma_update_channel_loop(sdmac);
678
672 tasklet_schedule(&sdmac->tasklet); 679 tasklet_schedule(&sdmac->tasklet);
673 680
674 __clear_bit(channel, &stat); 681 __clear_bit(channel, &stat);
@@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1129 sdmac->status = DMA_IN_PROGRESS; 1136 sdmac->status = DMA_IN_PROGRESS;
1130 1137
1131 sdmac->buf_tail = 0; 1138 sdmac->buf_tail = 0;
1139 sdmac->period_len = period_len;
1132 1140
1133 sdmac->flags |= IMX_DMA_SG_LOOP; 1141 sdmac->flags |= IMX_DMA_SG_LOOP;
1134 sdmac->direction = direction; 1142 sdmac->direction = direction;
@@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1225 struct dma_tx_state *txstate) 1233 struct dma_tx_state *txstate)
1226{ 1234{
1227 struct sdma_channel *sdmac = to_sdma_chan(chan); 1235 struct sdma_channel *sdmac = to_sdma_chan(chan);
1236 u32 residue;
1237
1238 if (sdmac->flags & IMX_DMA_SG_LOOP)
1239 residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
1240 else
1241 residue = sdmac->chn_count - sdmac->chn_real_count;
1228 1242
1229 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1243 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1230 sdmac->chn_count - sdmac->chn_real_count); 1244 residue);
1231 1245
1232 return sdmac->status; 1246 return sdmac->status;
1233} 1247}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4199849e3758..145974f9662b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,4 +1,5 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on HAS_DMA
2 depends on PCI || COMPILE_TEST 3 depends on PCI || COMPILE_TEST
3 # firewire-core does not depend on PCI but is 4 # firewire-core does not depend on PCI but is
4 # not useful without PCI controller driver 5 # not useful without PCI controller driver
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12f..a66a3217f1d9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, 336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
337 337
338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, 338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
339 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 339 QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
340 340
341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, 341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
342 0}, 342 QUIRK_NO_MSI},
343 343
344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, 344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index eff1a2f22f09..dc79346689e6 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@ static __initdata struct {
346 346
347struct param_info { 347struct param_info {
348 int verbose; 348 int verbose;
349 int found;
349 void *params; 350 void *params;
350}; 351};
351 352
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 363 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
363 return 0; 364 return 0;
364 365
365 pr_info("Getting parameters from FDT:\n");
366
367 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 366 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
368 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); 367 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
369 if (!prop) { 368 if (!prop)
370 pr_err("Can't find %s in device tree!\n",
371 dt_params[i].name);
372 return 0; 369 return 0;
373 }
374 dest = info->params + dt_params[i].offset; 370 dest = info->params + dt_params[i].offset;
371 info->found++;
375 372
376 val = of_read_number(prop, len / sizeof(u32)); 373 val = of_read_number(prop, len / sizeof(u32));
377 374
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
390int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) 387int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
391{ 388{
392 struct param_info info; 389 struct param_info info;
390 int ret;
391
392 pr_info("Getting EFI parameters from FDT:\n");
393 393
394 info.verbose = verbose; 394 info.verbose = verbose;
395 info.found = 0;
395 info.params = params; 396 info.params = params;
396 397
397 return of_scan_flat_dt(fdt_find_uefi_params, &info); 398 ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
399 if (!info.found)
400 pr_info("UEFI not found.\n");
401 else if (!ret)
402 pr_err("Can't find '%s' in device tree!\n",
403 dt_params[info.found].name);
404
405 return ret;
398} 406}
399#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 407#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 82d774161cc9..507a3df46a5d 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
23 u32 fdt_val32; 23 u32 fdt_val32;
24 u64 fdt_val64; 24 u64 fdt_val64;
25 25
26 /*
27 * Copy definition of linux_banner here. Since this code is
28 * built as part of the decompressor for ARM v7, pulling
29 * in version.c where linux_banner is defined for the
30 * kernel brings other kernel dependencies with it.
31 */
32 const char linux_banner[] =
33 "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
34 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
35
36 /* Do some checks on provided FDT, if it exists*/ 26 /* Do some checks on provided FDT, if it exists*/
37 if (orig_fdt) { 27 if (orig_fdt) {
38 if (fdt_check_header(orig_fdt)) { 28 if (fdt_check_header(orig_fdt)) {
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index fe7c0e211f9a..57adbc90fdad 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
900 if (spi_present_mask & (1 << addr)) 900 if (spi_present_mask & (1 << addr))
901 chips++; 901 chips++;
902 } 902 }
903 if (!chips)
904 return -ENODEV;
905 } else { 903 } else {
906 type = spi_get_device_id(spi)->driver_data; 904 type = spi_get_device_id(spi)->driver_data;
907 pdata = dev_get_platdata(&spi->dev); 905 pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
940 if (!(spi_present_mask & (1 << addr))) 938 if (!(spi_present_mask & (1 << addr)))
941 continue; 939 continue;
942 chips--; 940 chips--;
943 if (chips < 0) {
944 dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
945 goto fail;
946 }
947 data->mcp[addr] = &data->chip[chips]; 941 data->mcp[addr] = &data->chip[chips];
948 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 942 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
949 0x40 | (addr << 1), type, base, 943 0x40 | (addr << 1), type, base,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
284 284
285static struct irq_domain_ops gpio_rcar_irq_domain_ops = { 285static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
286 .map = gpio_rcar_irq_domain_map, 286 .map = gpio_rcar_irq_domain_map,
287 .xlate = irq_domain_xlate_twocell,
287}; 288};
288 289
289struct gpio_rcar_info { 290struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6c656392d67d..d44344140627 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1464,12 +1464,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1464#else 1464#else
1465static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 1465static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1466{ 1466{
1467 int ret; 1467 int ret = 0;
1468 1468
1469 DRM_INFO("Replacing VGA console driver\n"); 1469 DRM_INFO("Replacing VGA console driver\n");
1470 1470
1471 console_lock(); 1471 console_lock();
1472 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 1472 if (con_is_bound(&vga_con))
1473 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
1473 if (ret == 0) { 1474 if (ret == 0) {
1474 ret = do_unregister_con_driver(&vga_con); 1475 ret = do_unregister_con_driver(&vga_con);
1475 1476
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a47fbf60b781..374f964323ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -656,6 +656,7 @@ enum intel_sbi_destination {
656#define QUIRK_PIPEA_FORCE (1<<0) 656#define QUIRK_PIPEA_FORCE (1<<0)
657#define QUIRK_LVDS_SSC_DISABLE (1<<1) 657#define QUIRK_LVDS_SSC_DISABLE (1<<1)
658#define QUIRK_INVERT_BRIGHTNESS (1<<2) 658#define QUIRK_INVERT_BRIGHTNESS (1<<2)
659#define QUIRK_BACKLIGHT_PRESENT (1<<3)
659 660
660struct intel_fbdev; 661struct intel_fbdev;
661struct intel_fbc_work; 662struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d893e4da5dce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
1616 return ret; 1616 return ret;
1617} 1617}
1618 1618
1619void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1620{
1621 struct i915_vma *vma;
1622
1623 /*
1624 * Only the global gtt is relevant for gtt memory mappings, so restrict
1625 * list traversal to objects bound into the global address space. Note
1626 * that the active list should be empty, but better safe than sorry.
1627 */
1628 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1629 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1630 i915_gem_release_mmap(vma->obj);
1631 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1632 i915_gem_release_mmap(vma->obj);
1633}
1634
1635/** 1619/**
1636 * i915_gem_release_mmap - remove physical page mappings 1620 * i915_gem_release_mmap - remove physical page mappings
1637 * @obj: obj in question 1621 * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1657 obj->fault_mappable = false; 1641 obj->fault_mappable = false;
1658} 1642}
1659 1643
1644void
1645i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1646{
1647 struct drm_i915_gem_object *obj;
1648
1649 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1650 i915_gem_release_mmap(obj);
1651}
1652
1660uint32_t 1653uint32_t
1661i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1654i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1662{ 1655{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..34894b573064 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
31struct i915_render_state { 31struct i915_render_state {
32 struct drm_i915_gem_object *obj; 32 struct drm_i915_gem_object *obj;
33 unsigned long ggtt_offset; 33 unsigned long ggtt_offset;
34 void *batch; 34 u32 *batch;
35 u32 size; 35 u32 size;
36 u32 len; 36 u32 len;
37}; 37};
@@ -80,7 +80,7 @@ free:
80 80
81static void render_state_free(struct i915_render_state *so) 81static void render_state_free(struct i915_render_state *so)
82{ 82{
83 kunmap(so->batch); 83 kunmap(kmap_to_page(so->batch));
84 i915_gem_object_ggtt_unpin(so->obj); 84 i915_gem_object_ggtt_unpin(so->obj);
85 drm_gem_object_unreference(&so->obj->base); 85 drm_gem_object_unreference(&so->obj->base);
86 kfree(so); 86 kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62ef55ba061c..7465ab0fd396 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
74 if (base == 0) 74 if (base == 0)
75 return 0; 75 return 0;
76 76
77 /* make sure we don't clobber the GTT if it's within stolen memory */
78 if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
79 struct {
80 u32 start, end;
81 } stolen[2] = {
82 { .start = base, .end = base + dev_priv->gtt.stolen_size, },
83 { .start = base, .end = base + dev_priv->gtt.stolen_size, },
84 };
85 u64 gtt_start, gtt_end;
86
87 gtt_start = I915_READ(PGTBL_CTL);
88 if (IS_GEN4(dev))
89 gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
90 (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
91 else
92 gtt_start &= PGTBL_ADDRESS_LO_MASK;
93 gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
94
95 if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
96 stolen[0].end = gtt_start;
97 if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
98 stolen[1].start = gtt_end;
99
100 /* pick the larger of the two chunks */
101 if (stolen[0].end - stolen[0].start >
102 stolen[1].end - stolen[1].start) {
103 base = stolen[0].start;
104 dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
105 } else {
106 base = stolen[1].start;
107 dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
108 }
109
110 if (stolen[0].start != stolen[1].start ||
111 stolen[0].end != stolen[1].end) {
112 DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
113 (unsigned long long) gtt_start,
114 (unsigned long long) gtt_end - 1);
115 DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
116 base, base + (u32) dev_priv->gtt.stolen_size - 1);
117 }
118 }
119
120
77 /* Verify that nothing else uses this physical address. Stolen 121 /* Verify that nothing else uses this physical address. Stolen
78 * memory should be reserved by the BIOS and hidden from the 122 * memory should be reserved by the BIOS and hidden from the
79 * kernel. So if the region is already marked as busy, something 123 * kernel. So if the region is already marked as busy, something
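The stolen-memory fix above clips the GTT range out of the reported stolen range and keeps whichever of the two leftover pieces is larger. A minimal sketch of that interval arithmetic, with a hypothetical carve_out() helper rather than the i915 code itself:

	#include <linux/types.h>

	struct chunk { u32 start, end; };

	/* Remove [cut_start, cut_end) from [c.start, c.end) and return the
	 * larger of the two pieces that remain. */
	static struct chunk carve_out(struct chunk c, u32 cut_start, u32 cut_end)
	{
		struct chunk lo = c, hi = c;

		if (cut_start >= lo.start && cut_start < lo.end)
			lo.end = cut_start;
		if (cut_end > hi.start && cut_end <= hi.end)
			hi.start = cut_end;

		return (lo.end - lo.start > hi.end - hi.start) ? lo : hi;
	}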
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765ad..c05c84f3f091 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2845{ 2845{
2846 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2846 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2847 struct intel_engine_cs *signaller; 2847 struct intel_engine_cs *signaller;
2848 u32 seqno, ctl; 2848 u32 seqno;
2849 2849
2850 ring->hangcheck.deadlock++; 2850 ring->hangcheck.deadlock++;
2851 2851
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2858 return -1; 2858 return -1;
2859 2859
2860 /* cursory check for an unkickable deadlock */
2861 ctl = I915_READ_CTL(signaller);
2862 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2863 return -1;
2864
2865 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2860 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2866 return 1; 2861 return 1;
2867 2862
2868 if (signaller->hangcheck.deadlock) 2863 /* cursory check for an unkickable deadlock */
2864 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2865 semaphore_passed(signaller) < 0)
2869 return -1; 2866 return -1;
2870 2867
2871 return 0; 2868 return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e691b30b2817..a5bab61bfc00 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -942,6 +942,9 @@ enum punit_power_well {
942/* 942/*
943 * Instruction and interrupt control regs 943 * Instruction and interrupt control regs
944 */ 944 */
945#define PGTBL_CTL 0x02020
946#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
947#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
945#define PGTBL_ER 0x02024 948#define PGTBL_ER 0x02024
946#define RENDER_RING_BASE 0x02000 949#define RENDER_RING_BASE 0x02000
947#define BSD_RING_BASE 0x04000 950#define BSD_RING_BASE 0x04000
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 556c916dbf9d..f0be855ddf45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11591,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
11591 DRM_INFO("applying inverted panel brightness quirk\n"); 11591 DRM_INFO("applying inverted panel brightness quirk\n");
11592} 11592}
11593 11593
 11594/* Some VBTs incorrectly indicate no backlight is present */
11595static void quirk_backlight_present(struct drm_device *dev)
11596{
11597 struct drm_i915_private *dev_priv = dev->dev_private;
11598 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
11599 DRM_INFO("applying backlight present quirk\n");
11600}
11601
11594struct intel_quirk { 11602struct intel_quirk {
11595 int device; 11603 int device;
11596 int subsystem_vendor; 11604 int subsystem_vendor;
@@ -11659,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = {
11659 11667
11660 /* Acer Aspire 5336 */ 11668 /* Acer Aspire 5336 */
11661 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 11669 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
11670
11671 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
11672 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
11673
11674 /* Toshiba CB35 Chromebook (Celeron 2955U) */
11675 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
11676
11677 /* HP Chromebook 14 (Celeron 2955U) */
11678 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
11662}; 11679};
11663 11680
11664static void intel_init_quirks(struct drm_device *dev) 11681static void intel_init_quirks(struct drm_device *dev)
@@ -11897,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11897 * ... */ 11914 * ... */
11898 plane = crtc->plane; 11915 plane = crtc->plane;
11899 crtc->plane = !plane; 11916 crtc->plane = !plane;
11917 crtc->primary_enabled = true;
11900 dev_priv->display.crtc_disable(&crtc->base); 11918 dev_priv->display.crtc_disable(&crtc->base);
11901 crtc->plane = plane; 11919 crtc->plane = plane;
11902 11920
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 52fda950fd2a..8a1a4fbc06ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
31#include <drm/drmP.h> 33#include <drm/drmP.h>
32#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h> 35#include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
336 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 338 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
337} 339}
338 340
 341/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 342 This function is only applicable when panel PM state is not to be tracked. */
343static int edp_notify_handler(struct notifier_block *this, unsigned long code,
344 void *unused)
345{
346 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
347 edp_notifier);
348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
349 struct drm_i915_private *dev_priv = dev->dev_private;
350 u32 pp_div;
351 u32 pp_ctrl_reg, pp_div_reg;
352 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
353
354 if (!is_edp(intel_dp) || code != SYS_RESTART)
355 return 0;
356
357 if (IS_VALLEYVIEW(dev)) {
358 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
359 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
360 pp_div = I915_READ(pp_div_reg);
361 pp_div &= PP_REFERENCE_DIVIDER_MASK;
362
363 /* 0x1F write to PP_DIV_REG sets max cycle delay */
364 I915_WRITE(pp_div_reg, pp_div | 0x1F);
365 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
366 msleep(intel_dp->panel_power_cycle_delay);
367 }
368
369 return 0;
370}
371
339static bool edp_have_panel_power(struct intel_dp *intel_dp) 372static bool edp_have_panel_power(struct intel_dp *intel_dp)
340{ 373{
341 struct drm_device *dev = intel_dp_to_dev(intel_dp); 374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
873 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 906 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
874 bpp); 907 bpp);
875 908
876 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { 909 for (clock = min_clock; clock <= max_clock; clock++) {
877 for (clock = min_clock; clock <= max_clock; clock++) { 910 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
878 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 911 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
879 link_avail = intel_dp_max_data_rate(link_clock, 912 link_avail = intel_dp_max_data_rate(link_clock,
880 lane_count); 913 lane_count);
@@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3707 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 3740 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3708 edp_panel_vdd_off_sync(intel_dp); 3741 edp_panel_vdd_off_sync(intel_dp);
3709 drm_modeset_unlock(&dev->mode_config.connection_mutex); 3742 drm_modeset_unlock(&dev->mode_config.connection_mutex);
3743 if (intel_dp->edp_notifier.notifier_call) {
3744 unregister_reboot_notifier(&intel_dp->edp_notifier);
3745 intel_dp->edp_notifier.notifier_call = NULL;
3746 }
3710 } 3747 }
3711 kfree(intel_dig_port); 3748 kfree(intel_dig_port);
3712} 3749}
@@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4184 } 4221 }
4185 mutex_unlock(&dev->mode_config.mutex); 4222 mutex_unlock(&dev->mode_config.mutex);
4186 4223
4224 if (IS_VALLEYVIEW(dev)) {
4225 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
4226 register_reboot_notifier(&intel_dp->edp_notifier);
4227 }
4228
4187 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4229 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4188 intel_panel_setup_backlight(connector); 4230 intel_panel_setup_backlight(connector);
4189 4231
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eaa27ee9e367..f67340ed2c12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -538,6 +538,8 @@ struct intel_dp {
538 unsigned long last_power_on; 538 unsigned long last_power_on;
539 unsigned long last_backlight_off; 539 unsigned long last_backlight_off;
540 bool psr_setup_done; 540 bool psr_setup_done;
541 struct notifier_block edp_notifier;
542
541 bool use_tps3; 543 bool use_tps3;
542 struct intel_connector *attached_connector; 544 struct intel_connector *attached_connector;
543 545
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 02f99d768d49..3fd082933c87 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 117 /* bandgap reset is needed after every time we do power gate */ 117 /* bandgap reset is needed after every time we do power gate */
118 band_gap_reset(dev_priv); 118 band_gap_reset(dev_priv);
119 119
120 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
121 usleep_range(2500, 3000);
122
120 val = I915_READ(MIPI_PORT_CTRL(pipe)); 123 val = I915_READ(MIPI_PORT_CTRL(pipe));
121 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); 124 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
122 usleep_range(1000, 1500); 125 usleep_range(1000, 1500);
123 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); 126
124 usleep_range(2000, 2500); 127 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
125 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); 128 usleep_range(2500, 3000);
126 usleep_range(2000, 2500); 129
127 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
128 usleep_range(2000, 2500);
129 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); 130 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
130 usleep_range(2000, 2500); 131 usleep_range(2500, 3000);
131} 132}
132 133
133static void intel_dsi_enable(struct intel_encoder *encoder) 134static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
271 272
272 DRM_DEBUG_KMS("\n"); 273 DRM_DEBUG_KMS("\n");
273 274
274 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); 275 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
275 usleep_range(2000, 2500); 276 usleep_range(2000, 2500);
276 277
277 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); 278 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
278 usleep_range(2000, 2500); 279 usleep_range(2000, 2500);
279 280
280 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); 281 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
281 usleep_range(2000, 2500); 282 usleep_range(2000, 2500);
282 283
283 val = I915_READ(MIPI_PORT_CTRL(pipe));
284 I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
285 usleep_range(1000, 1500);
286
287 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) 284 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
288 == 0x00000), 30)) 285 == 0x00000), 30))
289 DRM_ERROR("DSI LP not going Low\n"); 286 DRM_ERROR("DSI LP not going Low\n");
290 287
288 val = I915_READ(MIPI_PORT_CTRL(pipe));
289 I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
290 usleep_range(1000, 1500);
291
291 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); 292 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
292 usleep_range(2000, 2500); 293 usleep_range(2000, 2500);
293 294
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 3eeb21b9fddf..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
404 else 404 else
405 cmd |= DPI_LP_MODE; 405 cmd |= DPI_LP_MODE;
406 406
407 /* DPI virtual channel?! */
408
409 mask = DPI_FIFO_EMPTY;
410 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
411 DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
412
413 /* clear bit */ 407 /* clear bit */
414 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT); 408 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
415 409
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 23126023aeba..5e5a72fca5fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
111 111
112 pipe_config->adjusted_mode.flags |= flags; 112 pipe_config->adjusted_mode.flags |= flags;
113 113
114 /* gen2/3 store dither state in pfit control, needs to match */
115 if (INTEL_INFO(dev)->gen < 4) {
116 tmp = I915_READ(PFIT_CONTROL);
117
118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
119 }
120
114 dotclock = pipe_config->port_clock; 121 dotclock = pipe_config->port_clock;
115 122
116 if (HAS_PCH_SPLIT(dev_priv->dev)) 123 if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 2e2c71fcc9ed..4f6b53998d79 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
403 403
404 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 404 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
405 405
406 /*
407 * If the acpi_video interface is not supposed to be used, don't
408 * bother processing backlight level change requests from firmware.
409 */
410 if (!acpi_video_verify_backlight_support()) {
411 DRM_DEBUG_KMS("opregion backlight request ignored\n");
412 return 0;
413 }
414
406 if (!(bclp & ASLE_BCLP_VALID)) 415 if (!(bclp & ASLE_BCLP_VALID))
407 return ASLC_BACKLIGHT_FAILED; 416 return ASLC_BACKLIGHT_FAILED;
408 417
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 38a98570d10c..12b02fe1d0ae 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
362 PFIT_FILTER_FUZZY); 362 PFIT_FILTER_FUZZY);
363 363
364 /* Make sure pre-965 set dither correctly for 18bpp panels. */
365 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
366 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
367
368out: 364out:
369 if ((pfit_control & PFIT_ENABLE) == 0) { 365 if ((pfit_control & PFIT_ENABLE) == 0) {
370 pfit_control = 0; 366 pfit_control = 0;
371 pfit_pgm_ratios = 0; 367 pfit_pgm_ratios = 0;
372 } 368 }
373 369
370 /* Make sure pre-965 set dither correctly for 18bpp panels. */
371 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
372 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
373
374 pipe_config->gmch_pfit.control = pfit_control; 374 pipe_config->gmch_pfit.control = pfit_control;
375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; 375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
376 pipe_config->gmch_pfit.lvds_border_bits = border; 376 pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -1118,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1118 int ret; 1118 int ret;
1119 1119
1120 if (!dev_priv->vbt.backlight.present) { 1120 if (!dev_priv->vbt.backlight.present) {
1121 DRM_DEBUG_KMS("native backlight control not available per VBT\n"); 1121 if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
1122 return 0; 1122 DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
1123 } else {
1124 DRM_DEBUG_KMS("no backlight present per VBT\n");
1125 return 0;
1126 }
1123 } 1127 }
1124 1128
1125 /* set level and max in panel struct */ 1129 /* set level and max in panel struct */
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 26e962b7e702..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1516 } 1516 }
1517 1517
1518 switch ((ctrl & 0x000f0000) >> 16) { 1518 switch ((ctrl & 0x000f0000) >> 16) {
1519 case 6: datarate = pclk * 30 / 8; break; 1519 case 6: datarate = pclk * 30; break;
1520 case 5: datarate = pclk * 24 / 8; break; 1520 case 5: datarate = pclk * 24; break;
1521 case 2: 1521 case 2:
1522 default: 1522 default:
1523 datarate = pclk * 18 / 8; 1523 datarate = pclk * 18;
1524 break; 1524 break;
1525 } 1525 }
1526 1526
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 48aa38a87e3f..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
1159 if (outp->info.type == DCB_OUTPUT_DP) { 1159 if (outp->info.type == DCB_OUTPUT_DP) {
1160 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300)); 1160 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1161 switch ((sync & 0x000003c0) >> 6) { 1161 switch ((sync & 0x000003c0) >> 6) {
1162 case 6: pclk = pclk * 30 / 8; break; 1162 case 6: pclk = pclk * 30; break;
1163 case 5: pclk = pclk * 24 / 8; break; 1163 case 5: pclk = pclk * 24; break;
1164 case 2: 1164 case 2:
1165 default: 1165 default:
1166 pclk = pclk * 18 / 8; 1166 pclk = pclk * 18;
1167 break; 1167 break;
1168 } 1168 }
1169 1169
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 52c299c3d300..eb2d7789555d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
34 struct nvkm_output_dp *outp = (void *)base; 34 struct nvkm_output_dp *outp = (void *)base;
35 bool retrain = true; 35 bool retrain = true;
36 u8 link[2], stat[3]; 36 u8 link[2], stat[3];
37 u32 rate; 37 u32 linkrate;
38 int ret, i; 38 int ret, i;
39 39
40 /* check that the link is trained at a high enough rate */ 40 /* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
44 goto done; 44 goto done;
45 } 45 }
46 46
47 rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET); 47 linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
48 if (rate < ((datarate / 8) * 10)) { 48 linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
49 datarate = (datarate + 9) / 10; /* -> decakilobits */
50 if (linkrate < datarate) {
49 DBG("link not trained at sufficient rate\n"); 51 DBG("link not trained at sufficient rate\n");
50 goto done; 52 goto done;
51 } 53 }
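The retraining check above now compares the trained link rate and the requested data rate in the same units: the raw link rate is derated for 8b/10b symbol coding, and the data rate is rounded up to decakilobits. A minimal sketch of that comparison (link_fast_enough() is a hypothetical helper; the 27000 factor and the two conversions mirror the hunk):

	#include <linux/types.h>

	/* bw_code: DPCD link-bandwidth code (multiples of 0.27 Gbps),
	 * lanes: trained lane count, datarate: required payload in kbit/s. */
	static bool link_fast_enough(u32 bw_code, u32 lanes, u32 datarate)
	{
		u32 linkrate = bw_code * 27000 * lanes;	/* 10 kbit/s units */

		linkrate = (linkrate * 8) / 10;	/* strip 8b/10b coding overhead */
		datarate = (datarate + 9) / 10;	/* kbit/s -> 10 kbit/s, rounded up */
		return linkrate >= datarate;
	}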
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index e1832778e8b6..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
87 struct nvkm_output_dp *outpdp = (void *)outp; 87 struct nvkm_output_dp *outpdp = (void *)outp;
88 switch (data) { 88 switch (data) {
89 case NV94_DISP_SOR_DP_PWR_STATE_OFF: 89 case NV94_DISP_SOR_DP_PWR_STATE_OFF:
90 nouveau_event_put(outpdp->irq);
90 ((struct nvkm_output_dp_impl *)nv_oclass(outp)) 91 ((struct nvkm_output_dp_impl *)nv_oclass(outp))
91 ->lnk_pwr(outpdp, 0); 92 ->lnk_pwr(outpdp, 0);
92 atomic_set(&outpdp->lt.done, 0); 93 atomic_set(&outpdp->lt.done, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
26 }; 26 };
27} 27}
28 28
29static inline struct ramfuc_reg 29static noinline struct ramfuc_reg
30ramfuc_reg(u32 addr) 30ramfuc_reg(u32 addr)
31{ 31{
32 return ramfuc_reg2(addr, addr); 32 return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
107 107
108#define ram_init(s,p) ramfuc_init(&(s)->base, (p)) 108#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e)) 109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
110#define ram_have(s,r) ((s)->r_##r.addr != 0x000000) 110#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r) 111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d)) 112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r) 113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 1ad3ea503133..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
200 /* (re)program mempll, if required */ 200 /* (re)program mempll, if required */
201 if (ram->mode == 2) { 201 if (ram->mode == 2) {
202 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000); 202 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
203 ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
203 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000); 204 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
204 ram_mask(fuc, 0x132004, 0x103fffff, mcoef); 205 ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
205 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001); 206 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, 192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
193 NOUVEAU_THERM_THRS_SHUTDOWN); 193 NOUVEAU_THERM_THRS_SHUTDOWN);
194 194
195 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
196
195 /* schedule the next poll in one second */ 197 /* schedule the next poll in one second */
196 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) 198 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
197 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); 199 ptimer->alarm(ptimer, 1000000000ULL, alarm);
198
199 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
200} 200}
201 201
202void 202void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
652 ret = nouveau_do_resume(drm_dev); 652 ret = nouveau_do_resume(drm_dev);
653 if (ret) 653 if (ret)
654 return ret; 654 return ret;
655 if (drm_dev->mode_config.num_crtc)
656 nouveau_fbcon_set_suspend(drm_dev, 0);
657 655
658 nouveau_fbcon_zfill_all(drm_dev); 656 if (drm_dev->mode_config.num_crtc) {
659 if (drm_dev->mode_config.num_crtc)
660 nouveau_display_resume(drm_dev); 657 nouveau_display_resume(drm_dev);
658 nouveau_fbcon_set_suspend(drm_dev, 0);
659 }
660
661 return 0; 661 return 0;
662} 662}
663 663
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
683 ret = nouveau_do_resume(drm_dev); 683 ret = nouveau_do_resume(drm_dev);
684 if (ret) 684 if (ret)
685 return ret; 685 return ret;
686 if (drm_dev->mode_config.num_crtc) 686
687 nouveau_fbcon_set_suspend(drm_dev, 0); 687 if (drm_dev->mode_config.num_crtc) {
688 nouveau_fbcon_zfill_all(drm_dev);
689 if (drm_dev->mode_config.num_crtc)
690 nouveau_display_resume(drm_dev); 688 nouveau_display_resume(drm_dev);
689 nouveau_fbcon_set_suspend(drm_dev, 0);
690 }
691
691 return 0; 692 return 0;
692} 693}
693 694
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 64a42cfd3717..191665ee7f52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
531 if (state == 1) 531 if (state == 1)
532 nouveau_fbcon_save_disable_accel(dev); 532 nouveau_fbcon_save_disable_accel(dev);
533 fb_set_suspend(drm->fbcon->helper.fbdev, state); 533 fb_set_suspend(drm->fbcon->helper.fbdev, state);
534 if (state == 0) 534 if (state == 0) {
535 nouveau_fbcon_restore_accel(dev); 535 nouveau_fbcon_restore_accel(dev);
536 nouveau_fbcon_zfill(dev, drm->fbcon);
537 }
536 console_unlock(); 538 console_unlock();
537 } 539 }
538} 540}
539
540void
541nouveau_fbcon_zfill_all(struct drm_device *dev)
542{
543 struct nouveau_drm *drm = nouveau_drm(dev);
544 if (drm->fbcon) {
545 nouveau_fbcon_zfill(dev, drm->fbcon);
546 }
547}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
61int nouveau_fbcon_init(struct drm_device *dev); 61int nouveau_fbcon_init(struct drm_device *dev);
62void nouveau_fbcon_fini(struct drm_device *dev); 62void nouveau_fbcon_fini(struct drm_device *dev);
63void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); 63void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
64void nouveau_fbcon_zfill_all(struct drm_device *dev);
65void nouveau_fbcon_save_disable_accel(struct drm_device *dev); 64void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
66void nouveau_fbcon_restore_accel(struct drm_device *dev); 65void nouveau_fbcon_restore_accel(struct drm_device *dev);
67 66
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index afdf607df3e6..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1741 } 1741 }
1742 } 1742 }
1743 1743
1744 mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; 1744 mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
1745 mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
1745 mthd |= nv_encoder->or; 1746 mthd |= nv_encoder->or;
1746 1747
1747 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 1748 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
33 33
34 pending = xchg(&qdev->ram_header->int_pending, 0); 34 pending = xchg(&qdev->ram_header->int_pending, 0);
35 35
36 if (!pending)
37 return IRQ_NONE;
38
36 atomic_inc(&qdev->irq_received); 39 atomic_inc(&qdev->irq_received);
37 40
38 if (pending & QXL_INTERRUPT_DISPLAY) { 41 if (pending & QXL_INTERRUPT_DISPLAY) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1416 1416
1417 /* set pageflip to happen anywhere in vblank interval */ 1417 /* set pageflip to happen only at start of vblank interval (front porch) */
1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1419 1419
1420 if (!atomic && fb && fb != crtc->primary->fb) { 1420 if (!atomic && fb && fb != crtc->primary->fb) {
1421 radeon_fb = to_radeon_framebuffer(fb); 1421 radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1616 1616
1617 /* set pageflip to happen anywhere in vblank interval */ 1617 /* set pageflip to happen only at start of vblank interval (front porch) */
1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1619 1619
1620 if (!atomic && fb && fb != crtc->primary->fb) { 1620 if (!atomic && fb && fb != crtc->primary->fb) {
1621 radeon_fb = to_radeon_framebuffer(fb); 1621 radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 35f4182c63b6..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
127 /* flags not zero */ 127 /* flags not zero */
128 if (args.v1.ucReplyStatus == 2) { 128 if (args.v1.ucReplyStatus == 2) {
129 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); 129 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
130 r = -EBUSY; 130 r = -EIO;
131 goto done; 131 goto done;
132 } 132 }
133 133
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
183 struct backlight_properties props; 183 struct backlight_properties props;
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level;
187 char bl_name[16]; 186 char bl_name[16];
188 187
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight 188 /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
222 221
223 pdata->encoder = radeon_encoder; 222 pdata->encoder = radeon_encoder;
224 223
225 backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
226
227 dig = radeon_encoder->enc_priv; 224 dig = radeon_encoder->enc_priv;
228 dig->bl_dev = bd; 225 dig->bl_dev = bd;
229 226
230 bd->props.brightness = radeon_atom_backlight_get_brightness(bd); 227 bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
 228 /* Set a reasonable default here if the level is 0; otherwise
 229 * fbdev will attempt to turn the backlight on after console
 230 * unblanking and will try to restore 0, which turns the backlight
231 * off again.
232 */
233 if (bd->props.brightness == 0)
234 bd->props.brightness = RADEON_MAX_BL_LEVEL;
231 bd->props.power = FB_BLANK_UNBLANK; 235 bd->props.power = FB_BLANK_UNBLANK;
232 backlight_update_status(bd); 236 backlight_update_status(bd);
233 237
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..584090ac3eb9 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
1179 tmp &= ~GLOBAL_PWRMGT_EN; 1179 tmp &= ~GLOBAL_PWRMGT_EN;
1180 WREG32_SMC(GENERAL_PWRMGT, tmp); 1180 WREG32_SMC(GENERAL_PWRMGT, tmp);
1181 1181
1182 tmp = RREG32(SCLK_PWRMGT_CNTL); 1182 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1183 tmp &= ~DYNAMIC_PM_EN; 1183 tmp &= ~DYNAMIC_PM_EN;
1184 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1184 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1185 1185
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index dcd4518a9b08..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2291 gb_tile_moden = 0; 2291 gb_tile_moden = 0;
2292 break; 2292 break;
2293 } 2293 }
2294 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2294 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2295 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2295 } 2296 }
2296 } else if (num_pipe_configs == 8) { 2297 } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7376 tmp = RREG32(IH_RB_CNTL); 7377 tmp = RREG32(IH_RB_CNTL);
7377 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7378 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7378 WREG32(IH_RB_CNTL, tmp); 7379 WREG32(IH_RB_CNTL, tmp);
7380 wptr &= ~RB_OVERFLOW;
7379 } 7381 }
7380 return (wptr & rdev->ih.ptr_mask); 7382 return (wptr & rdev->ih.ptr_mask);
7381} 7383}
@@ -7676,14 +7678,16 @@ restart_ih:
7676 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 7678 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
7677 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 7679 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
7678 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 7680 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
7681 /* reset addr and status */
7682 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7683 if (addr == 0x0 && status == 0x0)
7684 break;
7679 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 7685 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
7680 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 7686 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
7681 addr); 7687 addr);
7682 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 7688 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
7683 status); 7689 status);
7684 cik_vm_decode_fault(rdev, status, addr, mc_client); 7690 cik_vm_decode_fault(rdev, status, addr, mc_client);
7685 /* reset addr and status */
7686 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7687 break; 7691 break;
7688 case 167: /* VCE */ 7692 case 167: /* VCE */
7689 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data); 7693 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e2f605224e8c..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
189 0x8c1c, 0xffffffff, 0x00001010, 189 0x8c1c, 0xffffffff, 0x00001010,
190 0x28350, 0xffffffff, 0x00000000, 190 0x28350, 0xffffffff, 0x00000000,
191 0xa008, 0xffffffff, 0x00010000, 191 0xa008, 0xffffffff, 0x00010000,
192 0x5cc, 0xffffffff, 0x00000001, 192 0x5c4, 0xffffffff, 0x00000001,
193 0x9508, 0xffffffff, 0x00000002, 193 0x9508, 0xffffffff, 0x00000002,
194 0x913c, 0x0000000f, 0x0000000a 194 0x913c, 0x0000000f, 0x0000000a
195}; 195};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
476 0x8c1c, 0xffffffff, 0x00001010, 476 0x8c1c, 0xffffffff, 0x00001010,
477 0x28350, 0xffffffff, 0x00000000, 477 0x28350, 0xffffffff, 0x00000000,
478 0xa008, 0xffffffff, 0x00010000, 478 0xa008, 0xffffffff, 0x00010000,
479 0x5cc, 0xffffffff, 0x00000001, 479 0x5c4, 0xffffffff, 0x00000001,
480 0x9508, 0xffffffff, 0x00000002 480 0x9508, 0xffffffff, 0x00000002
481}; 481};
482 482
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
635static const u32 supersumo_golden_registers[] = 635static const u32 supersumo_golden_registers[] =
636{ 636{
637 0x5eb4, 0xffffffff, 0x00000002, 637 0x5eb4, 0xffffffff, 0x00000002,
638 0x5cc, 0xffffffff, 0x00000001, 638 0x5c4, 0xffffffff, 0x00000001,
639 0x7030, 0xffffffff, 0x00000011, 639 0x7030, 0xffffffff, 0x00000011,
640 0x7c30, 0xffffffff, 0x00000011, 640 0x7c30, 0xffffffff, 0x00000011,
641 0x6104, 0x01000300, 0x00000000, 641 0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
719static const u32 wrestler_golden_registers[] = 719static const u32 wrestler_golden_registers[] =
720{ 720{
721 0x5eb4, 0xffffffff, 0x00000002, 721 0x5eb4, 0xffffffff, 0x00000002,
722 0x5cc, 0xffffffff, 0x00000001, 722 0x5c4, 0xffffffff, 0x00000001,
723 0x7030, 0xffffffff, 0x00000011, 723 0x7030, 0xffffffff, 0x00000011,
724 0x7c30, 0xffffffff, 0x00000011, 724 0x7c30, 0xffffffff, 0x00000011,
725 0x6104, 0x01000300, 0x00000000, 725 0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2642 for (i = 0; i < rdev->num_crtc; i++) { 2642 for (i = 0; i < rdev->num_crtc; i++) {
2643 if (save->crtc_enabled[i]) { 2643 if (save->crtc_enabled[i]) {
2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); 2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2645 if ((tmp & 0x3) != 0) { 2645 if ((tmp & 0x7) != 3) {
2646 tmp &= ~0x3; 2646 tmp &= ~0x7;
2647 tmp |= 0x3;
2647 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 2648 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2648 } 2649 }
2649 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 2650 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
4755 tmp = RREG32(IH_RB_CNTL); 4756 tmp = RREG32(IH_RB_CNTL);
4756 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4757 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4757 WREG32(IH_RB_CNTL, tmp); 4758 WREG32(IH_RB_CNTL, tmp);
4759 wptr &= ~RB_OVERFLOW;
4758 } 4760 }
4759 return (wptr & rdev->ih.ptr_mask); 4761 return (wptr & rdev->ih.ptr_mask);
4760} 4762}
@@ -5066,14 +5068,16 @@ restart_ih:
5066 case 147: 5068 case 147:
5067 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 5069 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
5068 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 5070 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
5071 /* reset addr and status */
5072 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5073 if (addr == 0x0 && status == 0x0)
5074 break;
5069 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 5075 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5070 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 5076 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5071 addr); 5077 addr);
5072 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 5078 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5073 status); 5079 status);
5074 cayman_vm_decode_fault(rdev, status, addr); 5080 cayman_vm_decode_fault(rdev, status, addr);
5075 /* reset addr and status */
5076 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5077 break; 5081 break;
5078 case 176: /* CP_INT in ring buffer */ 5082 case 176: /* CP_INT in ring buffer */
5079 case 177: /* CP_INT in IB1 */ 5083 case 177: /* CP_INT in IB1 */
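Both the evergreen handler above and the matching SI hunk further down reorder the VM protection-fault path: the fault registers are acknowledged first, and obviously spurious entries (address and status both zero) are dropped before anything is printed. A compact model of that flow, with the register accesses reduced to stub functions (names hypothetical):

```c
#include <stdio.h>
#include <stdint.h>

static void ack_vm_fault(void)
{
    /* stands in for WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1) */
}

static void handle_vm_fault(uint32_t addr, uint32_t status)
{
    ack_vm_fault();               /* reset addr/status first     */
    if (addr == 0 && status == 0) /* already handled: stay quiet */
        return;
    printf("GPU fault: addr=0x%08x status=0x%08x\n", addr, status);
}

int main(void)
{
    handle_vm_fault(0x00400000, 0x0c);  /* real fault: reported  */
    handle_vm_fault(0, 0);              /* duplicate: suppressed */
    return 0;
}
```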
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
239# define EVERGREEN_CRTC_V_BLANK (1 << 0) 239# define EVERGREEN_CRTC_V_BLANK (1 << 0)
240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
242#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
243#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 242#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
244#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 243#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
245#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 244#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3795 tmp = RREG32(IH_RB_CNTL); 3795 tmp = RREG32(IH_RB_CNTL);
3796 tmp |= IH_WPTR_OVERFLOW_CLEAR; 3796 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3797 WREG32(IH_RB_CNTL, tmp); 3797 WREG32(IH_RB_CNTL, tmp);
3798 wptr &= ~RB_OVERFLOW;
3798 } 3799 }
3799 return (wptr & rdev->ih.ptr_mask); 3800 return (wptr & rdev->ih.ptr_mask);
3800} 3801}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
449 449
450 /* protected by vm mutex */ 450 /* protected by vm mutex */
451 struct list_head vm_list; 451 struct list_head vm_list;
452 struct list_head vm_status;
452 453
453 /* constant after initialization */ 454 /* constant after initialization */
454 struct radeon_vm *vm; 455 struct radeon_vm *vm;
@@ -684,10 +685,9 @@ struct radeon_flip_work {
684 struct work_struct unpin_work; 685 struct work_struct unpin_work;
685 struct radeon_device *rdev; 686 struct radeon_device *rdev;
686 int crtc_id; 687 int crtc_id;
687 struct drm_framebuffer *fb; 688 uint64_t base;
688 struct drm_pending_vblank_event *event; 689 struct drm_pending_vblank_event *event;
689 struct radeon_bo *old_rbo; 690 struct radeon_bo *old_rbo;
690 struct radeon_bo *new_rbo;
691 struct radeon_fence *fence; 691 struct radeon_fence *fence;
692}; 692};
693 693
@@ -868,6 +868,9 @@ struct radeon_vm {
868 struct list_head va; 868 struct list_head va;
869 unsigned id; 869 unsigned id;
870 870
871 /* BOs freed, but not yet updated in the PT */
872 struct list_head freed;
873
871 /* contains the page directory */ 874 /* contains the page directory */
872 struct radeon_bo *page_directory; 875 struct radeon_bo *page_directory;
873 uint64_t pd_gpu_addr; 876 uint64_t pd_gpu_addr;
@@ -876,6 +879,8 @@ struct radeon_vm {
876 /* array of page tables, one for each page directory entry */ 879 /* array of page tables, one for each page directory entry */
877 struct radeon_vm_pt *page_tables; 880 struct radeon_vm_pt *page_tables;
878 881
882 struct radeon_bo_va *ib_bo_va;
883
879 struct mutex mutex; 884 struct mutex mutex;
880 /* last fence for cs using this vm */ 885 /* last fence for cs using this vm */
881 struct radeon_fence *fence; 886 struct radeon_fence *fence;
@@ -2833,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2833uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2838uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2834int radeon_vm_update_page_directory(struct radeon_device *rdev, 2839int radeon_vm_update_page_directory(struct radeon_device *rdev,
2835 struct radeon_vm *vm); 2840 struct radeon_vm *vm);
2841int radeon_vm_clear_freed(struct radeon_device *rdev,
2842 struct radeon_vm *vm);
2836int radeon_vm_bo_update(struct radeon_device *rdev, 2843int radeon_vm_bo_update(struct radeon_device *rdev,
2837 struct radeon_vm *vm, 2844 struct radeon_bo_va *bo_va,
2838 struct radeon_bo *bo,
2839 struct ttm_mem_reg *mem); 2845 struct ttm_mem_reg *mem);
2840void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2846void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2841 struct radeon_bo *bo); 2847 struct radeon_bo *bo);
@@ -2848,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2848 struct radeon_bo_va *bo_va, 2854 struct radeon_bo_va *bo_va,
2849 uint64_t offset, 2855 uint64_t offset,
2850 uint32_t flags); 2856 uint32_t flags);
2851int radeon_vm_bo_rmv(struct radeon_device *rdev, 2857void radeon_vm_bo_rmv(struct radeon_device *rdev,
2852 struct radeon_bo_va *bo_va); 2858 struct radeon_bo_va *bo_va);
2853 2859
2854/* audio */ 2860/* audio */
2855void r600_audio_update_hdmi(struct work_struct *work); 2861void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
461 struct radeon_vm *vm) 461 struct radeon_vm *vm)
462{ 462{
463 struct radeon_device *rdev = p->rdev; 463 struct radeon_device *rdev = p->rdev;
464 struct radeon_bo_va *bo_va;
464 int i, r; 465 int i, r;
465 466
466 r = radeon_vm_update_page_directory(rdev, vm); 467 r = radeon_vm_update_page_directory(rdev, vm);
467 if (r) 468 if (r)
468 return r; 469 return r;
469 470
470 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, 471 r = radeon_vm_clear_freed(rdev, vm);
472 if (r)
473 return r;
474
475 if (vm->ib_bo_va == NULL) {
476 DRM_ERROR("Tmp BO not in VM!\n");
477 return -EINVAL;
478 }
479
480 r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
471 &rdev->ring_tmp_bo.bo->tbo.mem); 481 &rdev->ring_tmp_bo.bo->tbo.mem);
472 if (r) 482 if (r)
473 return r; 483 return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
480 continue; 490 continue;
481 491
482 bo = p->relocs[i].robj; 492 bo = p->relocs[i].robj;
483 r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); 493 bo_va = radeon_vm_bo_find(vm, bo);
494 if (bo_va == NULL) {
495 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
496 return -EINVAL;
497 }
498
499 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
484 if (r) 500 if (r)
485 return r; 501 return r;
486 } 502 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1056 if (!radeon_check_pot_argument(radeon_vm_size)) { 1056 if (!radeon_check_pot_argument(radeon_vm_size)) {
1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1058 radeon_vm_size); 1058 radeon_vm_size);
1059 radeon_vm_size = 4096; 1059 radeon_vm_size = 4;
1060 } 1060 }
1061 1061
1062 if (radeon_vm_size < 4) { 1062 if (radeon_vm_size < 1) {
1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", 1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1064 radeon_vm_size); 1064 radeon_vm_size);
1065 radeon_vm_size = 4096; 1065 radeon_vm_size = 4;
1066 } 1066 }
1067 1067
1068 /* 1068 /*
1069 * Max GPUVM size for Cayman, SI and CI are 40 bits. 1069 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1070 */ 1070 */
1071 if (radeon_vm_size > 1024*1024) { 1071 if (radeon_vm_size > 1024) {
1072 dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", 1072 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1073 radeon_vm_size); 1073 radeon_vm_size);
1074 radeon_vm_size = 4096; 1074 radeon_vm_size = 4;
1075 } 1075 }
1076 1076
1077 /* defines number of bits in page table versus page directory, 1077 /* defines number of bits in page table versus page directory,
1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1079 * page table and the remaining bits are in the page directory */ 1079 * page table and the remaining bits are in the page directory */
1080 if (radeon_vm_block_size < 9) { 1080 if (radeon_vm_block_size < 9) {
1081 dev_warn(rdev->dev, "VM page table size (%d) to small\n", 1081 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1082 radeon_vm_block_size); 1082 radeon_vm_block_size);
1083 radeon_vm_block_size = 9; 1083 radeon_vm_block_size = 9;
1084 } 1084 }
1085 1085
1086 if (radeon_vm_block_size > 24 || 1086 if (radeon_vm_block_size > 24 ||
1087 radeon_vm_size < (1ull << radeon_vm_block_size)) { 1087 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1088 dev_warn(rdev->dev, "VM page table size (%d) to large\n", 1088 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1089 radeon_vm_block_size); 1089 radeon_vm_block_size);
1090 radeon_vm_block_size = 9; 1090 radeon_vm_block_size = 9;
1091 } 1091 }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
1238 /* Adjust VM size here. 1238 /* Adjust VM size here.
1239 * Max GPUVM size for cayman+ is 40 bits. 1239 * Max GPUVM size for cayman+ is 40 bits.
1240 */ 1240 */
1241 rdev->vm_manager.max_pfn = radeon_vm_size << 8; 1241 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1242 1242
1243 /* Set asic functions */ 1243 /* Set asic functions */
1244 r = radeon_asic_init(rdev); 1244 r = radeon_asic_init(rdev);
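The radeon_device.c hunk switches the vm_size parameter from megabytes to gigabytes, so the page-frame count changes from radeon_vm_size << 8 (2^20/2^12 pages per megabyte) to radeon_vm_size << 18 (2^30/2^12 pages per gigabyte), using the 4 KB page size stated in the comment above. A quick check of that arithmetic as a stand-alone program:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned vm_size_gb = 4;                     /* new default, in gigabytes */
    uint64_t bytes   = (uint64_t)vm_size_gb << 30;
    uint64_t max_pfn = bytes >> 12;              /* 4 KB GPU pages            */

    /* equivalent to the driver's rdev->vm_manager.max_pfn = radeon_vm_size << 18 */
    printf("max_pfn = %llu (shift form: %llu)\n",
           (unsigned long long)max_pfn,
           (unsigned long long)((uint64_t)vm_size_gb << 18));
    return 0;
}
```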
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
367 367
368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
369 radeon_fence_unref(&work->fence);
370 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 369 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
371 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 370 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
372} 371}
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
386 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; 385 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
387 386
388 struct drm_crtc *crtc = &radeon_crtc->base; 387 struct drm_crtc *crtc = &radeon_crtc->base;
389 struct drm_framebuffer *fb = work->fb;
390
391 uint32_t tiling_flags, pitch_pixels;
392 uint64_t base;
393
394 unsigned long flags; 388 unsigned long flags;
395 int r; 389 int r;
396 390
397 down_read(&rdev->exclusive_lock); 391 down_read(&rdev->exclusive_lock);
398 while (work->fence) { 392 if (work->fence) {
399 r = radeon_fence_wait(work->fence, false); 393 r = radeon_fence_wait(work->fence, false);
400 if (r == -EDEADLK) { 394 if (r == -EDEADLK) {
401 up_read(&rdev->exclusive_lock); 395 up_read(&rdev->exclusive_lock);
402 r = radeon_gpu_reset(rdev); 396 r = radeon_gpu_reset(rdev);
403 down_read(&rdev->exclusive_lock); 397 down_read(&rdev->exclusive_lock);
404 } 398 }
399 if (r)
400 DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
405 401
406 if (r) { 402 /* We continue with the page flip even if we failed to wait on
407 DRM_ERROR("failed to wait on page flip fence (%d)!\n", 403 * the fence, otherwise the DRM core and userspace will be
408 r); 404 * confused about which BO the CRTC is scanning out
409 goto cleanup; 405 */
410 } else 406
411 radeon_fence_unref(&work->fence); 407 radeon_fence_unref(&work->fence);
412 } 408 }
413 409
410 /* We borrow the event spin lock for protecting flip_status */
411 spin_lock_irqsave(&crtc->dev->event_lock, flags);
412
413 /* set the proper interrupt */
414 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
415
416 /* do the flip (mmio) */
417 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
418
419 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
420 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
421 up_read(&rdev->exclusive_lock);
422}
423
424static int radeon_crtc_page_flip(struct drm_crtc *crtc,
425 struct drm_framebuffer *fb,
426 struct drm_pending_vblank_event *event,
427 uint32_t page_flip_flags)
428{
429 struct drm_device *dev = crtc->dev;
430 struct radeon_device *rdev = dev->dev_private;
431 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
432 struct radeon_framebuffer *old_radeon_fb;
433 struct radeon_framebuffer *new_radeon_fb;
434 struct drm_gem_object *obj;
435 struct radeon_flip_work *work;
436 struct radeon_bo *new_rbo;
437 uint32_t tiling_flags, pitch_pixels;
438 uint64_t base;
439 unsigned long flags;
440 int r;
441
442 work = kzalloc(sizeof *work, GFP_KERNEL);
443 if (work == NULL)
444 return -ENOMEM;
445
446 INIT_WORK(&work->flip_work, radeon_flip_work_func);
447 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
448
449 work->rdev = rdev;
450 work->crtc_id = radeon_crtc->crtc_id;
451 work->event = event;
452
453 /* schedule unpin of the old buffer */
454 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
455 obj = old_radeon_fb->obj;
456
457 /* take a reference to the old object */
458 drm_gem_object_reference(obj);
459 work->old_rbo = gem_to_radeon_bo(obj);
460
461 new_radeon_fb = to_radeon_framebuffer(fb);
462 obj = new_radeon_fb->obj;
463 new_rbo = gem_to_radeon_bo(obj);
464
465 spin_lock(&new_rbo->tbo.bdev->fence_lock);
466 if (new_rbo->tbo.sync_obj)
467 work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
468 spin_unlock(&new_rbo->tbo.bdev->fence_lock);
469
414 /* pin the new buffer */ 470 /* pin the new buffer */
415 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 471 DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
416 work->old_rbo, work->new_rbo); 472 work->old_rbo, new_rbo);
417 473
418 r = radeon_bo_reserve(work->new_rbo, false); 474 r = radeon_bo_reserve(new_rbo, false);
419 if (unlikely(r != 0)) { 475 if (unlikely(r != 0)) {
420 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 476 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
421 goto cleanup; 477 goto cleanup;
422 } 478 }
423 /* Only 27 bit offset for legacy CRTC */ 479 /* Only 27 bit offset for legacy CRTC */
424 r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, 480 r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
425 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 481 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
426 if (unlikely(r != 0)) { 482 if (unlikely(r != 0)) {
427 radeon_bo_unreserve(work->new_rbo); 483 radeon_bo_unreserve(new_rbo);
428 r = -EINVAL; 484 r = -EINVAL;
429 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 485 DRM_ERROR("failed to pin new rbo buffer before flip\n");
430 goto cleanup; 486 goto cleanup;
431 } 487 }
432 radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); 488 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
433 radeon_bo_unreserve(work->new_rbo); 489 radeon_bo_unreserve(new_rbo);
434 490
435 if (!ASIC_IS_AVIVO(rdev)) { 491 if (!ASIC_IS_AVIVO(rdev)) {
436 /* crtc offset is from display base addr not FB location */ 492 /* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
467 } 523 }
468 base &= ~7; 524 base &= ~7;
469 } 525 }
526 work->base = base;
470 527
471 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 528 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
472 if (r) { 529 if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
477 /* We borrow the event spin lock for protecting flip_work */ 534 /* We borrow the event spin lock for protecting flip_work */
478 spin_lock_irqsave(&crtc->dev->event_lock, flags); 535 spin_lock_irqsave(&crtc->dev->event_lock, flags);
479 536
480 /* set the proper interrupt */ 537 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
481 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 538 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
539 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
540 r = -EBUSY;
541 goto vblank_cleanup;
542 }
543 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
544 radeon_crtc->flip_work = work;
482 545
483 /* do the flip (mmio) */ 546 /* update crtc fb */
484 radeon_page_flip(rdev, radeon_crtc->crtc_id, base); 547 crtc->primary->fb = fb;
485 548
486 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
487 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 549 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
488 up_read(&rdev->exclusive_lock);
489 550
490 return; 551 queue_work(radeon_crtc->flip_queue, &work->flip_work);
552 return 0;
553
554vblank_cleanup:
555 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
491 556
492pflip_cleanup: 557pflip_cleanup:
493 if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { 558 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
494 DRM_ERROR("failed to reserve new rbo in error path\n"); 559 DRM_ERROR("failed to reserve new rbo in error path\n");
495 goto cleanup; 560 goto cleanup;
496 } 561 }
497 if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { 562 if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
498 DRM_ERROR("failed to unpin new rbo in error path\n"); 563 DRM_ERROR("failed to unpin new rbo in error path\n");
499 } 564 }
500 radeon_bo_unreserve(work->new_rbo); 565 radeon_bo_unreserve(new_rbo);
501 566
502cleanup: 567cleanup:
503 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 568 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
504 radeon_fence_unref(&work->fence); 569 radeon_fence_unref(&work->fence);
505 kfree(work); 570 kfree(work);
506 up_read(&rdev->exclusive_lock);
507}
508
509static int radeon_crtc_page_flip(struct drm_crtc *crtc,
510 struct drm_framebuffer *fb,
511 struct drm_pending_vblank_event *event,
512 uint32_t page_flip_flags)
513{
514 struct drm_device *dev = crtc->dev;
515 struct radeon_device *rdev = dev->dev_private;
516 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
517 struct radeon_framebuffer *old_radeon_fb;
518 struct radeon_framebuffer *new_radeon_fb;
519 struct drm_gem_object *obj;
520 struct radeon_flip_work *work;
521 unsigned long flags;
522
523 work = kzalloc(sizeof *work, GFP_KERNEL);
524 if (work == NULL)
525 return -ENOMEM;
526
527 INIT_WORK(&work->flip_work, radeon_flip_work_func);
528 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
529
530 work->rdev = rdev;
531 work->crtc_id = radeon_crtc->crtc_id;
532 work->fb = fb;
533 work->event = event;
534
535 /* schedule unpin of the old buffer */
536 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
537 obj = old_radeon_fb->obj;
538
539 /* take a reference to the old object */
540 drm_gem_object_reference(obj);
541 work->old_rbo = gem_to_radeon_bo(obj);
542
543 new_radeon_fb = to_radeon_framebuffer(fb);
544 obj = new_radeon_fb->obj;
545 work->new_rbo = gem_to_radeon_bo(obj);
546
547 spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
548 if (work->new_rbo->tbo.sync_obj)
549 work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
550 spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
551
552 /* We borrow the event spin lock for protecting flip_work */
553 spin_lock_irqsave(&crtc->dev->event_lock, flags);
554 571
555 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { 572 return r;
556 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
557 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
558 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
559 radeon_fence_unref(&work->fence);
560 kfree(work);
561 return -EBUSY;
562 }
563 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
564 radeon_crtc->flip_work = work;
565
566 /* update crtc fb */
567 crtc->primary->fb = fb;
568
569 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
570
571 queue_work(radeon_crtc->flip_queue, &work->flip_work);
572
573 return 0;
574} 573}
575 574
576static int 575static int
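The radeon_display.c rework above moves everything that can fail, taking a reference on the old buffer, pinning the new one and computing its scan-out address, out of the deferred work function and into radeon_crtc_page_flip(); the worker now only waits on the fence and writes the flip, which is why struct radeon_flip_work carries a precomputed base instead of the framebuffer and new BO pointers. A toy model of that split (all names hypothetical):

```c
#include <stdio.h>
#include <stdint.h>

struct flip_work {
    uint64_t base;   /* scan-out address, computed up front */
    int      fence;  /* stand-in for the fence to wait on   */
};

/* queued side: runs later, must not fail */
static void flip_worker(struct flip_work *work)
{
    if (work->fence) {
        /* wait here; even on a failed wait the flip still goes ahead,
         * so the DRM core's idea of the scanned-out BO stays correct */
    }
    printf("program flip to 0x%llx\n", (unsigned long long)work->base);
}

/* ioctl side: do the fallible preparation, then hand off */
static int page_flip(struct flip_work *work, uint64_t pinned_base, int fence)
{
    work->base  = pinned_base;  /* pin + address computation done here */
    work->fence = fence;
    flip_worker(work);          /* the driver queues this instead      */
    return 0;
}

int main(void)
{
    struct flip_work w;
    return page_flip(&w, 0x100000, 0);
}
```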
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
830 struct radeon_device *rdev = dev->dev_private; 829 struct radeon_device *rdev = dev->dev_private;
831 int ret = 0; 830 int ret = 0;
832 831
832 /* don't leak the edid if we already fetched it in detect() */
833 if (radeon_connector->edid)
834 goto got_edid;
835
833 /* on hw with routers, select right port */ 836 /* on hw with routers, select right port */
834 if (radeon_connector->router.ddc_valid) 837 if (radeon_connector->router.ddc_valid)
835 radeon_router_select_ddc_port(radeon_connector); 838 radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
868 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 871 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
869 } 872 }
870 if (radeon_connector->edid) { 873 if (radeon_connector->edid) {
874got_edid:
871 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 875 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
872 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 876 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
873 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); 877 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
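The radeon_ddc_get_modes() hunk keeps an EDID that was already fetched in detect() instead of fetching a second copy and leaking the first. The pattern, reduced to a stand-alone sketch (structure and fetch function are hypothetical):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct connector {
    char *edid;   /* cached blob, may have been filled earlier */
};

static char *fetch_edid(void)
{
    return strdup("edid-blob");   /* stands in for the DDC transfer */
}

static const char *get_edid(struct connector *c)
{
    if (c->edid)          /* don't leak the earlier copy */
        return c->edid;
    c->edid = fetch_edid();
    return c->edid;
}

int main(void)
{
    struct connector c = { .edid = NULL };

    get_edid(&c);
    printf("%s\n", get_edid(&c));   /* second call reuses the cache */
    free(c.edid);
    return 0;
}
```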
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
173int radeon_aspm = -1; 173int radeon_aspm = -1;
174int radeon_runtime_pm = -1; 174int radeon_runtime_pm = -1;
175int radeon_hard_reset = 0; 175int radeon_hard_reset = 0;
176int radeon_vm_size = 4096; 176int radeon_vm_size = 4;
177int radeon_vm_block_size = 9; 177int radeon_vm_block_size = 9;
178int radeon_deep_color = 0; 178int radeon_deep_color = 0;
179 179
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); 243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
244module_param_named(hard_reset, radeon_hard_reset, int, 0444); 244module_param_named(hard_reset, radeon_hard_reset, int, 0444);
245 245
246MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); 246MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
247module_param_named(vm_size, radeon_vm_size, int, 0444); 247module_param_named(vm_size, radeon_vm_size, int, 0444);
248 248
249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); 249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
579 /* new gpu have virtual address space support */ 579 /* new gpu have virtual address space support */
580 if (rdev->family >= CHIP_CAYMAN) { 580 if (rdev->family >= CHIP_CAYMAN) {
581 struct radeon_fpriv *fpriv; 581 struct radeon_fpriv *fpriv;
582 struct radeon_bo_va *bo_va; 582 struct radeon_vm *vm;
583 int r; 583 int r;
584 584
585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
587 return -ENOMEM; 587 return -ENOMEM;
588 } 588 }
589 589
590 r = radeon_vm_init(rdev, &fpriv->vm); 590 vm = &fpriv->vm;
591 r = radeon_vm_init(rdev, vm);
591 if (r) { 592 if (r) {
592 kfree(fpriv); 593 kfree(fpriv);
593 return r; 594 return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
596 if (rdev->accel_working) { 597 if (rdev->accel_working) {
597 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 598 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
598 if (r) { 599 if (r) {
599 radeon_vm_fini(rdev, &fpriv->vm); 600 radeon_vm_fini(rdev, vm);
600 kfree(fpriv); 601 kfree(fpriv);
601 return r; 602 return r;
602 } 603 }
603 604
604 /* map the ib pool buffer read only into 605 /* map the ib pool buffer read only into
605 * virtual address space */ 606 * virtual address space */
606 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, 607 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
607 rdev->ring_tmp_bo.bo); 608 rdev->ring_tmp_bo.bo);
608 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, 609 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
610 RADEON_VA_IB_OFFSET,
609 RADEON_VM_PAGE_READABLE | 611 RADEON_VM_PAGE_READABLE |
610 RADEON_VM_PAGE_SNOOPED); 612 RADEON_VM_PAGE_SNOOPED);
611 613
612 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 614 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
613 if (r) { 615 if (r) {
614 radeon_vm_fini(rdev, &fpriv->vm); 616 radeon_vm_fini(rdev, vm);
615 kfree(fpriv); 617 kfree(fpriv);
616 return r; 618 return r;
617 } 619 }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
640 /* new gpu have virtual address space support */ 642 /* new gpu have virtual address space support */
641 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 643 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
642 struct radeon_fpriv *fpriv = file_priv->driver_priv; 644 struct radeon_fpriv *fpriv = file_priv->driver_priv;
643 struct radeon_bo_va *bo_va; 645 struct radeon_vm *vm = &fpriv->vm;
644 int r; 646 int r;
645 647
646 if (rdev->accel_working) { 648 if (rdev->accel_working) {
647 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 649 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
648 if (!r) { 650 if (!r) {
649 bo_va = radeon_vm_bo_find(&fpriv->vm, 651 if (vm->ib_bo_va)
650 rdev->ring_tmp_bo.bo); 652 radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
651 if (bo_va)
652 radeon_vm_bo_rmv(rdev, bo_va);
653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
654 } 654 }
655 } 655 }
656 656
657 radeon_vm_fini(rdev, &fpriv->vm); 657 radeon_vm_fini(rdev, vm);
658 kfree(fpriv); 658 kfree(fpriv);
659 file_priv->driver_priv = NULL; 659 file_priv->driver_priv = NULL;
660 } 660 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
332 bo_va->ref_count = 1; 332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list); 333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list); 334 INIT_LIST_HEAD(&bo_va->vm_list);
335 INIT_LIST_HEAD(&bo_va->vm_status);
335 336
336 mutex_lock(&vm->mutex); 337 mutex_lock(&vm->mutex);
337 list_add(&bo_va->vm_list, &vm->va); 338 list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
468 head = &tmp->vm_list; 469 head = &tmp->vm_list;
469 } 470 }
470 471
472 if (bo_va->soffset) {
473 /* add a clone of the bo_va to clear the old address */
474 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
475 if (!tmp) {
476 mutex_unlock(&vm->mutex);
477 return -ENOMEM;
478 }
479 tmp->soffset = bo_va->soffset;
480 tmp->eoffset = bo_va->eoffset;
481 tmp->vm = vm;
482 list_add(&tmp->vm_status, &vm->freed);
483 }
484
471 bo_va->soffset = soffset; 485 bo_va->soffset = soffset;
472 bo_va->eoffset = eoffset; 486 bo_va->eoffset = eoffset;
473 bo_va->flags = flags; 487 bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
823 * Object have to be reserved and mutex must be locked! 837 * Object have to be reserved and mutex must be locked!
824 */ 838 */
825int radeon_vm_bo_update(struct radeon_device *rdev, 839int radeon_vm_bo_update(struct radeon_device *rdev,
826 struct radeon_vm *vm, 840 struct radeon_bo_va *bo_va,
827 struct radeon_bo *bo,
828 struct ttm_mem_reg *mem) 841 struct ttm_mem_reg *mem)
829{ 842{
843 struct radeon_vm *vm = bo_va->vm;
830 struct radeon_ib ib; 844 struct radeon_ib ib;
831 struct radeon_bo_va *bo_va;
832 unsigned nptes, ndw; 845 unsigned nptes, ndw;
833 uint64_t addr; 846 uint64_t addr;
834 int r; 847 int r;
835 848
836 bo_va = radeon_vm_bo_find(vm, bo);
837 if (bo_va == NULL) {
838 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
839 return -EINVAL;
840 }
841 849
842 if (!bo_va->soffset) { 850 if (!bo_va->soffset) {
843 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", 851 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
844 bo, vm); 852 bo_va->bo, vm);
845 return -EINVAL; 853 return -EINVAL;
846 } 854 }
847 855
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
868 876
869 trace_radeon_vm_bo_update(bo_va); 877 trace_radeon_vm_bo_update(bo_va);
870 878
871 nptes = radeon_bo_ngpu_pages(bo); 879 nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
872 880
873 /* padding, etc. */ 881 /* padding, etc. */
874 ndw = 64; 882 ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
911} 919}
912 920
913/** 921/**
922 * radeon_vm_clear_freed - clear freed BOs in the PT
923 *
924 * @rdev: radeon_device pointer
925 * @vm: requested vm
926 *
927 * Make sure all freed BOs are cleared in the PT.
928 * Returns 0 for success.
929 *
930 * PTs have to be reserved and mutex must be locked!
931 */
932int radeon_vm_clear_freed(struct radeon_device *rdev,
933 struct radeon_vm *vm)
934{
935 struct radeon_bo_va *bo_va, *tmp;
936 int r;
937
938 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
939 list_del(&bo_va->vm_status);
940 r = radeon_vm_bo_update(rdev, bo_va, NULL);
941 kfree(bo_va);
942 if (r)
943 return r;
944 }
945 return 0;
946
947}
948
949/**
914 * radeon_vm_bo_rmv - remove a bo to a specific vm 950 * radeon_vm_bo_rmv - remove a bo to a specific vm
915 * 951 *
916 * @rdev: radeon_device pointer 952 * @rdev: radeon_device pointer
917 * @bo_va: requested bo_va 953 * @bo_va: requested bo_va
918 * 954 *
919 * Remove @bo_va->bo from the requested vm (cayman+). 955 * Remove @bo_va->bo from the requested vm (cayman+).
920 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
921 * remove the ptes for @bo_va in the page table.
922 * Returns 0 for success.
923 * 956 *
924 * Object have to be reserved! 957 * Object have to be reserved!
925 */ 958 */
926int radeon_vm_bo_rmv(struct radeon_device *rdev, 959void radeon_vm_bo_rmv(struct radeon_device *rdev,
927 struct radeon_bo_va *bo_va) 960 struct radeon_bo_va *bo_va)
928{ 961{
929 int r = 0; 962 struct radeon_vm *vm = bo_va->vm;
930 963
931 mutex_lock(&bo_va->vm->mutex); 964 list_del(&bo_va->bo_list);
932 if (bo_va->soffset)
933 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
934 965
966 mutex_lock(&vm->mutex);
935 list_del(&bo_va->vm_list); 967 list_del(&bo_va->vm_list);
936 mutex_unlock(&bo_va->vm->mutex);
937 list_del(&bo_va->bo_list);
938 968
939 kfree(bo_va); 969 if (bo_va->soffset) {
940 return r; 970 bo_va->bo = NULL;
971 list_add(&bo_va->vm_status, &vm->freed);
972 } else {
973 kfree(bo_va);
974 }
975
976 mutex_unlock(&vm->mutex);
941} 977}
942 978
943/** 979/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
975 int r; 1011 int r;
976 1012
977 vm->id = 0; 1013 vm->id = 0;
1014 vm->ib_bo_va = NULL;
978 vm->fence = NULL; 1015 vm->fence = NULL;
979 vm->last_flush = NULL; 1016 vm->last_flush = NULL;
980 vm->last_id_use = NULL; 1017 vm->last_id_use = NULL;
981 mutex_init(&vm->mutex); 1018 mutex_init(&vm->mutex);
982 INIT_LIST_HEAD(&vm->va); 1019 INIT_LIST_HEAD(&vm->va);
1020 INIT_LIST_HEAD(&vm->freed);
983 1021
984 pd_size = radeon_vm_directory_size(rdev); 1022 pd_size = radeon_vm_directory_size(rdev);
985 pd_entries = radeon_vm_num_pdes(rdev); 1023 pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1034 kfree(bo_va); 1072 kfree(bo_va);
1035 } 1073 }
1036 } 1074 }
1037 1075 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
1076 kfree(bo_va);
1038 1077
1039 for (i = 0; i < radeon_vm_num_pdes(rdev); i++) 1078 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1040 radeon_bo_unref(&vm->page_tables[i].bo); 1079 radeon_bo_unref(&vm->page_tables[i].bo);
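The radeon_vm.c changes introduce a per-VM freed list: when a mapping is removed (or remapped) while it still has page-table entries, the bo_va is parked on vm->freed and the actual clear happens later in radeon_vm_clear_freed(), with any leftovers reaped in radeon_vm_fini(). A self-contained sketch of that deferred-cleanup pattern using an ordinary linked list (all names hypothetical):

```c
#include <stdio.h>
#include <stdlib.h>

struct mapping {
    unsigned long start, end;   /* has PT entries while start != 0 */
    struct mapping *next;
};

struct vm {
    struct mapping *freed;      /* removed but not yet cleared */
};

/* remove: if the mapping still covers a range, defer the page-table
 * clear instead of doing it (and possibly failing) right here */
static void vm_remove(struct vm *vm, struct mapping *m)
{
    if (m->start) {
        m->next = vm->freed;
        vm->freed = m;
    } else {
        free(m);
    }
}

/* later, with the page tables reserved: clear everything on the list */
static int vm_clear_freed(struct vm *vm)
{
    while (vm->freed) {
        struct mapping *m = vm->freed;

        vm->freed = m->next;
        printf("clearing PTEs %lx-%lx\n", m->start, m->end);
        free(m);
    }
    return 0;
}

int main(void)
{
    struct vm vm = { .freed = NULL };
    struct mapping *m = malloc(sizeof(*m));

    if (!m)
        return 1;
    m->start = 0x1000; m->end = 0x2000; m->next = NULL;
    vm_remove(&vm, m);
    return vm_clear_freed(&vm);
}
```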
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
406 for (i = 0; i < rdev->num_crtc; i++) { 406 for (i = 0; i < rdev->num_crtc; i++) {
407 if (save->crtc_enabled[i]) { 407 if (save->crtc_enabled[i]) {
408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); 408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
409 if ((tmp & 0x3) != 0) { 409 if ((tmp & 0x7) != 3) {
410 tmp &= ~0x3; 410 tmp &= ~0x7;
411 tmp |= 0x3;
411 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 412 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
412 } 413 }
413 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); 414 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2329 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2329 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2330 ASIC_INTERNAL_MEMORY_SS, 0); 2330 ASIC_INTERNAL_MEMORY_SS, 0);
2331 2331
2332 /* disable ss, causes hangs on some cayman boards */
2333 if (rdev->family == CHIP_CAYMAN) {
2334 pi->sclk_ss = false;
2335 pi->mclk_ss = false;
2336 }
2337
2338 if (pi->sclk_ss || pi->mclk_ss) 2332 if (pi->sclk_ss || pi->mclk_ss)
2339 pi->dynamic_ss = true; 2333 pi->dynamic_ss = true;
2340 else 2334 else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 730cee2c34cf..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6103 tmp = RREG32(IH_RB_CNTL); 6103 tmp = RREG32(IH_RB_CNTL);
6104 tmp |= IH_WPTR_OVERFLOW_CLEAR; 6104 tmp |= IH_WPTR_OVERFLOW_CLEAR;
6105 WREG32(IH_RB_CNTL, tmp); 6105 WREG32(IH_RB_CNTL, tmp);
6106 wptr &= ~RB_OVERFLOW;
6106 } 6107 }
6107 return (wptr & rdev->ih.ptr_mask); 6108 return (wptr & rdev->ih.ptr_mask);
6108} 6109}
@@ -6376,14 +6377,16 @@ restart_ih:
6376 case 147: 6377 case 147:
6377 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 6378 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6378 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 6379 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6380 /* reset addr and status */
6381 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6382 if (addr == 0x0 && status == 0x0)
6383 break;
6379 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 6384 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6380 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 6385 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6381 addr); 6386 addr);
6382 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 6387 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6383 status); 6388 status);
6384 si_vm_decode_fault(rdev, status, addr); 6389 si_vm_decode_fault(rdev, status, addr);
6385 /* reset addr and status */
6386 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6387 break; 6390 break;
6388 case 176: /* RINGID0 CP_INT */ 6391 case 176: /* RINGID0 CP_INT */
6389 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 6392 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1875 pi->at[i] = TRINITY_AT_DFLT; 1875 pi->at[i] = TRINITY_AT_DFLT;
1876 1876
1877 /* There are stability issues reported on latops with 1877 /* There are stability issues reported on with
1878 * bapm installed when switching between AC and battery 1878 * bapm enabled when switching between AC and battery
1879 * power. At the same time, some desktop boards hang 1879 * power. At the same time, some MSI boards hang
1880 * if it's not enabled and dpm is enabled. 1880 * if it's not enabled and dpm is enabled. Just enable
1881 * it for MSI boards right now.
1881 */ 1882 */
1882 if (rdev->flags & RADEON_IS_MOBILITY) 1883 if (rdev->pdev->subsystem_vendor == 0x1462)
1883 pi->enable_bapm = false;
1884 else
1885 pi->enable_bapm = true; 1884 pi->enable_bapm = true;
1885 else
1886 pi->enable_bapm = false;
1886 pi->enable_nbps_policy = true; 1887 pi->enable_nbps_policy = true;
1887 pi->enable_sclk_ds = true; 1888 pi->enable_sclk_ds = true;
1888 pi->enable_gfx_power_gating = true; 1889 pi->enable_gfx_power_gating = true;
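The trinity_dpm.c hunk narrows the BAPM policy: instead of keying off mobility, it is enabled only for boards whose PCI subsystem vendor is 0x1462 (MSI), since some of those hang with BAPM off while other systems have reported instability with it on. The decision reduces to a one-liner; the second sample vendor ID below is arbitrary:

```c
#include <stdbool.h>
#include <stdio.h>

static bool trinity_enable_bapm(unsigned short subsystem_vendor)
{
    return subsystem_vendor == 0x1462;   /* MSI boards only, per the hunk */
}

int main(void)
{
    printf("0x1462 -> %d, 0x1043 -> %d\n",
           trinity_enable_bapm(0x1462), trinity_enable_bapm(0x1043));
    return 0;
}
```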
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 800c8b60f7a2..5e79c6ad914f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -810,7 +810,7 @@ config HID_ZYDACRON
810 810
811config HID_SENSOR_HUB 811config HID_SENSOR_HUB
812 tristate "HID Sensors framework support" 812 tristate "HID Sensors framework support"
813 depends on HID 813 depends on HID && HAS_IOMEM
814 select MFD_CORE 814 select MFD_CORE
815 default n 815 default n
816 ---help--- 816 ---help---
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6d00bb9366fa..48b66bbffc94 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
323 323
324#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 324#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9
325#define USB_DEVICE_ID_ETURBOTOUCH 0x0006 325#define USB_DEVICE_ID_ETURBOTOUCH 0x0006
326#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
326 327
327#define USB_VENDOR_ID_EZKEY 0x0518 328#define USB_VENDOR_ID_EZKEY 0x0518
328#define USB_DEVICE_ID_BTC_8193 0x0002 329#define USB_DEVICE_ID_BTC_8193 0x0002
@@ -715,6 +716,8 @@
715 716
716#define USB_VENDOR_ID_PENMOUNT 0x14e1 717#define USB_VENDOR_ID_PENMOUNT 0x14e1
717#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500 718#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
719#define USB_DEVICE_ID_PENMOUNT_1610 0x1610
720#define USB_DEVICE_ID_PENMOUNT_1640 0x1640
718 721
719#define USB_VENDOR_ID_PETALYNX 0x18b1 722#define USB_VENDOR_ID_PETALYNX 0x18b1
720#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 723#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 2451c7e5febd..578bbe65902b 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
428 return 0; 428 return 0;
429} 429}
430 430
431#ifdef CONFIG_PM
431static int rmi_post_reset(struct hid_device *hdev) 432static int rmi_post_reset(struct hid_device *hdev)
432{ 433{
433 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); 434 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
437{ 438{
438 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); 439 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
439} 440}
441#endif /* CONFIG_PM */
440 442
441#define RMI4_MAX_PAGE 0xff 443#define RMI4_MAX_PAGE 0xff
442#define RMI4_PAGE_SIZE 0x0100 444#define RMI4_PAGE_SIZE 0x0100
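Wrapping rmi_post_reset()/rmi_post_resume() in #ifdef CONFIG_PM matches the guard around the code that references them, so non-PM builds are not left with defined-but-unused static functions. A generic, compilable illustration of the pattern (the macro name is chosen for the sketch; build with or without -DWITH_PM):

```c
#include <stdio.h>

#ifdef WITH_PM                  /* compile with -DWITH_PM to enable */
static int device_resume(void)
{
    return 0;
}
#endif /* WITH_PM */

int main(void)
{
#ifdef WITH_PM
    printf("resume -> %d\n", device_resume());
#else
    printf("built without PM support\n");
#endif
    return 0;
}
```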
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a8d5c8faf8cf..e244e449cbba 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
159{ 159{
160 struct hid_sensor_hub_callbacks_list *callback; 160 struct hid_sensor_hub_callbacks_list *callback;
161 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); 161 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
162 unsigned long flags;
162 163
163 spin_lock(&pdata->dyn_callback_lock); 164 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
164 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 165 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
165 if (callback->usage_id == usage_id && 166 if (callback->usage_id == usage_id &&
166 callback->hsdev == hsdev) { 167 callback->hsdev == hsdev) {
167 spin_unlock(&pdata->dyn_callback_lock); 168 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
168 return -EINVAL; 169 return -EINVAL;
169 } 170 }
170 callback = kzalloc(sizeof(*callback), GFP_ATOMIC); 171 callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
171 if (!callback) { 172 if (!callback) {
172 spin_unlock(&pdata->dyn_callback_lock); 173 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
173 return -ENOMEM; 174 return -ENOMEM;
174 } 175 }
175 callback->hsdev = hsdev; 176 callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
177 callback->usage_id = usage_id; 178 callback->usage_id = usage_id;
178 callback->priv = NULL; 179 callback->priv = NULL;
179 list_add_tail(&callback->list, &pdata->dyn_callback_list); 180 list_add_tail(&callback->list, &pdata->dyn_callback_list);
180 spin_unlock(&pdata->dyn_callback_lock); 181 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
181 182
182 return 0; 183 return 0;
183} 184}
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
188{ 189{
189 struct hid_sensor_hub_callbacks_list *callback; 190 struct hid_sensor_hub_callbacks_list *callback;
190 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); 191 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
192 unsigned long flags;
191 193
192 spin_lock(&pdata->dyn_callback_lock); 194 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
193 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 195 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
194 if (callback->usage_id == usage_id && 196 if (callback->usage_id == usage_id &&
195 callback->hsdev == hsdev) { 197 callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
197 kfree(callback); 199 kfree(callback);
198 break; 200 break;
199 } 201 }
200 spin_unlock(&pdata->dyn_callback_lock); 202 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
201 203
202 return 0; 204 return 0;
203} 205}
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
378{ 380{
379 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 381 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
380 struct hid_sensor_hub_callbacks_list *callback; 382 struct hid_sensor_hub_callbacks_list *callback;
383 unsigned long flags;
381 384
382 hid_dbg(hdev, " sensor_hub_suspend\n"); 385 hid_dbg(hdev, " sensor_hub_suspend\n");
383 spin_lock(&pdata->dyn_callback_lock); 386 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
384 list_for_each_entry(callback, &pdata->dyn_callback_list, list) { 387 list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
385 if (callback->usage_callback->suspend) 388 if (callback->usage_callback->suspend)
386 callback->usage_callback->suspend( 389 callback->usage_callback->suspend(
387 callback->hsdev, callback->priv); 390 callback->hsdev, callback->priv);
388 } 391 }
389 spin_unlock(&pdata->dyn_callback_lock); 392 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
390 393
391 return 0; 394 return 0;
392} 395}
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
395{ 398{
396 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 399 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
397 struct hid_sensor_hub_callbacks_list *callback; 400 struct hid_sensor_hub_callbacks_list *callback;
401 unsigned long flags;
398 402
399 hid_dbg(hdev, " sensor_hub_resume\n"); 403 hid_dbg(hdev, " sensor_hub_resume\n");
400 spin_lock(&pdata->dyn_callback_lock); 404 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
401 list_for_each_entry(callback, &pdata->dyn_callback_list, list) { 405 list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
402 if (callback->usage_callback->resume) 406 if (callback->usage_callback->resume)
403 callback->usage_callback->resume( 407 callback->usage_callback->resume(
404 callback->hsdev, callback->priv); 408 callback->hsdev, callback->priv);
405 } 409 }
406 spin_unlock(&pdata->dyn_callback_lock); 410 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
407 411
408 return 0; 412 return 0;
409} 413}
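The hunks above switch dyn_callback_lock from spin_lock()/spin_unlock() to the irqsave variants and thread a flags variable through each caller. The usual motivation for that form is that the same lock may also be taken from interrupt context, where a plain spin_lock() holder on the same CPU could deadlock. A kernel-style fragment of the pattern (not a standalone program; the structure name is hypothetical):

```c
#include <linux/spinlock.h>

struct hub_data {
    spinlock_t dyn_callback_lock;
};

static void walk_callbacks(struct hub_data *pdata)
{
    unsigned long flags;

    /* irqsave disables local interrupts while the lock is held and
     * restores the previous state afterwards, so an interrupt handler
     * contending for the same lock cannot deadlock this CPU */
    spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
    /* ... walk or modify the callback list ... */
    spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
}
```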
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
632 if (name == NULL) { 636 if (name == NULL) {
633 hid_err(hdev, "Failed MFD device name\n"); 637 hid_err(hdev, "Failed MFD device name\n");
634 ret = -ENOMEM; 638 ret = -ENOMEM;
639 kfree(hsdev);
635 goto err_no_mem; 640 goto err_no_mem;
636 } 641 }
637 sd->hid_sensor_hub_client_devs[ 642 sd->hid_sensor_hub_client_devs[
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 59badc10a08c..31e6727cd009 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
49 49
50 { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT }, 50 { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
51 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 51 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, 53 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
53 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 54 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
54 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 55 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 82 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
81 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index e84f4526eb36..ae22e3c1fc4c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
339 */ 339 */
340 340
341 do { 341 do {
342 hv_begin_read(&channel->inbound); 342 if (read_state)
343 hv_begin_read(&channel->inbound);
343 channel->onchannel_callback(arg); 344 channel->onchannel_callback(arg);
344 bytes_to_read = hv_end_read(&channel->inbound); 345 if (read_state)
346 bytes_to_read = hv_end_read(&channel->inbound);
347 else
348 bytes_to_read = 0;
345 } while (read_state && (bytes_to_read != 0)); 349 } while (read_state && (bytes_to_read != 0));
346 } else { 350 } else {
347 pr_err("no channel callback for relid - %u\n", relid); 351 pr_err("no channel callback for relid - %u\n", relid);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d843b80..23b2ce294c4c 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
246 /* 246 /*
247 * Send the information to the user-level daemon. 247 * Send the information to the user-level daemon.
248 */ 248 */
249 fcopy_send_data();
250 schedule_delayed_work(&fcopy_work, 5*HZ); 249 schedule_delayed_work(&fcopy_work, 5*HZ);
250 fcopy_send_data();
251 return; 251 return;
252 } 252 }
253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; 253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ea852537307e..521c14625b3a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
127 kvp_respond_to_host(NULL, HV_E_FAIL); 127 kvp_respond_to_host(NULL, HV_E_FAIL);
128} 128}
129 129
130static void poll_channel(struct vmbus_channel *channel)
131{
132 if (channel->target_cpu != smp_processor_id())
133 smp_call_function_single(channel->target_cpu,
134 hv_kvp_onchannelcallback,
135 channel, true);
136 else
137 hv_kvp_onchannelcallback(channel);
138}
139
140
130static int kvp_handle_handshake(struct hv_kvp_msg *msg) 141static int kvp_handle_handshake(struct hv_kvp_msg *msg)
131{ 142{
132 int ret = 1; 143 int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
155 kvp_register(dm_reg_value); 166 kvp_register(dm_reg_value);
156 kvp_transaction.active = false; 167 kvp_transaction.active = false;
157 if (kvp_transaction.kvp_context) 168 if (kvp_transaction.kvp_context)
158 hv_kvp_onchannelcallback(kvp_transaction.kvp_context); 169 poll_channel(kvp_transaction.kvp_context);
159 } 170 }
160 return ret; 171 return ret;
161} 172}
@@ -568,7 +579,7 @@ response_done:
568 579
569 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, 580 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
570 VM_PKT_DATA_INBAND, 0); 581 VM_PKT_DATA_INBAND, 0);
571 582 poll_channel(channel);
572} 583}
573 584
574/* 585/*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
603 return; 614 return;
604 } 615 }
605 616
606 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 617 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
607 &requestid); 618 &requestid);
608 619
609 if (recvlen > 0) { 620 if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index dd761806f0e8..3b9c9ef0deb8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
319 (struct hv_util_service *)dev_id->driver_data; 319 (struct hv_util_service *)dev_id->driver_data;
320 int ret; 320 int ret;
321 321
322 srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); 322 srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
323 if (!srv->recv_buffer) 323 if (!srv->recv_buffer)
324 return -ENOMEM; 324 return -ENOMEM;
325 if (srv->util_init) { 325 if (srv->util_init) {
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 5ffd81f19d01..0625e50d7a6e 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
239 return sprintf(buf, "%u\n", !!(alarms & mask)); 239 return sprintf(buf, "%u\n", !!(alarms & mask));
240} 240}
241 241
242static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO, 242static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
243 adc128_show_in, adc128_set_in, 0, 0); 243 adc128_show_in, NULL, 0, 0);
244static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, 244static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
245 adc128_show_in, adc128_set_in, 0, 1); 245 adc128_show_in, adc128_set_in, 0, 1);
246static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, 246static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
247 adc128_show_in, adc128_set_in, 0, 2); 247 adc128_show_in, adc128_set_in, 0, 2);
248 248
249static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO, 249static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
250 adc128_show_in, adc128_set_in, 1, 0); 250 adc128_show_in, NULL, 1, 0);
251static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, 251static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
252 adc128_show_in, adc128_set_in, 1, 1); 252 adc128_show_in, adc128_set_in, 1, 1);
253static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, 253static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
254 adc128_show_in, adc128_set_in, 1, 2); 254 adc128_show_in, adc128_set_in, 1, 2);
255 255
256static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO, 256static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
257 adc128_show_in, adc128_set_in, 2, 0); 257 adc128_show_in, NULL, 2, 0);
258static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, 258static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
259 adc128_show_in, adc128_set_in, 2, 1); 259 adc128_show_in, adc128_set_in, 2, 1);
260static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, 260static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
261 adc128_show_in, adc128_set_in, 2, 2); 261 adc128_show_in, adc128_set_in, 2, 2);
262 262
263static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO, 263static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
264 adc128_show_in, adc128_set_in, 3, 0); 264 adc128_show_in, NULL, 3, 0);
265static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, 265static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
266 adc128_show_in, adc128_set_in, 3, 1); 266 adc128_show_in, adc128_set_in, 3, 1);
267static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, 267static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
268 adc128_show_in, adc128_set_in, 3, 2); 268 adc128_show_in, adc128_set_in, 3, 2);
269 269
270static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO, 270static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
271 adc128_show_in, adc128_set_in, 4, 0); 271 adc128_show_in, NULL, 4, 0);
272static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, 272static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
273 adc128_show_in, adc128_set_in, 4, 1); 273 adc128_show_in, adc128_set_in, 4, 1);
274static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, 274static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
275 adc128_show_in, adc128_set_in, 4, 2); 275 adc128_show_in, adc128_set_in, 4, 2);
276 276
277static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO, 277static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
278 adc128_show_in, adc128_set_in, 5, 0); 278 adc128_show_in, NULL, 5, 0);
279static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, 279static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
280 adc128_show_in, adc128_set_in, 5, 1); 280 adc128_show_in, adc128_set_in, 5, 1);
281static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, 281static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
282 adc128_show_in, adc128_set_in, 5, 2); 282 adc128_show_in, adc128_set_in, 5, 2);
283 283
284static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO, 284static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
285 adc128_show_in, adc128_set_in, 6, 0); 285 adc128_show_in, NULL, 6, 0);
286static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, 286static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
287 adc128_show_in, adc128_set_in, 6, 1); 287 adc128_show_in, adc128_set_in, 6, 1);
288static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, 288static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 3eb4281689b5..d74241bb278c 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
185 struct adm1021_data *data = dev_get_drvdata(dev); 185 struct adm1021_data *data = dev_get_drvdata(dev);
186 struct i2c_client *client = data->client; 186 struct i2c_client *client = data->client;
187 long temp; 187 long temp;
188 int err; 188 int reg_val, err;
189 189
190 err = kstrtol(buf, 10, &temp); 190 err = kstrtol(buf, 10, &temp);
191 if (err) 191 if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
193 temp /= 1000; 193 temp /= 1000;
194 194
195 mutex_lock(&data->update_lock); 195 mutex_lock(&data->update_lock);
196 data->temp_max[index] = clamp_val(temp, -128, 127); 196 reg_val = clamp_val(temp, -128, 127);
197 data->temp_max[index] = reg_val * 1000;
197 if (!read_only) 198 if (!read_only)
198 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), 199 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
199 data->temp_max[index]); 200 reg_val);
200 mutex_unlock(&data->update_lock); 201 mutex_unlock(&data->update_lock);
201 202
202 return count; 203 return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
210 struct adm1021_data *data = dev_get_drvdata(dev); 211 struct adm1021_data *data = dev_get_drvdata(dev);
211 struct i2c_client *client = data->client; 212 struct i2c_client *client = data->client;
212 long temp; 213 long temp;
213 int err; 214 int reg_val, err;
214 215
215 err = kstrtol(buf, 10, &temp); 216 err = kstrtol(buf, 10, &temp);
216 if (err) 217 if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
218 temp /= 1000; 219 temp /= 1000;
219 220
220 mutex_lock(&data->update_lock); 221 mutex_lock(&data->update_lock);
221 data->temp_min[index] = clamp_val(temp, -128, 127); 222 reg_val = clamp_val(temp, -128, 127);
223 data->temp_min[index] = reg_val * 1000;
222 if (!read_only) 224 if (!read_only)
223 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), 225 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
224 data->temp_min[index]); 226 reg_val);
225 mutex_unlock(&data->update_lock); 227 mutex_unlock(&data->update_lock);
226 228
227 return count; 229 return count;
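
The adm1021 hunks above separate the byte written to the chip from the value cached for sysfs: the limit is clamped to the register's [-128, 127] degree range, that raw byte goes to i2c_smbus_write_byte_data(), and the cache keeps the same limit scaled back to millidegrees so later reads report what was actually stored. A minimal standalone sketch of that conversion (clamp_long() merely stands in for the kernel's clamp_val(); this is not driver code):

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)      /* stand-in for clamp_val() */
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        long input = 85500;                            /* millidegrees written via sysfs */
        long reg_val = clamp_long(input / 1000, -128, 127);
        long cached  = reg_val * 1000;                 /* value reported on the next read */

        printf("register byte %ld, cached %ld millidegrees\n", reg_val, cached);
        return 0;
}
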
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 78339e880bd6..2804571b269e 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
232 /* Update the value */ 232 /* Update the value */
233 reg = (reg & 0x3F) | (val << 6); 233 reg = (reg & 0x3F) | (val << 6);
234 234
235 /* Update the cache */
236 data->fan_div[attr->index] = reg;
237
235 /* Write value */ 238 /* Write value */
236 i2c_smbus_write_byte_data(client, 239 i2c_smbus_write_byte_data(client,
237 ADM1029_REG_FAN_DIV[attr->index], reg); 240 ADM1029_REG_FAN_DIV[attr->index], reg);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index a8a540ca8c34..51c1a5a165ab 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
365 if (ret) 365 if (ret)
366 return ret; 366 return ret;
367 367
368 val = clamp_val(val, 0, 127000);
368 mutex_lock(&data->update_lock); 369 mutex_lock(&data->update_lock);
369 data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); 370 data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
370 adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), 371 adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
394 if (ret) 395 if (ret)
395 return ret; 396 return ret;
396 397
398 val = clamp_val(val, 0, 127000);
397 mutex_lock(&data->update_lock); 399 mutex_lock(&data->update_lock);
398 data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], 400 data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
399 data->pwm[nr]); 401 data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
696 if (ret) 698 if (ret)
697 return ret; 699 return ret;
698 700
699 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 701 val = clamp_val(val, -55000, 127000);
700 mutex_lock(&data->update_lock); 702 mutex_lock(&data->update_lock);
701 data->temp_min[nr] = TEMP_TO_REG(val); 703 data->temp_min[nr] = TEMP_TO_REG(val);
702 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), 704 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
717 if (ret) 719 if (ret)
718 return ret; 720 return ret;
719 721
720 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 722 val = clamp_val(val, -55000, 127000);
721 mutex_lock(&data->update_lock); 723 mutex_lock(&data->update_lock);
722 data->temp_max[nr] = TEMP_TO_REG(val); 724 data->temp_max[nr] = TEMP_TO_REG(val);
723 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), 725 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
738 if (ret) 740 if (ret)
739 return ret; 741 return ret;
740 742
741 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 743 val = clamp_val(val, -55000, 127000);
742 mutex_lock(&data->update_lock); 744 mutex_lock(&data->update_lock);
743 data->temp_crit[nr] = TEMP_TO_REG(val); 745 data->temp_crit[nr] = TEMP_TO_REG(val);
744 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), 746 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5ccf17..9ee3913850d6 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
515 return -EINVAL; 515 return -EINVAL;
516 516
517 temp = DIV_ROUND_CLOSEST(temp, 1000); 517 temp = DIV_ROUND_CLOSEST(temp, 1000);
518 temp = clamp_val(temp, 0, 255); 518 temp = clamp_val(temp, -128, 127);
519 519
520 mutex_lock(&data->lock); 520 mutex_lock(&data->lock);
521 data->temp_min[attr->index] = temp; 521 data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
549 return -EINVAL; 549 return -EINVAL;
550 550
551 temp = DIV_ROUND_CLOSEST(temp, 1000); 551 temp = DIV_ROUND_CLOSEST(temp, 1000);
552 temp = clamp_val(temp, 0, 255); 552 temp = clamp_val(temp, -128, 127);
553 553
554 mutex_lock(&data->lock); 554 mutex_lock(&data->lock);
555 data->temp_max[attr->index] = temp; 555 data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
826 return -EINVAL; 826 return -EINVAL;
827 827
828 temp = DIV_ROUND_CLOSEST(temp, 1000); 828 temp = DIV_ROUND_CLOSEST(temp, 1000);
829 temp = clamp_val(temp, 0, 255); 829 temp = clamp_val(temp, -128, 127);
830 830
831 mutex_lock(&data->lock); 831 mutex_lock(&data->lock);
832 data->pwm_tmin[attr->index] = temp; 832 data->pwm_tmin[attr->index] = temp;
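
The adt7470 limits are signed 8-bit temperatures, so the old clamp to [0, 255] silently turned any negative limit into 0; clamping to [-128, 127] preserves it. A trivial before/after check (values in whole degrees after DIV_ROUND_CLOSEST; clamp_long() models clamp_val()):

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        long temp = -5;                                /* DIV_ROUND_CLOSEST(-5000, 1000) */

        printf("old clamp: %ld\n", clamp_long(temp, 0, 255));      /* 0:  limit lost */
        printf("new clamp: %ld\n", clamp_long(temp, -128, 127));   /* -5: preserved  */
        return 0;
}
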
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index eea817296513..9f2be3dd28f3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
704 get_temp_alarm, NULL, IDX_TEMP1_MAX); 704 get_temp_alarm, NULL, IDX_TEMP1_MAX);
705static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, 705static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
706 get_temp_alarm, NULL, IDX_TEMP1_CRIT); 706 get_temp_alarm, NULL, IDX_TEMP1_CRIT);
707static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, 707static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
708 get_temp, NULL, IDX_TEMP2_INPUT); 708 get_temp, NULL, IDX_TEMP2_INPUT);
709static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, 709static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
710 set_temp, IDX_TEMP2_MIN); 710 set_temp, IDX_TEMP2_MIN);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd31042b452..d14ab3c45daa 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
194 struct device_attribute *devattr, 194 struct device_attribute *devattr,
195 char *buf) 195 char *buf)
196{ 196{
197 return sprintf(buf, "da9052-hwmon\n"); 197 return sprintf(buf, "da9052\n");
198} 198}
199 199
200static ssize_t show_label(struct device *dev, 200static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865f1207..35eb7738d711 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
204 struct device_attribute *devattr, 204 struct device_attribute *devattr,
205 char *buf) 205 char *buf)
206{ 206{
207 return sprintf(buf, "da9055-hwmon\n"); 207 return sprintf(buf, "da9055\n");
208} 208}
209 209
210static ssize_t show_label(struct device *dev, 210static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index fd892dd48e4c..78002de46cb6 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
250 if (result < 0) 250 if (result < 0)
251 return result; 251 return result;
252 252
253 val = DIV_ROUND_CLOSEST(val, 1000); 253 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
254 if ((val < -63) || (val > 127))
255 return -EINVAL;
256 254
257 mutex_lock(&data->update_lock); 255 mutex_lock(&data->update_lock);
258 data->temp_min[nr] = val; 256 data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
274 if (result < 0) 272 if (result < 0)
275 return result; 273 return result;
276 274
277 val = DIV_ROUND_CLOSEST(val, 1000); 275 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
278 if ((val < -63) || (val > 127))
279 return -EINVAL;
280 276
281 mutex_lock(&data->update_lock); 277 mutex_lock(&data->update_lock);
282 data->temp_max[nr] = val; 278 data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
390{ 386{
391 struct emc2103_data *data = emc2103_update_device(dev); 387 struct emc2103_data *data = emc2103_update_device(dev);
392 struct i2c_client *client = to_i2c_client(dev); 388 struct i2c_client *client = to_i2c_client(dev);
393 long rpm_target; 389 unsigned long rpm_target;
394 390
395 int result = kstrtol(buf, 10, &rpm_target); 391 int result = kstrtoul(buf, 10, &rpm_target);
396 if (result < 0) 392 if (result < 0)
397 return result; 393 return result;
398 394
399 /* Datasheet states 16384 as maximum RPM target (table 3.2) */ 395 /* Datasheet states 16384 as maximum RPM target (table 3.2) */
400 if ((rpm_target < 0) || (rpm_target > 16384)) 396 rpm_target = clamp_val(rpm_target, 0, 16384);
401 return -EINVAL;
402 397
403 mutex_lock(&data->update_lock); 398 mutex_lock(&data->update_lock);
404 399
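
The emc2103 hunks swap the reject-with--EINVAL range checks for clamp_val(), so out-of-range limit and RPM-target writes are pulled into the supported range instead of failing (and the RPM target is parsed as unsigned, which makes the old "< 0" check unnecessary). A toy model of the new behaviour, not the driver itself:

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        long temp_limit = 150000 / 1000;               /* 150 degrees requested */
        unsigned long rpm_target = 20000;              /* above the 16384 maximum */

        printf("stored limit:  %ld\n", clamp_long(temp_limit, -63, 127));   /* 127 */
        printf("stored target: %ld\n", clamp_long(rpm_target, 0, 16384));   /* 16384 */
        return 0;
}
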
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index bdfbe9114889..ae66f42c4d6d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -512,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
512 } 512 }
513 513
514 dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n", 514 dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
515 pdev->name); 515 pdev_id->name);
516 516
517 return 0; 517 return 0;
518err_after_sysfs: 518err_after_sysfs:
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239f..34b9a601ad07 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
86 */ 86 */
87static inline s8 TEMP_TO_REG(int val) 87static inline s8 TEMP_TO_REG(int val)
88{ 88{
89 return clamp_val(SCALE(val, 1, 1000), -128000, 127000); 89 return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
90} 90}
91 91
92static inline int TEMP_FROM_REG(s8 val) 92static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
384 err = kstrtoul(buf, 10, &val); 384 err = kstrtoul(buf, 10, &val);
385 if (err) 385 if (err)
386 return err; 386 return err;
387 if (val > 255)
388 return -EINVAL;
387 389
388 data->vrm = val; 390 data->vrm = val;
389 return count; 391 return count;
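
TEMP_TO_REG() returns an s8, so the original order (scale to degrees first, then clamp against the millidegree bounds) let a value such as 200 degrees C slip past the clamp and wrap in the s8 cast; clamping the millidegree input first keeps the result inside [-128, 127]. The added vrm check is in the same spirit, presumably because vrm is stored in a byte-sized field. A standalone illustration (SCALE() approximated by plain division; the wrap value assumes two's complement):

#include <stdio.h>

typedef signed char s8;

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        long val = 200000;                             /* 200 degrees C in millidegrees */
        s8 old_reg = (s8)clamp_long(val / 1000, -128000, 127000);   /* 200 wraps to -56 */
        long clamped = clamp_long(val, -128000, 127000);
        s8 new_reg = (s8)(clamped / 1000);                          /* 127 */

        printf("old: %d, new: %d\n", old_reg, new_reg);
        return 0;
}
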
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 09de4fd12d57..4d75d4759709 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/module.h>
26#include <linux/i2c.h> 25#include <linux/i2c.h>
27#include <linux/io.h> 26#include <linux/io.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f7f9865b8b89..f6d313e528de 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
40 40
41config I2C_MUX_PCA954x 41config I2C_MUX_PCA954x
42 tristate "Philips PCA954x I2C Mux/switches" 42 tristate "Philips PCA954x I2C Mux/switches"
43 depends on GPIOLIB
43 help 44 help
44 If you say yes here you get support for the Philips PCA954x 45 If you say yes here you get support for the Philips PCA954x
45 I2C mux/switch devices. 46 I2C mux/switch devices.
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
416 416
417config BLK_DEV_CS5520 417config BLK_DEV_CS5520
418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" 418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
419 depends on X86_32 || COMPILE_TEST
419 select BLK_DEV_IDEDMA_PCI 420 select BLK_DEV_IDEDMA_PCI
420 help 421 help
421 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX 422 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
426 427
427config BLK_DEV_CS5530 428config BLK_DEV_CS5530
428 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" 429 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
430 depends on X86_32 || COMPILE_TEST
429 select BLK_DEV_IDEDMA_PCI 431 select BLK_DEV_IDEDMA_PCI
430 help 432 help
431 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This 433 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
435 437
436config BLK_DEV_CS5535 438config BLK_DEV_CS5535
437 tristate "AMD CS5535 chipset support" 439 tristate "AMD CS5535 chipset support"
438 depends on X86 && !X86_64 440 depends on X86_32
439 select BLK_DEV_IDEDMA_PCI 441 select BLK_DEV_IDEDMA_PCI
440 help 442 help
441 Include support for UDMA on the NSC/AMD CS5535 companion chipset. 443 Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
486 488
487config BLK_DEV_SC1200 489config BLK_DEV_SC1200
488 tristate "National SCx200 chipset support" 490 tristate "National SCx200 chipset support"
491 depends on X86_32 || COMPILE_TEST
489 select BLK_DEV_IDEDMA_PCI 492 select BLK_DEV_IDEDMA_PCI
490 help 493 help
491 This driver adds support for the on-board IDE controller on the 494 This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
853 if (irq_handler == NULL) 853 if (irq_handler == NULL)
854 irq_handler = ide_intr; 854 irq_handler = ide_intr;
855 855
856 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) 856 if (!host->get_lock)
857 goto out_up; 857 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
858 goto out_up;
858 859
859#if !defined(__mc68000__) 860#if !defined(__mc68000__)
860 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 861 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
1533 1534
1534 ide_proc_unregister_port(hwif); 1535 ide_proc_unregister_port(hwif);
1535 1536
1536 free_irq(hwif->irq, hwif); 1537 if (!hwif->host->get_lock)
1538 free_irq(hwif->irq, hwif);
1537 1539
1538 device_unregister(hwif->portdev); 1540 device_unregister(hwif->portdev);
1539 device_unregister(&hwif->gendev); 1541 device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 69abf9163df7..54e464e4bb72 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
110 struct accel_3d_state *accel_state = iio_priv(indio_dev); 110 struct accel_3d_state *accel_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
151 ret_type = IIO_VAL_INT; 150 ret_type = IIO_VAL_INT;
152 break; 151 break;
153 case IIO_CHAN_INFO_SAMP_FREQ: 152 case IIO_CHAN_INFO_SAMP_FREQ:
154 ret = hid_sensor_read_samp_freq_value( 153 ret_type = hid_sensor_read_samp_freq_value(
155 &accel_state->common_attributes, val, val2); 154 &accel_state->common_attributes, val, val2);
156 ret_type = IIO_VAL_INT_PLUS_MICRO;
157 break; 155 break;
158 case IIO_CHAN_INFO_HYSTERESIS: 156 case IIO_CHAN_INFO_HYSTERESIS:
159 ret = hid_sensor_read_raw_hyst_value( 157 ret_type = hid_sensor_read_raw_hyst_value(
160 &accel_state->common_attributes, val, val2); 158 &accel_state->common_attributes, val, val2);
161 ret_type = IIO_VAL_INT_PLUS_MICRO;
162 break; 159 break;
163 default: 160 default:
164 ret_type = -EINVAL; 161 ret_type = -EINVAL;
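
This pattern repeats in the gyro, ALS, proximity, magnetometer and pressure hunks further down: instead of the driver hard-coding IIO_VAL_INT_PLUS_MICRO after the call, the hid-sensor helper's return value is used directly as the IIO value format, so the helper decides how the (val, val2) pair should be interpreted. A schematic of that calling convention (constants and helper body invented for illustration; the real helpers live in the hid-sensor-hub attribute code):

#include <stdio.h>

#define IIO_VAL_INT             1      /* stand-ins for the kernel's format codes */
#define IIO_VAL_INT_PLUS_MICRO  2

static int read_samp_freq_value(int *val, int *val2)
{
        *val = 12;
        *val2 = 500000;                                /* pretend the sensor runs at 12.5 Hz */
        return IIO_VAL_INT_PLUS_MICRO;                 /* the format code is the return value */
}

int main(void)
{
        int val, val2;
        int ret_type = read_samp_freq_value(&val, &val2);

        printf("format %d: %d.%06d Hz\n", ret_type, val, val2);
        return 0;
}
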
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea170566..2a5fa9a436e5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
111 {6, 250000}, {1, 560000} 111 {6, 250000}, {1, 560000}
112}; 112};
113 113
114/*
115 * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
116 * The userspace interface uses m/s^2 and we declare micro units
117 * So scale factor is given by:
118 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
119 */
114static const int mma8452_scales[3][2] = { 120static const int mma8452_scales[3][2] = {
115 {0, 977}, {0, 1953}, {0, 3906} 121 {0, 9577}, {0, 19154}, {0, 38307}
116}; 122};
117 123
118static ssize_t mma8452_show_samp_freq_avail(struct device *dev, 124static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
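
Plugging the numbers from the new comment into the stated formula reproduces the replacement scale table (micro m/s^2 per LSB, rounded). A throwaway standalone check, assuming g = 9.80665 as in the comment:

#include <stdio.h>

int main(void)
{
        const double g = 9.80665;

        for (int n = 2; n <= 8; n *= 2)
                printf("+/-%dG: %.0f\n", n, g * n * 1e6 / 2048.0);
        /* prints 9577, 19154, 38307: the three table entries */
        return 0;
}
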
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index a4db3026bec6..d5dc4c6ce86c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
374 return -EAGAIN; 374 return -EAGAIN;
375 } 375 }
376 } 376 }
377 map_val = chan->channel + TOTAL_CHANNELS; 377 map_val = adc_dev->channel_step[chan->scan_index];
378 378
379 /* 379 /*
380 * We check the complete FIFO. We programmed just one entry but in case 380 * We check the complete FIFO. We programmed just one entry but in case
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 40f4e4935d0d..fa034a3dad78 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
110 struct gyro_3d_state *gyro_state = iio_priv(indio_dev); 110 struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
151 ret_type = IIO_VAL_INT; 150 ret_type = IIO_VAL_INT;
152 break; 151 break;
153 case IIO_CHAN_INFO_SAMP_FREQ: 152 case IIO_CHAN_INFO_SAMP_FREQ:
154 ret = hid_sensor_read_samp_freq_value( 153 ret_type = hid_sensor_read_samp_freq_value(
155 &gyro_state->common_attributes, val, val2); 154 &gyro_state->common_attributes, val, val2);
156 ret_type = IIO_VAL_INT_PLUS_MICRO;
157 break; 155 break;
158 case IIO_CHAN_INFO_HYSTERESIS: 156 case IIO_CHAN_INFO_HYSTERESIS:
159 ret = hid_sensor_read_raw_hyst_value( 157 ret_type = hid_sensor_read_raw_hyst_value(
160 &gyro_state->common_attributes, val, val2); 158 &gyro_state->common_attributes, val, val2);
161 ret_type = IIO_VAL_INT_PLUS_MICRO;
162 break; 159 break;
163 default: 160 default:
164 ret_type = -EINVAL; 161 ret_type = -EINVAL;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973a1fb8..bfbf4d419f41 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
345 &indio_dev->event_interface->dev_attr_list); 345 &indio_dev->event_interface->dev_attr_list);
346 kfree(postfix); 346 kfree(postfix);
347 347
348 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
349 continue;
350
348 if (ret) 351 if (ret)
349 return ret; 352 return ret;
350 353
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index f34c94380b41..96e71e103ea7 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
79 struct als_state *als_state = iio_priv(indio_dev); 79 struct als_state *als_state = iio_priv(indio_dev);
80 int report_id = -1; 80 int report_id = -1;
81 u32 address; 81 u32 address;
82 int ret;
83 int ret_type; 82 int ret_type;
84 s32 poll_value; 83 s32 poll_value;
85 84
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
129 ret_type = IIO_VAL_INT; 128 ret_type = IIO_VAL_INT;
130 break; 129 break;
131 case IIO_CHAN_INFO_SAMP_FREQ: 130 case IIO_CHAN_INFO_SAMP_FREQ:
132 ret = hid_sensor_read_samp_freq_value( 131 ret_type = hid_sensor_read_samp_freq_value(
133 &als_state->common_attributes, val, val2); 132 &als_state->common_attributes, val, val2);
134 ret_type = IIO_VAL_INT_PLUS_MICRO;
135 break; 133 break;
136 case IIO_CHAN_INFO_HYSTERESIS: 134 case IIO_CHAN_INFO_HYSTERESIS:
137 ret = hid_sensor_read_raw_hyst_value( 135 ret_type = hid_sensor_read_raw_hyst_value(
138 &als_state->common_attributes, val, val2); 136 &als_state->common_attributes, val, val2);
139 ret_type = IIO_VAL_INT_PLUS_MICRO;
140 break; 137 break;
141 default: 138 default:
142 ret_type = -EINVAL; 139 ret_type = -EINVAL;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index d203ef4d892f..412bae86d6ae 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
74 struct prox_state *prox_state = iio_priv(indio_dev); 74 struct prox_state *prox_state = iio_priv(indio_dev);
75 int report_id = -1; 75 int report_id = -1;
76 u32 address; 76 u32 address;
77 int ret;
78 int ret_type; 77 int ret_type;
79 s32 poll_value; 78 s32 poll_value;
80 79
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
125 ret_type = IIO_VAL_INT; 124 ret_type = IIO_VAL_INT;
126 break; 125 break;
127 case IIO_CHAN_INFO_SAMP_FREQ: 126 case IIO_CHAN_INFO_SAMP_FREQ:
128 ret = hid_sensor_read_samp_freq_value( 127 ret_type = hid_sensor_read_samp_freq_value(
129 &prox_state->common_attributes, val, val2); 128 &prox_state->common_attributes, val, val2);
130 ret_type = IIO_VAL_INT_PLUS_MICRO;
131 break; 129 break;
132 case IIO_CHAN_INFO_HYSTERESIS: 130 case IIO_CHAN_INFO_HYSTERESIS:
133 ret = hid_sensor_read_raw_hyst_value( 131 ret_type = hid_sensor_read_raw_hyst_value(
134 &prox_state->common_attributes, val, val2); 132 &prox_state->common_attributes, val, val2);
135 ret_type = IIO_VAL_INT_PLUS_MICRO;
136 break; 133 break;
137 default: 134 default:
138 ret_type = -EINVAL; 135 ret_type = -EINVAL;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index fe063a0a21cd..752569985d1d 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,6 +52,7 @@
52 52
53struct tcs3472_data { 53struct tcs3472_data {
54 struct i2c_client *client; 54 struct i2c_client *client;
55 struct mutex lock;
55 u8 enable; 56 u8 enable;
56 u8 control; 57 u8 control;
57 u8 atime; 58 u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
116 117
117 switch (mask) { 118 switch (mask) {
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
120 if (iio_buffer_enabled(indio_dev))
121 return -EBUSY;
122
123 mutex_lock(&data->lock);
119 ret = tcs3472_req_data(data); 124 ret = tcs3472_req_data(data);
120 if (ret < 0) 125 if (ret < 0) {
126 mutex_unlock(&data->lock);
121 return ret; 127 return ret;
128 }
122 ret = i2c_smbus_read_word_data(data->client, chan->address); 129 ret = i2c_smbus_read_word_data(data->client, chan->address);
130 mutex_unlock(&data->lock);
123 if (ret < 0) 131 if (ret < 0)
124 return ret; 132 return ret;
125 *val = ret; 133 *val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
255 data = iio_priv(indio_dev); 263 data = iio_priv(indio_dev);
256 i2c_set_clientdata(client, indio_dev); 264 i2c_set_clientdata(client, indio_dev);
257 data->client = client; 265 data->client = client;
266 mutex_init(&data->lock);
258 267
259 indio_dev->dev.parent = &client->dev; 268 indio_dev->dev.parent = &client->dev;
260 indio_dev->info = &tcs3472_info; 269 indio_dev->info = &tcs3472_info;
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 41cf29e2a371..b2b0937d5133 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
110 struct magn_3d_state *magn_state = iio_priv(indio_dev); 110 struct magn_3d_state *magn_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
153 ret_type = IIO_VAL_INT; 152 ret_type = IIO_VAL_INT;
154 break; 153 break;
155 case IIO_CHAN_INFO_SAMP_FREQ: 154 case IIO_CHAN_INFO_SAMP_FREQ:
156 ret = hid_sensor_read_samp_freq_value( 155 ret_type = hid_sensor_read_samp_freq_value(
157 &magn_state->common_attributes, val, val2); 156 &magn_state->common_attributes, val, val2);
158 ret_type = IIO_VAL_INT_PLUS_MICRO;
159 break; 157 break;
160 case IIO_CHAN_INFO_HYSTERESIS: 158 case IIO_CHAN_INFO_HYSTERESIS:
161 ret = hid_sensor_read_raw_hyst_value( 159 ret_type = hid_sensor_read_raw_hyst_value(
162 &magn_state->common_attributes, val, val2); 160 &magn_state->common_attributes, val, val2);
163 ret_type = IIO_VAL_INT_PLUS_MICRO;
164 break; 161 break;
165 default: 162 default:
166 ret_type = -EINVAL; 163 ret_type = -EINVAL;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 1cd190c73788..2c0d2a4fed8c 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
78 struct press_state *press_state = iio_priv(indio_dev); 78 struct press_state *press_state = iio_priv(indio_dev);
79 int report_id = -1; 79 int report_id = -1;
80 u32 address; 80 u32 address;
81 int ret;
82 int ret_type; 81 int ret_type;
83 s32 poll_value; 82 s32 poll_value;
84 83
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
128 ret_type = IIO_VAL_INT; 127 ret_type = IIO_VAL_INT;
129 break; 128 break;
130 case IIO_CHAN_INFO_SAMP_FREQ: 129 case IIO_CHAN_INFO_SAMP_FREQ:
131 ret = hid_sensor_read_samp_freq_value( 130 ret_type = hid_sensor_read_samp_freq_value(
132 &press_state->common_attributes, val, val2); 131 &press_state->common_attributes, val, val2);
133 ret_type = IIO_VAL_INT_PLUS_MICRO;
134 break; 132 break;
135 case IIO_CHAN_INFO_HYSTERESIS: 133 case IIO_CHAN_INFO_HYSTERESIS:
136 ret = hid_sensor_read_raw_hyst_value( 134 ret_type = hid_sensor_read_raw_hyst_value(
137 &press_state->common_attributes, val, val2); 135 &press_state->common_attributes, val, val2);
138 ret_type = IIO_VAL_INT_PLUS_MICRO;
139 break; 136 break;
140 default: 137 default:
141 ret_type = -EINVAL; 138 ret_type = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5e153f6d4b48..768a0fb67dd6 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
432 */ 432 */
433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
434{ 434{
435 struct c4iw_ep *ep = handle;
436
435 printk(KERN_ERR MOD "ARP failure duing connect\n"); 437 printk(KERN_ERR MOD "ARP failure duing connect\n");
436 kfree_skb(skb); 438 kfree_skb(skb);
439 connect_reply_upcall(ep, -EHOSTUNREACH);
440 state_set(&ep->com, DEAD);
441 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
442 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
443 dst_release(ep->dst);
444 cxgb4_l2t_release(ep->l2t);
445 c4iw_put_ep(&ep->com);
437} 446}
438 447
439/* 448/*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
658 opt2 |= T5_OPT_2_VALID; 667 opt2 |= T5_OPT_2_VALID;
659 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 668 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
660 } 669 }
661 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 670 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
662 671
663 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 672 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
664 if (ep->com.remote_addr.ss_family == AF_INET) { 673 if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2180 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2189 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2181 BUG_ON(skb_cloned(skb)); 2190 BUG_ON(skb_cloned(skb));
2182 skb_trim(skb, sizeof(struct cpl_tid_release)); 2191 skb_trim(skb, sizeof(struct cpl_tid_release));
2183 skb_get(skb);
2184 release_tid(&dev->rdev, hwtid, skb); 2192 release_tid(&dev->rdev, hwtid, skb);
2185 return; 2193 return;
2186} 2194}
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
3917 return 0; 3925 return 0;
3918} 3926}
3919 3927
3920void __exit c4iw_cm_term(void) 3928void c4iw_cm_term(void)
3921{ 3929{
3922 WARN_ON(!list_empty(&timeout_list)); 3930 WARN_ON(!list_empty(&timeout_list));
3923 flush_workqueue(workq); 3931 flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aadc996e..7db82b24302b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
696 pr_err(MOD "error allocating status page\n"); 696 pr_err(MOD "error allocating status page\n");
697 goto err4; 697 goto err4;
698 } 698 }
699 rdev->status_page->db_off = 0;
699 return 0; 700 return 0;
700err4: 701err4:
701 c4iw_rqtpool_destroy(rdev); 702 c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
729 if (ctx->dev->rdev.oc_mw_kva) 730 if (ctx->dev->rdev.oc_mw_kva)
730 iounmap(ctx->dev->rdev.oc_mw_kva); 731 iounmap(ctx->dev->rdev.oc_mw_kva);
731 ib_dealloc_device(&ctx->dev->ibdev); 732 ib_dealloc_device(&ctx->dev->ibdev);
732 iwpm_exit(RDMA_NL_C4IW);
733 ctx->dev = NULL; 733 ctx->dev = NULL;
734} 734}
735 735
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
826 setup_debugfs(devp); 826 setup_debugfs(devp);
827 } 827 }
828 828
829 ret = iwpm_init(RDMA_NL_C4IW);
830 if (ret) {
831 pr_err("port mapper initialization failed with %d\n", ret);
832 ib_dealloc_device(&devp->ibdev);
833 return ERR_PTR(ret);
834 }
835 829
836 return devp; 830 return devp;
837} 831}
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
1332 pr_err("%s[%u]: Failed to add netlink callback\n" 1326 pr_err("%s[%u]: Failed to add netlink callback\n"
1333 , __func__, __LINE__); 1327 , __func__, __LINE__);
1334 1328
1329 err = iwpm_init(RDMA_NL_C4IW);
1330 if (err) {
1331 pr_err("port mapper initialization failed with %d\n", err);
1332 ibnl_remove_client(RDMA_NL_C4IW);
1333 c4iw_cm_term();
1334 debugfs_remove_recursive(c4iw_debugfs_root);
1335 return err;
1336 }
1337
1335 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); 1338 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1336 1339
1337 return 0; 1340 return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
1349 } 1352 }
1350 mutex_unlock(&dev_mutex); 1353 mutex_unlock(&dev_mutex);
1351 cxgb4_unregister_uld(CXGB4_ULD_RDMA); 1354 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
1355 iwpm_exit(RDMA_NL_C4IW);
1352 ibnl_remove_client(RDMA_NL_C4IW); 1356 ibnl_remove_client(RDMA_NL_C4IW);
1353 c4iw_cm_term(); 1357 c4iw_cm_term();
1354 debugfs_remove_recursive(c4iw_debugfs_root); 1358 debugfs_remove_recursive(c4iw_debugfs_root);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d1e175..361fff7a0742 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
908int c4iw_register_device(struct c4iw_dev *dev); 908int c4iw_register_device(struct c4iw_dev *dev);
909void c4iw_unregister_device(struct c4iw_dev *dev); 909void c4iw_unregister_device(struct c4iw_dev *dev);
910int __init c4iw_cm_init(void); 910int __init c4iw_cm_init(void);
911void __exit c4iw_cm_term(void); 911void c4iw_cm_term(void);
912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
913 struct c4iw_dev_ucontext *uctx); 913 struct c4iw_dev_ucontext *uctx);
914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1c0033..bbbcf389272c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
675 int err; 675 int err;
676 676
677 uuari = &dev->mdev.priv.uuari; 677 uuari = &dev->mdev.priv.uuari;
678 if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
679 return -EINVAL; 679 return -EINVAL;
680 680
681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
257} 257}
258 258
259static int input_get_disposition(struct input_dev *dev, 259static int input_get_disposition(struct input_dev *dev,
260 unsigned int type, unsigned int code, int value) 260 unsigned int type, unsigned int code, int *pval)
261{ 261{
262 int disposition = INPUT_IGNORE_EVENT; 262 int disposition = INPUT_IGNORE_EVENT;
263 int value = *pval;
263 264
264 switch (type) { 265 switch (type) {
265 266
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
357 break; 358 break;
358 } 359 }
359 360
361 *pval = value;
360 return disposition; 362 return disposition;
361} 363}
362 364
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
365{ 367{
366 int disposition; 368 int disposition;
367 369
368 disposition = input_get_disposition(dev, type, code, value); 370 disposition = input_get_disposition(dev, type, code, &value);
369 371
370 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 372 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
371 dev->event(dev, type, code, value); 373 dev->event(dev, type, code, value);
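
input_get_disposition() now takes the value by pointer, so whatever adjustment it makes while classifying the event (the ABS handling just above it is the obvious candidate) is written back before input_handle_event() forwards the event to dev->event() and the handlers. A toy model of the by-pointer update, not the input core itself:

#include <stdio.h>

#define PASS_TO_HANDLERS 1

static int get_disposition(int code, int *pval)
{
        int value = *pval;

        if (code == 0)                                 /* pretend code 0 is a fuzzy ABS axis */
                value = (value / 4) * 4;               /* toy "defuzz": quantize the value */
        *pval = value;                                 /* hand the adjusted value back */
        return PASS_TO_HANDLERS;
}

int main(void)
{
        int value = 1023;
        int disposition = get_disposition(0, &value);

        if (disposition & PASS_TO_HANDLERS)
                printf("handlers see %d\n", value);    /* 1020, not the raw 1023 */
        return 0;
}
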
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
215 return 0; 215 return 0;
216} 216}
217 217
218#ifdef CONFIG_PM_SLEEP
218static int keyscan_suspend(struct device *dev) 219static int keyscan_suspend(struct device *dev)
219{ 220{
220 struct platform_device *pdev = to_platform_device(dev); 221 struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
249 mutex_unlock(&input->mutex); 250 mutex_unlock(&input->mutex);
250 return retval; 251 return retval;
251} 252}
253#endif
252 254
253static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); 255static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
254 256
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
213 213
214module_platform_driver(sirfsoc_pwrc_driver); 214module_platform_driver(sirfsoc_pwrc_driver);
215 215
216MODULE_LICENSE("GPLv2"); 216MODULE_LICENSE("GPL v2");
217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); 217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); 218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
219MODULE_ALIAS("platform:sirfsoc-pwrc"); 219MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
132 1232, 5710, 1156, 4696 132 1232, 5710, 1156, 4696
133 }, 133 },
134 { 134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, 135 (const char * const []){"LEN0034", "LEN0036", "LEN2002",
136 "LEN2004", NULL},
136 1024, 5112, 2024, 4832 137 1024, 5112, 2024, 4832
137 }, 138 },
138 { 139 {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
168 "LEN0049", 169 "LEN0049",
169 "LEN2000", 170 "LEN2000",
170 "LEN2001", /* Edge E431 */ 171 "LEN2001", /* Edge E431 */
171 "LEN2002", 172 "LEN2002", /* Edge E531 */
172 "LEN2003", 173 "LEN2003",
173 "LEN2004", /* L440 */ 174 "LEN2004", /* L440 */
174 "LEN2005", 175 "LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
402 }, 402 },
403 }, 403 },
404 { 404 {
405 /* Acer Aspire 5710 */
406 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
408 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
409 },
410 },
411 {
405 /* Gericom Bellagio */ 412 /* Gericom Bellagio */
406 .matches = { 413 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), 414 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1217 * a=(pi*r^2)/C. 1217 * a=(pi*r^2)/C.
1218 */ 1218 */
1219 int a = data[5]; 1219 int a = data[5];
1220 int x_res = input_abs_get_res(input, ABS_X); 1220 int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
1221 int y_res = input_abs_get_res(input, ABS_Y); 1221 int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); 1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
1223 height = width * y_res / x_res; 1223 height = width * y_res / x_res;
1224 } 1224 }
1225 1225
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
1589 } else { 1589 } else {
1590 if (features->touch_max <= 2) { 1590 if (features->touch_max == 1) {
1591 input_set_abs_params(input_dev, ABS_X, 0, 1591 input_set_abs_params(input_dev, ABS_X, 0,
1592 features->x_max, features->x_fuzz, 0); 1592 features->x_max, features->x_fuzz, 0);
1593 input_set_abs_params(input_dev, ABS_Y, 0, 1593 input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1815 case MTTPC: 1815 case MTTPC:
1816 case MTTPC_B: 1816 case MTTPC_B:
1817 case TABLETPC2FG: 1817 case TABLETPC2FG:
1818 if (features->device_type == BTN_TOOL_FINGER) { 1818 if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
1819 unsigned int flags = INPUT_MT_DIRECT; 1819 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
1820
1821 if (wacom_wac->features.type == TABLETPC2FG)
1822 flags = 0;
1823
1824 input_mt_init_slots(input_dev, features->touch_max, flags);
1825 }
1826 /* fall through */ 1820 /* fall through */
1827 1821
1828 case TABLETPC: 1822 case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1883 __set_bit(BTN_RIGHT, input_dev->keybit); 1877 __set_bit(BTN_RIGHT, input_dev->keybit);
1884 1878
1885 if (features->touch_max) { 1879 if (features->touch_max) {
1886 /* touch interface */
1887 unsigned int flags = INPUT_MT_POINTER;
1888
1889 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1890 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 1880 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1891 input_set_abs_params(input_dev, 1881 input_set_abs_params(input_dev,
1892 ABS_MT_TOUCH_MAJOR, 1882 ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1894 input_set_abs_params(input_dev, 1884 input_set_abs_params(input_dev,
1895 ABS_MT_TOUCH_MINOR, 1885 ABS_MT_TOUCH_MINOR,
1896 0, features->y_max, 0, 0); 1886 0, features->y_max, 0, 0);
1897 } else {
1898 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1899 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1900 flags = 0;
1901 } 1887 }
1902 input_mt_init_slots(input_dev, features->touch_max, flags); 1888 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
1903 } else { 1889 } else {
1904 /* buttons/keys only interface */ 1890 /* buttons/keys only interface */
1905 __clear_bit(ABS_X, input_dev->absbit); 1891 __clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
359 */ 359 */
360 err = of_property_read_u32(node, "ti,coordinate-readouts", 360 err = of_property_read_u32(node, "ti,coordinate-readouts",
361 &ts_dev->coordinate_readouts); 361 &ts_dev->coordinate_readouts);
362 if (err < 0) 362 if (err < 0) {
363 dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
363 err = of_property_read_u32(node, "ti,coordiante-readouts", 364 err = of_property_read_u32(node, "ti,coordiante-readouts",
364 &ts_dev->coordinate_readouts); 365 &ts_dev->coordinate_readouts);
366 }
367
365 if (err < 0) 368 if (err < 0)
366 return err; 369 return err;
367 370
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88e31b9..bb446d742a2d 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) 170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{ 171{
172 /* Bug if not a power of 2 */ 172 /* Bug if not a power of 2 */
173 BUG_ON(!is_power_of_2(addrspace_size)); 173 BUG_ON((addrspace_size & (addrspace_size - 1)));
174 174
175 /* window size is 2^(WSE+1) bytes */ 175 /* window size is 2^(WSE+1) bytes */
176 return __ffs(addrspace_size) - 1; 176 return fls64(addrspace_size) - 2;
177} 177}
178 178
179/* Derive the PAACE window count encoding for the subwindow count */ 179/* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
351 struct paace *ppaace; 351 struct paace *ppaace;
352 unsigned long fspi; 352 unsigned long fspi;
353 353
354 if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { 354 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size); 355 pr_debug("window size too small or not a power of two %llx\n", win_size);
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
464 return -ENOENT; 464 return -ENOENT;
465 } 465 }
466 466
467 if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { 467 if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
468 pr_debug("subwindow size out of range, or not a power of 2\n"); 468 pr_debug("subwindow size out of range, or not a power of 2\n");
469 return -EINVAL; 469 return -EINVAL;
470 } 470 }
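
The open-coded (x & (x - 1)) tests and fls64() replace is_power_of_2() and __ffs(), which take an unsigned long and so would truncate a 64-bit phys_addr_t on 32-bit kernels; that is the likely motivation, though the hunk itself does not say. For a power-of-two size 2^k, fls64() returns k + 1, giving WSE = k - 1 and the documented window size 2^(WSE+1) = 2^k. A quick standalone check (fls64_() is a simple stand-in for the kernel helper):

#include <stdio.h>
#include <stdint.h>

static int fls64_(uint64_t x)                          /* stand-in for the kernel's fls64() */
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

int main(void)
{
        uint64_t sizes[] = { 1ULL << 12, 1ULL << 30, 1ULL << 36 };

        for (int i = 0; i < 3; i++) {
                int wse = fls64_(sizes[i]) - 2;

                printf("size 2^%d -> WSE %d -> window 2^%d\n",
                       fls64_(sizes[i]) - 1, wse, wse + 1);
        }
        return 0;
}
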
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba44b1d..af47648301a9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
301 * Size must be a power of two and at least be equal 301 * Size must be a power of two and at least be equal
302 * to PAMU page size. 302 * to PAMU page size.
303 */ 303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { 304 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__); 305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL; 306 return -EINVAL;
307 } 307 }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
335 return domain; 335 return domain;
336} 336}
337 337
338static inline struct device_domain_info *find_domain(struct device *dev)
339{
340 return dev->archdata.iommu_domain;
341}
342
343static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) 338static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
344{ 339{
345 unsigned long flags; 340 unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
380 * Check here if the device is already attached to domain or not. 375 * Check here if the device is already attached to domain or not.
381 * If the device is already attached to a domain detach it. 376 * If the device is already attached to a domain detach it.
382 */ 377 */
383 old_domain_info = find_domain(dev); 378 old_domain_info = dev->archdata.iommu_domain;
384 if (old_domain_info && old_domain_info->domain != dma_domain) { 379 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags); 380 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain); 381 detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
399 * the info for the first LIODN as all 394 * the info for the first LIODN as all
400 * LIODNs share the same domain 395 * LIODNs share the same domain
401 */ 396 */
402 if (!old_domain_info) 397 if (!dev->archdata.iommu_domain)
403 dev->archdata.iommu_domain = info; 398 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags); 399 spin_unlock_irqrestore(&device_domain_lock, flags);
405 400
@@ -1042,12 +1037,15 @@ root_bus:
1042 group = get_shared_pci_device_group(pdev); 1037 group = get_shared_pci_device_group(pdev);
1043 } 1038 }
1044 1039
1040 if (!group)
1041 group = ERR_PTR(-ENODEV);
1042
1045 return group; 1043 return group;
1046} 1044}
1047 1045
1048static int fsl_pamu_add_device(struct device *dev) 1046static int fsl_pamu_add_device(struct device *dev)
1049{ 1047{
1050 struct iommu_group *group = NULL; 1048 struct iommu_group *group = ERR_PTR(-ENODEV);
1051 struct pci_dev *pdev; 1049 struct pci_dev *pdev;
1052 const u32 *prop; 1050 const u32 *prop;
1053 int ret, len; 1051 int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
1070 group = get_device_iommu_group(dev); 1068 group = get_device_iommu_group(dev);
1071 } 1069 }
1072 1070
1073 if (!group || IS_ERR(group)) 1071 if (IS_ERR(group))
1074 return PTR_ERR(group); 1072 return PTR_ERR(group);
1075 1073
1076 ret = iommu_group_add_device(group, dev); 1074 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..7c131cf7cc13 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
42#include <linux/irqchip/chained_irq.h> 42#include <linux/irqchip/chained_irq.h>
43#include <linux/irqchip/arm-gic.h> 43#include <linux/irqchip/arm-gic.h>
44 44
45#include <asm/cputype.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/exception.h> 47#include <asm/exception.h>
47#include <asm/smp_plat.h> 48#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
954 } 955 }
955 956
956 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
957 unsigned long offset = percpu_offset * cpu_logical_map(cpu); 958 u32 mpidr = cpu_logical_map(cpu);
959 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
960 unsigned long offset = percpu_offset * core_id;
958 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 961 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
959 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 962 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
960 } 963 }
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1071 gic_cnt++; 1074 gic_cnt++;
1072 return 0; 1075 return 0;
1073} 1076}
1077IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1074IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1078IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1075IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1079IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1080IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1076IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1081IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1077IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1082IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1078 1083
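
The per-CPU banked GIC bases are now strided by the core's affinity-0 field rather than by the raw value from cpu_logical_map(), which on multi-cluster systems carries cluster bits and would produce a bogus offset; MPIDR_AFFINITY_LEVEL(mpidr, 0) extracts the low 8-bit field on ARM. A worked example with an assumed 4 KiB percpu_offset (AFF0() merely models the kernel macro):

#include <stdio.h>

#define AFF0(mpidr)  ((mpidr) & 0xfful)                /* models MPIDR_AFFINITY_LEVEL(mpidr, 0) */

int main(void)
{
        unsigned long percpu_offset = 0x1000;
        unsigned long mpidr = 0x100;                   /* cluster 1, core 0 */

        printf("old offset: 0x%lx\n", percpu_offset * mpidr);        /* 0x100000: off the map */
        printf("new offset: 0x%lx\n", percpu_offset * AFF0(mpidr));  /* 0x0: core 0's bank */
        return 0;
}
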
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d3eb7b..b7ae0a0dd5b6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@ allocerr:
2400error: 2400error:
2401 freeurbs(cs); 2401 freeurbs(cs);
2402 usb_set_intfdata(interface, NULL); 2402 usb_set_intfdata(interface, NULL);
2403 usb_put_dev(udev);
2403 gigaset_freecs(cs); 2404 gigaset_freecs(cs);
2404 return rc; 2405 return rc;
2405} 2406}
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 0df6691d045c..8dc791bfaa6f 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ 2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ 2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
2061 2061
2062 if (ic->parm.ni1_io.timeout > 0) 2062 if (ic->parm.ni1_io.timeout > 0) {
2063 if (!(pc = ni1_new_l3_process(st, -1))) 2063 pc = ni1_new_l3_process(st, -1);
2064 { free_invoke_id(st, id); 2064 if (!pc) {
2065 free_invoke_id(st, id);
2065 return (-2); 2066 return (-2);
2066 } 2067 }
2067 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ 2068 /* remember id */
2068 pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ 2069 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
2070 /* and procedure */
2071 pc->prot.ni1.proc = ic->parm.ni1_io.proc;
2072 }
2069 2073
2070 if (!(skb = l3_alloc_skb(l))) 2074 if (!(skb = l3_alloc_skb(l)))
2071 { free_invoke_id(st, id); 2075 { free_invoke_id(st, id);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61ac63237446..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
442{ 442{
443 struct sock_fprog uprog; 443 struct sock_fprog uprog;
444 struct sock_filter *code = NULL; 444 struct sock_filter *code = NULL;
445 int len, err; 445 int len;
446 446
447 if (copy_from_user(&uprog, arg, sizeof(uprog))) 447 if (copy_from_user(&uprog, arg, sizeof(uprog)))
448 return -EFAULT; 448 return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
458 if (IS_ERR(code)) 458 if (IS_ERR(code))
459 return PTR_ERR(code); 459 return PTR_ERR(code);
460 460
461 err = sk_chk_filter(code, uprog.len);
462 if (err) {
463 kfree(code);
464 return err;
465 }
466
467 *p = code; 461 *p = code;
468 return uprog.len; 462 return uprog.len;
469} 463}
@@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
644 fprog.len = len; 638 fprog.len = len;
645 fprog.filter = code; 639 fprog.filter = code;
646 640
647 if (is->pass_filter) 641 if (is->pass_filter) {
648 sk_unattached_filter_destroy(is->pass_filter); 642 sk_unattached_filter_destroy(is->pass_filter);
649 err = sk_unattached_filter_create(&is->pass_filter, &fprog); 643 is->pass_filter = NULL;
644 }
645 if (fprog.filter != NULL)
646 err = sk_unattached_filter_create(&is->pass_filter,
647 &fprog);
648 else
649 err = 0;
650 kfree(code); 650 kfree(code);
651 651
652 return err; 652 return err;
@@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
663 fprog.len = len; 663 fprog.len = len;
664 fprog.filter = code; 664 fprog.filter = code;
665 665
666 if (is->active_filter) 666 if (is->active_filter) {
667 sk_unattached_filter_destroy(is->active_filter); 667 sk_unattached_filter_destroy(is->active_filter);
668 err = sk_unattached_filter_create(&is->active_filter, &fprog); 668 is->active_filter = NULL;
669 }
670 if (fprog.filter != NULL)
671 err = sk_unattached_filter_create(&is->active_filter,
672 &fprog);
673 else
674 err = 0;
669 kfree(code); 675 kfree(code);
670 676
671 return err; 677 return err;
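
Both isdn_ppp hunks above follow the same shape: destroy the old filter, clear the now-stale pointer, and only then (optionally) create the replacement, so a failed create cannot leave a dangling pointer behind. The same replace-or-clear pattern in plain userspace C is sketched below; struct filter and set_filter() are hypothetical names, not the kernel's sk_unattached_filter API.

    #include <stdlib.h>
    #include <string.h>

    struct filter { size_t len; unsigned char *prog; };

    /* Replace *slot with a copy of prog, or clear it when prog == NULL. */
    static int set_filter(struct filter **slot, const unsigned char *prog, size_t len)
    {
        struct filter *nf;

        if (*slot) {            /* destroy the old object first ... */
            free((*slot)->prog);
            free(*slot);
            *slot = NULL;       /* ... and never leave a stale pointer */
        }
        if (!prog)              /* NULL means "no filter": nothing to create */
            return 0;

        nf = malloc(sizeof(*nf));
        if (!nf)
            return -1;
        nf->prog = malloc(len);
        if (!nf->prog) {
            free(nf);
            return -1;          /* *slot is already NULL, so callers see a clean state */
        }
        memcpy(nf->prog, prog, len);
        nf->len = len;
        *slot = nf;
        return 0;
    }
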
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba60656..d2899e7eb3aa 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
425 425
426 disk_super = dm_block_data(sblock); 426 disk_super = dm_block_data(sblock);
427 427
428 /* Verify the data block size hasn't changed */
429 if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
430 DMERR("changing the data block size (from %u to %llu) is not supported",
431 le32_to_cpu(disk_super->data_block_size),
432 (unsigned long long)cmd->data_block_size);
433 r = -EINVAL;
434 goto bad;
435 }
436
428 r = __check_incompat_features(disk_super, cmd); 437 r = __check_incompat_features(disk_super, cmd);
429 if (r < 0) 438 if (r < 0)
430 goto bad; 439 goto bad;
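
The added check above compares an on-disk little-endian field against the in-core value before trusting the superblock. A minimal sketch of that validation style is shown below; the struct and field names are hypothetical, and only the usual __le32/le32_to_cpu byte-order helpers are assumed.

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <asm/byteorder.h>

    struct my_disk_super {
        __le32 data_block_size;   /* stored little-endian on disk */
    };

    static int my_check_block_size(struct my_disk_super *sb, u32 expected)
    {
        u32 on_disk = le32_to_cpu(sb->data_block_size);

        if (on_disk != expected) {
            pr_err("data block size changed (%u -> %u), refusing to open\n",
                   on_disk, expected);
            return -EINVAL;
        }
        return 0;
    }
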
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53b213226c01..4cba2d808afb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de> 2 * Copyright (C) 2003 Jana Saout <jana@saout.de>
3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> 5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
1996module_init(dm_crypt_init); 1996module_init(dm_crypt_init);
1997module_exit(dm_crypt_exit); 1997module_exit(dm_crypt_exit);
1998 1998
1999MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); 1999MODULE_AUTHOR("Jana Saout <jana@saout.de>");
2000MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); 2000MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2001MODULE_LICENSE("GPL"); 2001MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3842ac738f98..db404a0f7e2c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -10,6 +10,7 @@
10#include <linux/device-mapper.h> 10#include <linux/device-mapper.h>
11 11
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/completion.h>
13#include <linux/mempool.h> 14#include <linux/mempool.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
32struct io { 33struct io {
33 unsigned long error_bits; 34 unsigned long error_bits;
34 atomic_t count; 35 atomic_t count;
35 struct task_struct *sleeper; 36 struct completion *wait;
36 struct dm_io_client *client; 37 struct dm_io_client *client;
37 io_notify_fn callback; 38 io_notify_fn callback;
38 void *context; 39 void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
121 invalidate_kernel_vmap_range(io->vma_invalidate_address, 122 invalidate_kernel_vmap_range(io->vma_invalidate_address,
122 io->vma_invalidate_size); 123 io->vma_invalidate_size);
123 124
124 if (io->sleeper) 125 if (io->wait)
125 wake_up_process(io->sleeper); 126 complete(io->wait);
126 127
127 else { 128 else {
128 unsigned long r = io->error_bits; 129 unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
387 */ 388 */
388 volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; 389 volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
389 struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); 390 struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
391 DECLARE_COMPLETION_ONSTACK(wait);
390 392
391 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 393 if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
392 WARN_ON(1); 394 WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
395 397
396 io->error_bits = 0; 398 io->error_bits = 0;
397 atomic_set(&io->count, 1); /* see dispatch_io() */ 399 atomic_set(&io->count, 1); /* see dispatch_io() */
398 io->sleeper = current; 400 io->wait = &wait;
399 io->client = client; 401 io->client = client;
400 402
401 io->vma_invalidate_address = dp->vma_invalidate_address; 403 io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
403 405
404 dispatch_io(rw, num_regions, where, dp, io, 1); 406 dispatch_io(rw, num_regions, where, dp, io, 1);
405 407
406 while (1) { 408 wait_for_completion_io(&wait);
407 set_current_state(TASK_UNINTERRUPTIBLE);
408
409 if (!atomic_read(&io->count))
410 break;
411
412 io_schedule();
413 }
414 set_current_state(TASK_RUNNING);
415 409
416 if (error_bits) 410 if (error_bits)
417 *error_bits = io->error_bits; 411 *error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
434 io = mempool_alloc(client->pool, GFP_NOIO); 428 io = mempool_alloc(client->pool, GFP_NOIO);
435 io->error_bits = 0; 429 io->error_bits = 0;
436 atomic_set(&io->count, 1); /* see dispatch_io() */ 430 atomic_set(&io->count, 1); /* see dispatch_io() */
437 io->sleeper = NULL; 431 io->wait = NULL;
438 io->client = client; 432 io->client = client;
439 io->callback = fn; 433 io->callback = fn;
440 io->context = context; 434 io->context = context;
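
The dm-io change above replaces a hand-rolled set_current_state()/io_schedule() loop with a struct completion, which packages the "set state, check condition, sleep" dance behind complete()/wait_for_completion(). A compact sketch of that completion pattern follows, with hypothetical names; it is not the dm-io code itself.

    #include <linux/completion.h>
    #include <linux/atomic.h>

    struct my_io {
        atomic_t count;
        struct completion *wait;    /* NULL for async users */
    };

    /* called when the last outstanding region finishes */
    static void my_dec_count(struct my_io *io)
    {
        if (atomic_dec_and_test(&io->count) && io->wait)
            complete(io->wait);     /* wake the synchronous waiter */
    }

    static void my_sync_io(struct my_io *io)
    {
        DECLARE_COMPLETION_ONSTACK(wait);

        io->wait = &wait;
        atomic_set(&io->count, 1);

        /* ... submit the I/O; each completion calls my_dec_count() ... */
        my_dec_count(io);           /* pretend the I/O finished immediately */

        wait_for_completion_io(&wait);  /* sleeps until complete() is called */
    }
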
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3f6fd9d33ba3..f4167b013d99 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
1611 1611
1612 spin_lock_irqsave(&m->lock, flags); 1612 spin_lock_irqsave(&m->lock, flags);
1613 1613
1614 /* pg_init in progress, requeue until done */ 1614 /* pg_init in progress or no paths available */
1615 if (!pg_ready(m)) { 1615 if (m->pg_init_in_progress ||
1616 (!m->nr_valid_paths && m->queue_if_no_path)) {
1616 busy = 1; 1617 busy = 1;
1617 goto out; 1618 goto out;
1618 } 1619 }
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a945edcb..e9d33ad59df5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
613 613
614 disk_super = dm_block_data(sblock); 614 disk_super = dm_block_data(sblock);
615 615
616 /* Verify the data block size hasn't changed */
617 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
618 DMERR("changing the data block size (from %u to %llu) is not supported",
619 le32_to_cpu(disk_super->data_block_size),
620 (unsigned long long)pmd->data_block_size);
621 r = -EINVAL;
622 goto bad_unlock_sblock;
623 }
624
616 r = __check_incompat_features(disk_super, pmd); 625 r = __check_incompat_features(disk_super, pmd);
617 if (r < 0) 626 if (r < 0)
618 goto bad_unlock_sblock; 627 goto bad_unlock_sblock;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index c99003e0d47a..b9a64bbce304 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de> 2 * Copyright (C) 2003 Jana Saout <jana@saout.de>
3 * 3 *
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
79module_init(dm_zero_init) 79module_init(dm_zero_init)
80module_exit(dm_zero_exit) 80module_exit(dm_zero_exit)
81 81
82MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); 82MODULE_AUTHOR("Jana Saout <jana@saout.de>");
83MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); 83MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
84MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 437d99045ef2..32b958dbc499 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
54 54
55static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 55static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
56 56
57static struct workqueue_struct *deferred_remove_workqueue;
58
57/* 59/*
58 * For bio-based dm. 60 * For bio-based dm.
59 * One of these is allocated per bio. 61 * One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
276 if (r) 278 if (r)
277 goto out_free_rq_tio_cache; 279 goto out_free_rq_tio_cache;
278 280
281 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
282 if (!deferred_remove_workqueue) {
283 r = -ENOMEM;
284 goto out_uevent_exit;
285 }
286
279 _major = major; 287 _major = major;
280 r = register_blkdev(_major, _name); 288 r = register_blkdev(_major, _name);
281 if (r < 0) 289 if (r < 0)
282 goto out_uevent_exit; 290 goto out_free_workqueue;
283 291
284 if (!_major) 292 if (!_major)
285 _major = r; 293 _major = r;
286 294
287 return 0; 295 return 0;
288 296
297out_free_workqueue:
298 destroy_workqueue(deferred_remove_workqueue);
289out_uevent_exit: 299out_uevent_exit:
290 dm_uevent_exit(); 300 dm_uevent_exit();
291out_free_rq_tio_cache: 301out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
299static void local_exit(void) 309static void local_exit(void)
300{ 310{
301 flush_scheduled_work(); 311 flush_scheduled_work();
312 destroy_workqueue(deferred_remove_workqueue);
302 313
303 kmem_cache_destroy(_rq_tio_cache); 314 kmem_cache_destroy(_rq_tio_cache);
304 kmem_cache_destroy(_io_cache); 315 kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
407 418
408 if (atomic_dec_and_test(&md->open_count) && 419 if (atomic_dec_and_test(&md->open_count) &&
409 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 420 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
410 schedule_work(&deferred_remove_work); 421 queue_work(deferred_remove_workqueue, &deferred_remove_work);
411 422
412 dm_put(md); 423 dm_put(md);
413 424
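
The dm.c hunks above move deferred removal off the shared system workqueue onto a dedicated unbound, single-threaded workqueue that can be flushed and destroyed independently of everything else. The same setup/teardown shape in a minimal module sketch is given below; the names are hypothetical.

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;

    static void my_work_fn(struct work_struct *w)
    {
        pr_info("deferred work ran\n");
    }
    static DECLARE_WORK(my_work, my_work_fn);

    static void my_trigger(void)
    {
        queue_work(my_wq, &my_work);    /* instead of schedule_work() */
    }

    static int __init my_init(void)
    {
        /* unbound, at most one work item executing at a time */
        my_wq = alloc_workqueue("my_deferred", WQ_UNBOUND, 1);
        if (!my_wq)
            return -ENOMEM;
        my_trigger();
        return 0;
    }

    static void __exit my_exit(void)
    {
        destroy_workqueue(my_wq);   /* drains pending work, then frees the wq */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
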
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
60 jiffies_to_msecs(jiffies) - 60 jiffies_to_msecs(jiffies) -
61 (jiffies_to_msecs(timeout) - TIMEOUT)); 61 (jiffies_to_msecs(timeout) - TIMEOUT));
62 62
63 if (!(cmd->args[0] >> 7) & 0x01) { 63 if (!((cmd->args[0] >> 7) & 0x01)) {
64 ret = -ETIMEDOUT; 64 ret = -ETIMEDOUT;
65 goto err_mutex_unlock; 65 goto err_mutex_unlock;
66 } 66 }
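
The small change above is an operator-precedence repair: '!' binds tighter than '&', so "!(x >> 7) & 0x01" parses as "(!(x >> 7)) & 0x01". For an 8-bit value shifted right by 7 the two parses happen to agree, but for any other bit test they do not, and compilers warn about the original form (for example with -Wlogical-not-parentheses); the added parentheses state the intent explicitly. A standalone illustration in plain C, using a hypothetical status byte:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char status = 0x20;        /* bit 5 set, bit 4 clear */

        /* precedence trap: '!' is applied before '&' */
        int trap  = !(status >> 4) & 0x01;  /* (!(0x02)) & 1  -> 0 */
        /* explicit grouping: mask the bit, then negate */
        int clear = !((status >> 4) & 0x01);/* !(0x02 & 1)    -> 1 */

        printf("trap=%d clear=%d\n", trap, clear);
        assert(trap == 0 && clear == 1);    /* the two parses disagree */

        /* shifting an 8-bit value right by 7 leaves only 0 or 1, so there
         * the two forms coincide, which is why the original code "worked" */
        status = 0x80;
        assert((!(status >> 7) & 0x01) == !((status >> 7) & 0x01));
        return 0;
    }
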
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
485 if (ret) 485 if (ret)
486 goto err; 486 goto err;
487 487
488 cmd.args[0] = 0x05;
489 cmd.args[1] = 0x00;
490 cmd.args[2] = 0xaa;
491 cmd.args[3] = 0x4d;
492 cmd.args[4] = 0x56;
493 cmd.args[5] = 0x40;
494 cmd.args[6] = 0x00;
495 cmd.args[7] = 0x00;
496 cmd.wlen = 8;
497 cmd.rlen = 1;
498 ret = si2168_cmd_execute(s, &cmd);
499 if (ret)
500 goto err;
501
502 /* cold state - try to download firmware */ 488 /* cold state - try to download firmware */
503 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", 489 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
504 KBUILD_MODNAME, si2168_ops.info.name); 490 KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/i2c-mux.h> 23#include <linux/i2c-mux.h>
24 24
25#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" 25#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
26 26
27/* state struct */ 27/* state struct */
28struct si2168 { 28struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
668 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 668 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
669 int ret, i; 669 int ret, i;
670 u8 mode, rolloff, pilot, inversion, div; 670 u8 mode, rolloff, pilot, inversion, div;
671 fe_modulation_t modulation;
671 672
672 dev_dbg(&priv->i2c->dev, 673 dev_dbg(&priv->i2c->dev,
673 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", 674 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
702 703
703 switch (c->delivery_system) { 704 switch (c->delivery_system) {
704 case SYS_DVBS: 705 case SYS_DVBS:
706 modulation = QPSK;
705 rolloff = 0; 707 rolloff = 0;
706 pilot = 2; 708 pilot = 2;
707 break; 709 break;
708 case SYS_DVBS2: 710 case SYS_DVBS2:
711 modulation = c->modulation;
712
709 switch (c->rolloff) { 713 switch (c->rolloff) {
710 case ROLLOFF_20: 714 case ROLLOFF_20:
711 rolloff = 2; 715 rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
750 754
751 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { 755 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
752 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && 756 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
753 c->modulation == TDA10071_MODCOD[i].modulation && 757 modulation == TDA10071_MODCOD[i].modulation &&
754 c->fec_inner == TDA10071_MODCOD[i].fec) { 758 c->fec_inner == TDA10071_MODCOD[i].fec) {
755 mode = TDA10071_MODCOD[i].val; 759 mode = TDA10071_MODCOD[i].val;
756 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", 760 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
834 838
835 switch ((buf[1] >> 0) & 0x01) { 839 switch ((buf[1] >> 0) & 0x01) {
836 case 0: 840 case 0:
837 c->inversion = INVERSION_OFF; 841 c->inversion = INVERSION_ON;
838 break; 842 break;
839 case 1: 843 case 1:
840 c->inversion = INVERSION_ON; 844 c->inversion = INVERSION_OFF;
841 break; 845 break;
842 } 846 }
843 847
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
856 if (ret) 860 if (ret)
857 goto error; 861 goto error;
858 862
859 c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); 863 c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
860 864
861 return ret; 865 return ret;
862error: 866error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, 55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, 56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
57 /* 8PSK */ 57 /* 8PSK */
58 { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
58 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, 59 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
59 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, 60 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
60 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, 61 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
179 .read = vb2_fop_read, 179 .read = vb2_fop_read,
180 .poll = vb2_fop_poll, 180 .poll = vb2_fop_poll,
181 .mmap = vb2_fop_mmap, 181 .mmap = vb2_fop_mmap,
182 .ioctl = video_ioctl2, 182 .unlocked_ioctl = video_ioctl2,
183}; 183};
184 184
185static const struct v4l2_ioctl_ops ts_ioctl_ops = { 185static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
269 list_del(&buf->list); 269 list_del(&buf->list);
270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
271 } 271 }
272 spin_unlock_irqrestore(&common->irqlock, flags);
272 273
273 return ret; 274 return ret;
274} 275}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
233 list_del(&buf->list); 233 list_del(&buf->list);
234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
235 } 235 }
236 spin_unlock_irqrestore(&common->irqlock, flags);
236 237
237 return ret; 238 return ret;
238} 239}
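
Both davinci hunks above add the unlock that was missing on an error path that returned while the irq-saving spinlock was still held. A minimal sketch of the balanced lock/unlock pattern is shown below, with hypothetical names; only the standard spin_lock_irqsave()/spin_unlock_irqrestore() API is assumed.

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(my_lock);
    static LIST_HEAD(my_buf_list);

    static int my_start_streaming(int dma_ok)
    {
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&my_lock, flags);

        if (!dma_ok) {
            /* roll back whatever was queued so far ... */
            ret = -EIO;
            goto unlock;    /* ... but never return with the lock held */
        }

        /* normal start-up work under the lock */

    unlock:
        spin_unlock_irqrestore(&my_lock, flags);
        return ret;
    }
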
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
57 jiffies_to_msecs(jiffies) - 57 jiffies_to_msecs(jiffies) -
58 (jiffies_to_msecs(timeout) - TIMEOUT)); 58 (jiffies_to_msecs(timeout) - TIMEOUT));
59 59
60 if (!(buf[0] >> 7) & 0x01) { 60 if (!((buf[0] >> 7) & 0x01)) {
61 ret = -ETIMEDOUT; 61 ret = -ETIMEDOUT;
62 goto err_mutex_unlock; 62 goto err_mutex_unlock;
63 } else { 63 } else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
704 if (ret < 0) 704 if (ret < 0)
705 goto err; 705 goto err;
706 706
707 if (tmp == 0x00) 707 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
708 dev_dbg(&d->udev->dev, 708 __func__, i, tmp);
709 "%s: [%d]tuner not set, using default\n", 709
710 __func__, i); 710 /* tuner sanity check */
711 else 711 if (state->chip_type == 0x9135) {
712 if (state->chip_version == 0x02) {
713 /* IT9135 BX (v2) */
714 switch (tmp) {
715 case AF9033_TUNER_IT9135_60:
716 case AF9033_TUNER_IT9135_61:
717 case AF9033_TUNER_IT9135_62:
718 state->af9033_config[i].tuner = tmp;
719 break;
720 }
721 } else {
722 /* IT9135 AX (v1) */
723 switch (tmp) {
724 case AF9033_TUNER_IT9135_38:
725 case AF9033_TUNER_IT9135_51:
726 case AF9033_TUNER_IT9135_52:
727 state->af9033_config[i].tuner = tmp;
728 break;
729 }
730 }
731 } else {
732 /* AF9035 */
712 state->af9033_config[i].tuner = tmp; 733 state->af9033_config[i].tuner = tmp;
734 }
713 735
714 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", 736 if (state->af9033_config[i].tuner != tmp) {
715 __func__, i, state->af9033_config[i].tuner); 737 dev_info(&d->udev->dev,
738 "%s: [%d] overriding tuner from %02x to %02x\n",
739 KBUILD_MODNAME, i, tmp,
740 state->af9033_config[i].tuner);
741 }
716 742
717 switch (state->af9033_config[i].tuner) { 743 switch (state->af9033_config[i].tuner) {
718 case AF9033_TUNER_TUA9001: 744 case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
928 {USB_DEVICE(0x093a, 0x2620)}, 928 {USB_DEVICE(0x093a, 0x2620)},
929 {USB_DEVICE(0x093a, 0x2621)}, 929 {USB_DEVICE(0x093a, 0x2621)},
930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, 930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, 932 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
932 {USB_DEVICE(0x093a, 0x2625)}, 933 {USB_DEVICE(0x093a, 0x2625)},
933 {USB_DEVICE(0x093a, 0x2626)}, 934 {USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
82} 82}
83 83
84/*=========================================================================*/ 84/*=========================================================================*/
85/* bufffer bits */ 85/* buffer bits */
86 86
87/* function expects dev->io_mutex to be hold by caller */ 87/* function expects dev->io_mutex to be hold by caller */
88int hdpvr_cancel_queue(struct hdpvr_device *dev) 88int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
926 case V4L2_CID_MPEG_AUDIO_ENCODING: 926 case V4L2_CID_MPEG_AUDIO_ENCODING:
927 if (dev->flags & HDPVR_FLAG_AC3_CAP) { 927 if (dev->flags & HDPVR_FLAG_AC3_CAP) {
928 opt->audio_codec = ctrl->val; 928 opt->audio_codec = ctrl->val;
929 return hdpvr_set_audio(dev, opt->audio_input, 929 return hdpvr_set_audio(dev, opt->audio_input + 1,
930 opt->audio_codec); 930 opt->audio_codec);
931 } 931 }
932 return 0; 932 return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1199 V4L2_CID_MPEG_AUDIO_ENCODING, 1199 V4L2_CID_MPEG_AUDIO_ENCODING,
1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
1201 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); 1201 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1203 V4L2_CID_MPEG_VIDEO_ENCODING, 1203 V4L2_CID_MPEG_VIDEO_ENCODING,
1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, 1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
610 aspect.denominator = 9; 610 aspect.denominator = 9;
611 } else if (ratio == 34) { 611 } else if (ratio == 34) {
612 aspect.numerator = 4; 612 aspect.numerator = 4;
613 aspect.numerator = 3; 613 aspect.denominator = 3;
614 } else if (ratio == 68) { 614 } else if (ratio == 68) {
615 aspect.numerator = 15; 615 aspect.numerator = 15;
616 aspect.numerator = 9; 616 aspect.denominator = 9;
617 } else { 617 } else {
618 aspect.numerator = hor_landscape + 99; 618 aspect.numerator = hor_landscape + 99;
619 aspect.denominator = 100; 619 aspect.denominator = 100;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e4ec355704a6..a7543ba3e190 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -52,6 +52,11 @@
52/* Atmel chips */ 52/* Atmel chips */
53#define AT49BV640D 0x02de 53#define AT49BV640D 0x02de
54#define AT49BV640DT 0x02db 54#define AT49BV640DT 0x02db
55/* Sharp chips */
56#define LH28F640BFHE_PTTL90 0x00b0
57#define LH28F640BFHE_PBTL90 0x00b1
58#define LH28F640BFHE_PTTL70A 0x00b2
59#define LH28F640BFHE_PBTL70A 0x00b3
55 60
56static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 62static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
258 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 263 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
259}; 264};
260 265
266static int is_LH28F640BF(struct cfi_private *cfi)
267{
268 /* Sharp LH28F640BF Family */
269 if (cfi->mfr == CFI_MFR_SHARP && (
270 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
271 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
272 return 1;
273 return 0;
274}
275
276static void fixup_LH28F640BF(struct mtd_info *mtd)
277{
278 struct map_info *map = mtd->priv;
279 struct cfi_private *cfi = map->fldrv_priv;
280 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
281
282 /* Reset the Partition Configuration Register on LH28F640BF
283 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
284 if (is_LH28F640BF(cfi)) {
285 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
286 map_write(map, CMD(0x60), 0);
287 map_write(map, CMD(0x04), 0);
288
289 /* We have set one single partition thus
290 * Simultaneous Operations are not allowed */
291 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
292 extp->FeatureSupport &= ~512;
293 }
294}
295
261static void fixup_use_point(struct mtd_info *mtd) 296static void fixup_use_point(struct mtd_info *mtd)
262{ 297{
263 struct map_info *map = mtd->priv; 298 struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, 344 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, 345 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, 346 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
347 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
348 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
312 { 0, 0, NULL } 349 { 0, 0, NULL }
313}; 350};
314 351
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1649 initial_adr = adr; 1686 initial_adr = adr;
1650 cmd_adr = adr & ~(wbufsize-1); 1687 cmd_adr = adr & ~(wbufsize-1);
1651 1688
1689 /* Sharp LH28F640BF chips need the first address for the
1690 * Page Buffer Program command. See Table 5 of
1691 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1692 if (is_LH28F640BF(cfi))
1693 cmd_adr = adr;
1694
1652 /* Let's determine this according to the interleave only once */ 1695 /* Let's determine this according to the interleave only once */
1653 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); 1696 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1654 1697
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 7df86948e6d4..b4f61c7fc161 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
475 ELM_SYNDROME_FRAGMENT_1 + offset); 475 ELM_SYNDROME_FRAGMENT_1 + offset);
476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, 476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
477 ELM_SYNDROME_FRAGMENT_0 + offset); 477 ELM_SYNDROME_FRAGMENT_0 + offset);
478 break;
478 default: 479 default:
479 return -EINVAL; 480 return -EINVAL;
480 } 481 }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
520 regs->elm_syndrome_fragment_1[i]); 521 regs->elm_syndrome_fragment_1[i]);
521 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, 522 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
522 regs->elm_syndrome_fragment_0[i]); 523 regs->elm_syndrome_fragment_0[i]);
524 break;
523 default: 525 default:
524 return -EINVAL; 526 return -EINVAL;
525 } 527 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41167e9e991e..4f3e80c68a26 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length; 4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length;
4048 mtd->oobavail = ecc->layout->oobavail; 4048 mtd->oobavail = ecc->layout->oobavail;
4049 4049
4050 /* ECC sanity check: warn noisily if it's too weak */ 4050 /* ECC sanity check: warn if it's too weak */
4051 WARN_ON(!nand_ecc_strength_good(mtd)); 4051 if (!nand_ecc_strength_good(mtd))
4052 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4053 mtd->name);
4052 4054
4053 /* 4055 /*
4054 * Set the number of read / write steps for one page depending on ECC 4056 * Set the number of read / write steps for one page depending on ECC
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d059888..0431b46d9fd9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
125 parent = *p; 125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); 126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127 127
128 if (vol_id < av->vol_id) 128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left; 129 p = &(*p)->rb_left;
130 else 130 else
131 p = &(*p)->rb_right; 131 p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
423 pnum, err); 423 pnum, err);
424 ret = err > 0 ? UBI_BAD_FASTMAP : err; 424 ret = err > 0 ? UBI_BAD_FASTMAP : err;
425 goto out; 425 goto out;
426 } else if (ret == UBI_IO_BITFLIPS) 426 } else if (err == UBI_IO_BITFLIPS)
427 scrub = 1; 427 scrub = 1;
428 428
429 /* 429 /*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3a451b6cd3d5..701f86cd5993 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
4068 } 4068 }
4069 4069
4070 if (ad_select) { 4070 if (ad_select) {
4071 bond_opt_initstr(&newval, lacp_rate); 4071 bond_opt_initstr(&newval, ad_select);
4072 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 4072 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
4073 &newval); 4073 &newval);
4074 if (!valptr) { 4074 if (!valptr) {
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 824108cd9fd5..12430be6448a 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
287 break; 287 break;
288 } 288 }
289 289
290 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 290 priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
291 resource_size(res));
291 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 292 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
292 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 293 dev_info(&pdev->dev, "control memory is not used for raminit\n");
293 else 294 else
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index c83584a26713..5a1891faba8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
339 /* Calculate the number of Tx and Rx rings to be created */ 339 /* Calculate the number of Tx and Rx rings to be created */
340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), 340 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
341 pdata->hw_feat.tx_ch_cnt); 341 pdata->hw_feat.tx_ch_cnt);
342 if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) { 342 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
343 if (ret) {
343 dev_err(dev, "error setting real tx queue count\n"); 344 dev_err(dev, "error setting real tx queue count\n");
344 goto err_io; 345 goto err_io;
345 } 346 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 141160ef249a..5776e503e4c5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
654 654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656 656
657 if (work_done < budget) { 657 if (work_done == 0) {
658 napi_complete(napi); 658 napi_complete(napi);
659 /* re-enable TX interrupt */ 659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 } 661 }
662 662
663 return work_done; 663 return 0;
664} 664}
665 665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1254 usleep_range(1000, 2000); 1254 usleep_range(1000, 2000);
1255} 1255}
1256 1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv) 1257static inline void umac_reset(struct bcm_sysport_priv *priv)
1258{ 1258{
1259 unsigned int timeout = 0;
1260 u32 reg; 1259 u32 reg;
1261 int ret = 0;
1262
1263 umac_writel(priv, 0, UMAC_CMD);
1264 while (timeout++ < 1000) {
1265 reg = umac_readl(priv, UMAC_CMD);
1266 if (!(reg & CMD_SW_RESET))
1267 break;
1268
1269 udelay(1);
1270 }
1271
1272 if (timeout == 1000) {
1273 dev_err(&priv->pdev->dev,
1274 "timeout waiting for MAC to come out of reset\n");
1275 ret = -ETIMEDOUT;
1276 }
1277 1260
1278 return ret; 1261 reg = umac_readl(priv, UMAC_CMD);
1262 reg |= CMD_SW_RESET;
1263 umac_writel(priv, reg, UMAC_CMD);
1264 udelay(10);
1265 reg = umac_readl(priv, UMAC_CMD);
1266 reg &= ~CMD_SW_RESET;
1267 umac_writel(priv, reg, UMAC_CMD);
1279} 1268}
1280 1269
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1270static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
1303 int ret; 1292 int ret;
1304 1293
1305 /* Reset UniMAC */ 1294 /* Reset UniMAC */
1306 ret = umac_reset(priv); 1295 umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311 1296
1312 /* Flush TX and RX FIFOs at TOPCTRL level */ 1297 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv); 1298 topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 1574 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb); 1575 dev->needed_headroom += sizeof(struct bcm_tsb);
1591 1576
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */ 1577 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev); 1578 netif_carrier_off(dev);
1600 1579
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4cab09d3f807..8206a293e6b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@ struct sw_tx_bd {
346 u8 flags; 346 u8 flags;
347/* Set on the first BD descriptor when there is a split BD */ 347/* Set on the first BD descriptor when there is a split BD */
348#define BNX2X_TSO_SPLIT_BD (1<<0) 348#define BNX2X_TSO_SPLIT_BD (1<<0)
349#define BNX2X_HAS_SECOND_PBD (1<<1)
349}; 350};
350 351
351struct sw_rx_page { 352struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 47c5814114e1..c43e7238de21 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
227 --nbd; 227 --nbd;
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 229
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
232 --nbd;
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
234 }
235
230 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ 236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; 238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -797,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
797 803
798 return; 804 return;
799 } 805 }
800 bnx2x_frag_free(fp, new_data); 806 if (new_data)
807 bnx2x_frag_free(fp, new_data);
801drop: 808drop:
802 /* drop the packet and keep the buffer in the bin */ 809 /* drop the packet and keep the buffer in the bin */
803 DP(NETIF_MSG_RX_STATUS, 810 DP(NETIF_MSG_RX_STATUS,
@@ -3888,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3888 /* set encapsulation flag in start BD */ 3895 /* set encapsulation flag in start BD */
3889 SET_FLAG(tx_start_bd->general_data, 3896 SET_FLAG(tx_start_bd->general_data,
3890 ETH_TX_START_BD_TUNNEL_EXIST, 1); 3897 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3898
3899 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3900
3891 nbd++; 3901 nbd++;
3892 } else if (xmit_type & XMIT_CSUM) { 3902 } else if (xmit_type & XMIT_CSUM) {
3893 /* Set PBD in checksum offload case w/o encapsulation */ 3903 /* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bd0600cf7266..25eddd90f482 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
379 break; 379 break;
380 case PORT_FIBRE: 380 case PORT_FIBRE:
381 case PORT_DA: 381 case PORT_DA:
382 case PORT_NONE:
382 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || 383 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
383 bp->port.supported[1] & SUPPORTED_FIBRE)) { 384 bp->port.supported[1] & SUPPORTED_FIBRE)) {
384 DP(BNX2X_MSG_ETHTOOL, 385 DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2887034523e0..6a8b1453a1b9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12937 * without the default SB. 12937 * without the default SB.
12938 * For VFs there is no default SB, then we return (index+1). 12938 * For VFs there is no default SB, then we return (index+1).
12939 */ 12939 */
12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); 12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
12941 12941
12942 index = control & PCI_MSIX_FLAGS_QSIZE; 12942 index = control & PCI_MSIX_FLAGS_QSIZE;
12943 12943
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..4e615debe472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1149 goto out; 1149 goto out;
1150 } 1150 }
1151 1151
1152 if (skb_padto(skb, ETH_ZLEN)) {
1153 ret = NETDEV_TX_OK;
1154 goto out;
1155 }
1156
1152 /* set the SKB transmit checksum */ 1157 /* set the SKB transmit checksum */
1153 if (priv->desc_64b_en) { 1158 if (priv->desc_64b_en) {
1154 ret = bcmgenet_put_tx_csum(dev, skb); 1159 ret = bcmgenet_put_tx_csum(dev, skb);
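
The skb_padto() call added above pads runt frames up to the 60-byte Ethernet minimum (ETH_ZLEN) before the descriptor is built. Assuming the usual skb_padto() semantics, where the skb is freed by the helper when padding fails, the transmit routine must then return NETDEV_TX_OK rather than touching the skb again. A sketch of that shape in an ndo_start_xmit handler, with hypothetical driver names:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        /* assume the hardware cannot pad short frames itself, so make
         * sure the frame is at least ETH_ZLEN (60) bytes long */
        if (skb_padto(skb, ETH_ZLEN))
            return NETDEV_TX_OK;    /* skb already freed by skb_padto() */

        /* ... map the (now >= 60 byte) skb and hand it to the hardware ... */

        return NETDEV_TX_OK;
    }
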
@@ -1408,13 +1413,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1408 if (cb->skb) 1413 if (cb->skb)
1409 continue; 1414 continue;
1410 1415
1411 /* set the DMA descriptor length once and for all
1412 * it will only change if we support dynamically sizing
1413 * priv->rx_buf_len, but we do not
1414 */
1415 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
1416 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
1417
1418 ret = bcmgenet_rx_refill(priv, cb); 1416 ret = bcmgenet_rx_refill(priv, cb);
1419 if (ret) 1417 if (ret)
1420 break; 1418 break;
@@ -2535,14 +2533,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
2535 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); 2533 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2536 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); 2534 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2537 2535
2538 err = register_netdev(dev); 2536 /* libphy will determine the link state */
2539 if (err) 2537 netif_carrier_off(dev);
2540 goto err_clk_disable;
2541 2538
2542 /* Turn off the main clock, WOL clock is handled separately */ 2539 /* Turn off the main clock, WOL clock is handled separately */
2543 if (!IS_ERR(priv->clk)) 2540 if (!IS_ERR(priv->clk))
2544 clk_disable_unprepare(priv->clk); 2541 clk_disable_unprepare(priv->clk);
2545 2542
2543 err = register_netdev(dev);
2544 if (err)
2545 goto err;
2546
2546 return err; 2547 return err;
2547 2548
2548err_clk_disable: 2549err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
331#define EXT_ENERGY_DET_MASK (1 << 12) 331#define EXT_ENERGY_DET_MASK (1 << 12)
332 332
333#define EXT_RGMII_OOB_CTRL 0x0C 333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4) 334#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5) 335#define OOB_DISABLE (1 << 5)
336#define RGMII_MODE_EN (1 << 6)
337#define ID_MODE_DIS (1 << 16) 337#define ID_MODE_DIS (1 << 16)
338 338
339#define EXT_GPHY_CTRL 0x1C 339#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34a26e42f19d..1e187fb760f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
2902 for_all_evt_queues(adapter, eqo, i) { 2902 for_all_evt_queues(adapter, eqo, i) {
2903 napi_enable(&eqo->napi); 2903 napi_enable(&eqo->napi);
2904 be_enable_busy_poll(eqo); 2904 be_enable_busy_poll(eqo);
2905 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2905 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2906 } 2906 }
2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED; 2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2908 2908
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2990 if (ug_info->rxExtendedFiltering) { 2990 if (ug_info->rxExtendedFiltering) {
2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
2992 if (ug_info->largestexternallookupkeysize == 2992 if (ug_info->largestexternallookupkeysize ==
2993 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 2993 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2994 size += 2994 size +=
2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
2996 if (ug_info->largestexternallookupkeysize == 2996 if (ug_info->largestexternallookupkeysize ==
2997 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 2997 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
2998 size += 2998 size +=
2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3000 } 3000 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a2db388cc31e..ee74f9536b31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1481 s32 ret_val; 1481 s32 ret_val;
1482 u16 i, rar_count = mac->rar_entry_count; 1482 u16 i, rar_count = mac->rar_entry_count;
1483 1483
1484 if ((hw->mac.type >= e1000_i210) &&
1485 !(igb_get_flash_presence_i210(hw))) {
1486 ret_val = igb_pll_workaround_i210(hw);
1487 if (ret_val)
1488 return ret_val;
1489 }
1490
1484 /* Initialize identification LED */ 1491 /* Initialize identification LED */
1485 ret_val = igb_id_led_init(hw); 1492 ret_val = igb_id_led_init(hw);
1486 if (ret_val) { 1493 if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ 46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
47 47
48/* Physical Func Reset Done Indication */ 48/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 52#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 54#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
55#define E1000_CTRL_EXT_EIAME 0x01000000 55#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
56#define E1000_CTRL_EXT_IRCA 0x00000001 56#define E1000_CTRL_EXT_EIAME 0x01000000
57#define E1000_CTRL_EXT_IRCA 0x00000001
57/* Interrupt delay cancellation */ 58/* Interrupt delay cancellation */
58/* Driver loaded bit for FW */ 59/* Driver loaded bit for FW */
59#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 60#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
62/* packet buffer parity error detection enabled */ 63/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 64/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 65#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
66#define E1000_CTRL_EXT_PHYPDEN 0x00100000
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 67#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 68#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 69#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
567/* These functions must be implemented by drivers */ 567/* These functions must be implemented by drivers */
568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
570
571void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
572void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
570#endif /* _E1000_HW_H_ */ 573#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
834 } 834 }
835 return ret_val; 835 return ret_val;
836} 836}
837
838/**
839 * igb_pll_workaround_i210
840 * @hw: pointer to the HW structure
841 *
842 * Works around an errata in the PLL circuit where it occasionally
843 * provides the wrong clock frequency after power up.
844 **/
845s32 igb_pll_workaround_i210(struct e1000_hw *hw)
846{
847 s32 ret_val;
848 u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
849 u16 nvm_word, phy_word, pci_word, tmp_nvm;
850 int i;
851
852 /* Get and set needed register values */
853 wuc = rd32(E1000_WUC);
854 mdicnfg = rd32(E1000_MDICNFG);
855 reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
856 wr32(E1000_MDICNFG, reg_val);
857
858 /* Get data from NVM, or set default */
859 ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
860 &nvm_word);
861 if (ret_val)
862 nvm_word = E1000_INVM_DEFAULT_AL;
863 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
864 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
865 /* check current state directly from internal PHY */
866 igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
867 E1000_PHY_PLL_FREQ_REG), &phy_word);
868 if ((phy_word & E1000_PHY_PLL_UNCONF)
869 != E1000_PHY_PLL_UNCONF) {
870 ret_val = 0;
871 break;
872 } else {
873 ret_val = -E1000_ERR_PHY;
874 }
875 /* directly reset the internal PHY */
876 ctrl = rd32(E1000_CTRL);
877 wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
878
879 ctrl_ext = rd32(E1000_CTRL_EXT);
880 ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
881 wr32(E1000_CTRL_EXT, ctrl_ext);
882
883 wr32(E1000_WUC, 0);
884 reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
885 wr32(E1000_EEARBC_I210, reg_val);
886
887 igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
888 pci_word |= E1000_PCI_PMCSR_D3;
889 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
890 usleep_range(1000, 2000);
891 pci_word &= ~E1000_PCI_PMCSR_D3;
892 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
893 reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
894 wr32(E1000_EEARBC_I210, reg_val);
895
896 /* restore WUC register */
897 wr32(E1000_WUC, wuc);
898 }
899 /* restore MDICNFG setting */
900 wr32(E1000_MDICNFG, mdicnfg);
901 return ret_val;
902}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); 33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
34s32 igb_init_nvm_params_i210(struct e1000_hw *hw); 34s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
35bool igb_get_flash_presence_i210(struct e1000_hw *hw); 35bool igb_get_flash_presence_i210(struct e1000_hw *hw);
36s32 igb_pll_workaround_i210(struct e1000_hw *hw);
36 37
37#define E1000_STM_OPCODE 0xDB00 38#define E1000_STM_OPCODE 0xDB00
38#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 39#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
78#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 79#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
79#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C 80#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
80 81
82/* PLL Defines */
83#define E1000_PCI_PMCSR 0x44
84#define E1000_PCI_PMCSR_D3 0x03
85#define E1000_MAX_PLL_TRIES 5
86#define E1000_PHY_PLL_UNCONF 0xFF
87#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
88#define E1000_PHY_PLL_FREQ_REG 0x000E
89#define E1000_INVM_DEFAULT_AL 0x202F
90#define E1000_INVM_AUTOLOAD 0x0A
91#define E1000_INVM_PLL_WO_VAL 0x0010
92
81#endif 93#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
67#define E1000_PBS 0x01008 /* Packet Buffer Size */ 67#define E1000_PBS 0x01008 /* Packet Buffer Size */
68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
69#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
69#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 70#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
70#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ 71#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
71#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ 72#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f145adbb55ac..a9537ba7a5a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7215 } 7215 }
7216} 7216}
7217 7217
7218void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{
7220 struct igb_adapter *adapter = hw->back;
7221
7222 pci_read_config_word(adapter->pdev, reg, value);
7223}
7224
7225void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7226{
7227 struct igb_adapter *adapter = hw->back;
7228
7229 pci_write_config_word(adapter->pdev, reg, *value);
7230}
7231
7218s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 7232s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{ 7233{
7220 struct igb_adapter *adapter = hw->back; 7234 struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
7578 7592
7579 if (netif_running(netdev)) 7593 if (netif_running(netdev))
7580 igb_close(netdev); 7594 igb_close(netdev);
7595 else
7596 igb_reset(adapter);
7581 7597
7582 igb_clear_interrupt_scheme(adapter); 7598 igb_clear_interrupt_scheme(adapter);
7583 7599
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1209 1209
1210 if (l3_proto == swab16(ETH_P_IP)) 1210 if (l3_proto == htons(ETH_P_IP))
1211 command |= MVNETA_TXD_IP_CSUM; 1211 command |= MVNETA_TXD_IP_CSUM;
1212 else 1212 else
1213 command |= MVNETA_TX_L3_IP6; 1213 command |= MVNETA_TX_L3_IP6;
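
The mvneta fix above replaces swab16() with htons() when testing the L3 protocol: the field being compared is in network byte order, so the constant must be converted with htons(), which swaps bytes on little-endian hosts but is an identity on big-endian ones, whereas swab16() swaps unconditionally and therefore mis-compares on big-endian. A small userspace illustration, using the standard htons() and a hand-rolled swap16() to stand in for swab16():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define ETH_P_IP 0x0800     /* IPv4 ethertype, host-order constant */

    static uint16_t swap16(uint16_t v)  /* what swab16() does: always swap */
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        uint16_t wire = htons(ETH_P_IP);    /* value as stored in network order */

        /* correct: convert the constant the same way the field was stored */
        printf("htons match : %s\n", wire == htons(ETH_P_IP) ? "yes" : "no");

        /* swab16-style: right only on little-endian; on big-endian htons()
         * is a no-op while the swap still happens, so the compare fails */
        printf("swab16 match: %s\n", wire == swap16(ETH_P_IP) ? "yes" : "no");

        return 0;
    }
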
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
 		if (phydev->speed == SPEED_1000)
 			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-		else
+		else if (phydev->speed == SPEED_100)
 			val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	init_completion(&cq->free);
 
 	cq->irq = priv->eq_table.eq[cq->vector].irq;
-	cq->irq_affinity_change = false;
-
 	return 0;
 
 err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
 						  name);
 				}
+
 			}
 		} else {
 			cq->vector = (cq->ring + 1 + priv->port) %
 				mdev->dev->caps.num_comp_vectors;
 		}
+
+		cq->irq_desc =
+			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+						    cq->vector));
 	} else {
 		/* For TX we use the same irq per
 		ring we assigned for the RX    */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 	if (priv->mdev->dev->caps.comp_pool && cq->vector) {
-		if (!cq->is_tx)
-			irq_set_affinity_hint(cq->mcq.irq, NULL);
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	}
 	cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	if (!cq->is_tx) {
 		napi_hash_del(&cq->napi);
 		synchronize_rcu();
+		irq_set_affinity_hint(cq->mcq.irq, NULL);
 	}
 	netif_napi_del(&cq->napi);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
 
 	coal->tx_coalesce_usecs = priv->tx_usecs;
 	coal->tx_max_coalesced_frames = priv->tx_frames;
+	coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
 	coal->rx_coalesce_usecs = priv->rx_usecs;
 	coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
 	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
 	coal->rate_sample_interval = priv->sample_interval;
 	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
 	return 0;
 }
 
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
+	if (!coal->tx_max_coalesced_frames_irq)
+		return -EINVAL;
+
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
 			   MLX4_EN_AUTO_CONF) ?
 				MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
 	priv->sample_interval = coal->rate_sample_interval;
 	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
 
 	return mlx4_en_moderation_update(priv);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d4fb7bf2593..7345c43b019e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	__be16 current_port;
 
-	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return;
 
 	if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			MLX4_WQE_CTRL_SOLICITED);
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
+	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 
 	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 				GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d2d415732d99..5535862f27cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/irq.h>
 
 #include "mlx4_en.h"
 
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 						     PKT_HASH_TYPE_L3);
 
 				skb_record_rx_queue(gro_skb, cq->ring);
+				skb_mark_napi_id(gro_skb, &cq->napi);
 
 				if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
 					timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done == budget) {
+		int cpu_curr;
+		const struct cpumask *aff;
+
 		INC_PERF_COUNTER(priv->pstats.napi_quota);
-		if (unlikely(cq->mcq.irq_affinity_change)) {
-			cq->mcq.irq_affinity_change = false;
+
+		cpu_curr = smp_processor_id();
+		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+		if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+			/* Current cpu is not according to smp_irq_affinity -
+			 * probably affinity changed. need to stop this NAPI
+			 * poll, and restart it on the right CPU
+			 */
 			napi_complete(napi);
 			mlx4_en_arm_cq(priv, cq);
 			return 0;
 		}
 	} else {
 		/* Done for now */
-		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	return cnt;
 }
 
-static int mlx4_en_process_tx_cq(struct net_device *dev,
-				 struct mlx4_en_cq *cq,
-				 int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+				  struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
 	int factor = priv->cqe_factor;
 	u64 timestamp = 0;
 	int done = 0;
+	int budget = priv->tx_work_limit;
 
 	if (!priv->port_up)
-		return 0;
+		return true;
 
 	index = cons_index & size_mask;
 	cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
 		netif_tx_wake_queue(ring->tx_queue);
 		ring->wake_queue++;
 	}
-	return done;
+	return done < budget;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int done;
+	int clean_complete;
 
-	done = mlx4_en_process_tx_cq(dev, cq, budget);
+	clean_complete = mlx4_en_process_tx_cq(dev, cq);
+	if (!clean_complete)
+		return budget;
 
-	/* If we used up all the quota - we're probably not done yet... */
-	if (done < budget) {
-		/* Done for now */
-		cq->mcq.irq_affinity_change = false;
-		napi_complete(napi);
-		mlx4_en_arm_cq(priv, cq);
-		return done;
-	} else if (unlikely(cq->mcq.irq_affinity_change)) {
-		cq->mcq.irq_affinity_change = false;
-		napi_complete(napi);
-		mlx4_en_arm_cq(priv, cq);
-		return 0;
-	}
-	return budget;
+	napi_complete(napi);
+	mlx4_en_arm_cq(priv, cq);
+
+	return 0;
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
 	MLX4_EQ_ENTRY_SIZE	= 0x20
 };
 
-struct mlx4_irq_notify {
-	void *arg;
-	struct irq_affinity_notify notify;
-};
-
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
-				     const cpumask_t *mask)
-{
-	struct mlx4_irq_notify *n = container_of(notify,
-						 struct mlx4_irq_notify,
-						 notify);
-	struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
-	struct radix_tree_iter iter;
-	void **slot;
-
-	radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
-		struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
-		if (cq->irq == notify->irq)
-			cq->irq_affinity_change = true;
-	}
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
-	struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
-						 notify.kref);
-	kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
-				     struct mlx4_dev *dev, int irq)
-{
-	struct mlx4_irq_notify *irq_notifier = NULL;
-	int err = 0;
-
-	irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
-	if (!irq_notifier) {
-		mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
-			  irq);
-		return;
-	}
-
-	irq_notifier->notify.irq = irq;
-	irq_notifier->notify.notify = mlx4_irq_notifier_notify;
-	irq_notifier->notify.release = mlx4_release_irq_notifier;
-	irq_notifier->arg = priv;
-	err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
-	if (err) {
-		kfree(irq_notifier);
-		irq_notifier = NULL;
-		mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
-	}
-}
-
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 				continue;
 				/*we dont want to break here*/
 			}
-			mlx4_assign_irq_notifier(priv, dev,
-						 priv->eq_table.eq[vec].irq);
 
 			eq_set_ci(&priv->eq_table.eq[vec], 1);
 		}
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 		  Belonging to a legacy EQ*/
 		mutex_lock(&priv->msix_ctl.pool_lock);
 		if (priv->msix_ctl.pool_bm & 1ULL << i) {
-			irq_set_affinity_notifier(
-				priv->eq_table.eq[vec].irq,
-				NULL);
 			free_irq(priv->eq_table.eq[vec].irq,
 				 &priv->eq_table.eq[vec]);
 			priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0e15295bedd6..d72a5a894fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
 #define MAX_TX_RINGS		(MLX4_EN_MAX_TX_RING_P_UP * \
 				 MLX4_EN_NUM_UP)
 
+#define MLX4_EN_DEFAULT_TX_WORK		256
+
 /* Target number of packets to coalesce with interrupt moderation */
 #define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
 	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
 #endif  /* CONFIG_NET_RX_BUSY_POLL */
+	struct irq_desc *irq_desc;
 };
 
 struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
 	__be32 ctrl_flags;
 	u32 flags;
 	u8 num_tx_rings_p_up;
+	u32 tx_work_limit;
 	u32 tx_ring_num;
 	u32 rx_ring_num;
 	u32 rx_skb_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4af50..184c3615f479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 	write_lock_irq(&table->lock);
 	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
 	write_unlock_irq(&table->lock);
+	if (err) {
+		mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+			       mlx5_base_mkey(mr->key), err);
+		mlx5_core_destroy_mkey(dev, mr);
+	}
 
 	return err;
 }
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
 	struct mlx5_mr_table *table = &dev->priv.mr_table;
 	struct mlx5_destroy_mkey_mbox_in in;
 	struct mlx5_destroy_mkey_mbox_out out;
+	struct mlx5_core_mr *deleted_mr;
 	unsigned long flags;
 	int err;
 
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
 
+	write_lock_irqsave(&table->lock, flags);
+	deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+	write_unlock_irqrestore(&table->lock, flags);
+	if (!deleted_mr) {
+		mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+			       mlx5_base_mkey(mr->key));
+		return -ENOENT;
+	}
+
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
 	in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
 	if (out.hdr.status)
 		return mlx5_cmd_status_to_err(&out.hdr);
 
-	write_lock_irqsave(&table->lock, flags);
-	radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
-	write_unlock_irqrestore(&table->lock, flags);
-
 	return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index be425ad5e824..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -538,6 +538,7 @@ enum rtl_register_content {
 	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
 	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
 	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
+	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
 	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
 
 	/* Config4 register */
@@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	case RTL_GIGA_MAC_VER_40:
+		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+		break;
 	case RTL_GIGA_MAC_VER_41:
 	case RTL_GIGA_MAC_VER_42:
 	case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
 				  PCI_EXP_LNKCTL_CLKREQ_EN);
 }
 
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	u8 data;
+
+	data = RTL_R8(Config3);
+
+	if (enable)
+		data |= Rdy_to_L23;
+	else
+		data &= ~Rdy_to_L23;
+
+	RTL_W8(Config3, data);
+}
+
 #define R8168_CPCMD_QUIRK_MASK (\
 	EnableBist | \
 	Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 	};
 
 	rtl_hw_start_8168f(tp);
+	rtl_pcie_state_l2l3_enable(tp, false);
 
 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 
@@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 
 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+	rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 
 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+	rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+	rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
 	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+	rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
 
 static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
 {
-	u32 value;
-
-	value = readl(ioaddr + GMAC_AN_CTRL);
 	/* auto negotiation enable and External Loopback enable */
-	value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
 
 	if (restart)
 		value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
 			x->rx_msg_type_delay_req++;
 		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
 			x->rx_msg_type_delay_resp++;
-		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
 			x->rx_msg_type_pdelay_req++;
 		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
 			x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..d813bfb1a847 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
 	return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+	struct vio_driver_state *vio = &vnet->vio;
+
+	return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
 	unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 	struct vnet_port *port;
 
 	hlist_for_each_entry(port, hp, hash) {
+		if (!port_is_up(port))
+			continue;
 		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
-	port = NULL;
-	if (!list_empty(&vp->port_list))
-		port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-	return port;
+	list_for_each_entry(port, &vp->port_list, list) {
+		if (!port->switch_port)
+			continue;
+		if (!port_is_up(port))
+			continue;
+		return port;
+	}
+	return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
 	return vp;
 }
 
+static void vnet_cleanup(void)
+{
+	struct vnet *vp;
+	struct net_device *dev;
+
+	mutex_lock(&vnet_list_mutex);
+	while (!list_empty(&vnet_list)) {
+		vp = list_first_entry(&vnet_list, struct vnet, list);
+		list_del(&vp->list);
+		dev = vp->dev;
+		/* vio_unregister_driver() should have cleaned up port_list */
+		BUG_ON(!list_empty(&vp->port_list));
+		unregister_netdev(dev);
+		free_netdev(dev);
+	}
+	mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
 		kfree(port);
 
-		unregister_netdev(vp->dev);
 	}
 	return 0;
 }
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
 	vio_unregister_driver(&vnet_port_driver);
+	vnet_cleanup();
 }
 
 module_init(vnet_init);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203cd58e..2aa57270838f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
 
 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
 static void dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
 static void dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
 
 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
 				     struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
  * Align an sk_buff to a boundary power of 2
  *
  */
-
+#ifdef DYNAMIC_BUFFERS
 static void my_skb_align(struct sk_buff *skb, int n)
 {
 	unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
 
 	skb_reserve(skb, v - x);
 }
-
+#endif
 
 /*
  * ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
 					break;
 					}
 				else {
-#ifndef DYNAMIC_BUFFERS
-				if (! rx_in_place)
-#endif
-					{
+					if (!rx_in_place) {
 					/* Receive buffer allocated, pass receive packet up */
 
 					skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
 		}
 
 	}
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
 #endif /* DYNAMIC_BUFFERS */
 
 /*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4ed38eaecea8..d97d5f39a04e 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
 
 	net_device->send_section_map =
 		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-	if (net_device->send_section_map == NULL)
+	if (net_device->send_section_map == NULL) {
+		ret = -ENOMEM;
 		goto cleanup;
+	}
 
 	goto exit;
 
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6a999e6814a0..9408157a246c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 {
 	struct dp83640_private *dp83640 = phydev->priv;
 
-	if (!dp83640->hwts_rx_en)
-		return false;
-
 	if (is_status_frame(skb, type)) {
 		decode_status_frame(dp83640, skb);
 		kfree_skb(skb);
 		return true;
 	}
 
+	if (!dp83640->hwts_rx_en)
+		return false;
+
 	SKB_PTP_TYPE(skb) = type;
 	skb_queue_tail(&dp83640->rx_queue, skb);
 	schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..203651ebccb0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
 	return d ? to_mii_bus(d) : NULL;
 }
 EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of a mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed pyh devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+				   struct phy_device *phydev)
+{
+	struct device *dev = &phydev->dev;
+	struct device_node *child;
+
+	if (dev->of_node || !mdio->dev.of_node)
+		return;
+
+	for_each_available_child_of_node(mdio->dev.of_node, child) {
+		int addr;
+		int ret;
+
+		ret = of_property_read_u32(child, "reg", &addr);
+		if (ret < 0) {
+			dev_err(dev, "%s has invalid PHY address\n",
+				child->full_name);
+			continue;
+		}
+
+		/* A PHY must have a reg property in the range [0-31] */
+		if (addr >= PHY_MAX_ADDR) {
+			dev_err(dev, "%s PHY address %i is too large\n",
+				child->full_name, addr);
+			continue;
+		}
+
+		if (addr == phydev->addr) {
+			dev->of_node = child;
+			return;
+		}
+	}
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+					  struct phy_device *phydev)
+{
+}
 #endif
 
 /**
@@ -211,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
 
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
+	bus->dev.driver = bus->parent->driver;
 	bus->dev.groups = NULL;
 	dev_set_name(&bus->dev, "%s", bus->id);
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 35d753d22f78..22c57be4dfa0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
 	phydev->bus->phy_map[phydev->addr] = phydev;
 
 	/* Run all of the fixups for this PHY */
-	err = phy_init_hw(phydev);
+	err = phy_scan_fixups(phydev);
 	if (err) {
 		pr_err("PHY %d failed to initialize\n", phydev->addr);
 		goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 		      u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
+	struct module *bus_module;
 	int err;
 
 	/* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 		return -EBUSY;
 	}
 
+	/* Increment the bus module reference count */
+	bus_module = phydev->bus->dev.driver ?
+		     phydev->bus->dev.driver->owner : NULL;
+	if (!try_module_get(bus_module)) {
+		dev_err(&dev->dev, "failed to get the bus module\n");
+		return -EIO;
+	}
+
 	phydev->attached_dev = dev;
 	dev->phydev = phydev;
 
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
 void phy_detach(struct phy_device *phydev)
 {
 	int i;
+
+	if (phydev->bus->dev.driver)
+		module_put(phydev->bus->dev.driver->owner);
+
 	phydev->attached_dev->phydev = NULL;
 	phydev->attached_dev = NULL;
 	phy_suspend(phydev);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 91d6c1272fcf..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
 	struct sock_fprog uprog;
 	struct sock_filter *code = NULL;
-	int len, err;
+	int len;
 
 	if (copy_from_user(&uprog, arg, sizeof(uprog)))
 		return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 	if (IS_ERR(code))
 		return PTR_ERR(code);
 
-	err = sk_chk_filter(code, uprog.len);
-	if (err) {
-		kfree(code);
-		return err;
-	}
-
 	*p = code;
 	return uprog.len;
 }
@@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		};
 
 		ppp_lock(ppp);
-		if (ppp->pass_filter)
+		if (ppp->pass_filter) {
 			sk_unattached_filter_destroy(ppp->pass_filter);
-		err = sk_unattached_filter_create(&ppp->pass_filter,
-						  &fprog);
+			ppp->pass_filter = NULL;
+		}
+		if (fprog.filter != NULL)
+			err = sk_unattached_filter_create(&ppp->pass_filter,
+							  &fprog);
+		else
+			err = 0;
 		kfree(code);
 		ppp_unlock(ppp);
 	}
@@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		};
 
 		ppp_lock(ppp);
-		if (ppp->active_filter)
+		if (ppp->active_filter) {
 			sk_unattached_filter_destroy(ppp->active_filter);
-		err = sk_unattached_filter_create(&ppp->active_filter,
-						  &fprog);
+			ppp->active_filter = NULL;
+		}
+		if (fprog.filter != NULL)
+			err = sk_unattached_filter_create(&ppp->active_filter,
+							  &fprog);
+		else
+			err = 0;
 		kfree(code);
 		ppp_unlock(ppp);
 	}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
 	po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
 			   dev->hard_header_len);
 
-	po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+	po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
 	po->chan.private = sk;
 	po->chan.ops = &pppoe_chan_ops;
 
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe5d318..2a32d9167d3b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@ next_desc:
 		usb_driver_release_interface(driver, info->data);
 		return -ENODEV;
 	}
+
+	/* Some devices don't initialise properly. In particular
+	 * the packet filter is not reset. There are devices that
+	 * don't do reset all the way. So the packet filter should
+	 * be set to a sane initial value.
+	 */
+	usb_control_msg(dev->udev,
+			usb_sndctrlpipe(dev->udev, 0),
+			USB_CDC_SET_ETHERNET_PACKET_FILTER,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+			USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL,
+			0,
+			USB_CTRL_SET_TIMEOUT
+		);
 	return 0;
 
 bad_desc:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a05869309d..a4272ed62da8 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
 	 * so as not to drop characters on the floor.
 	 */
 	int  curr_rx_urb_idx;
-	u16  curr_rx_urb_offset;
 	u8   rx_urb_filled[MAX_RX_URBS];
 	struct tasklet_struct unthrottle_tasklet;
-	struct work_struct    retry_unthrottle_workqueue;
 };
 
 struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
 	tasklet_hi_schedule(&serial->unthrottle_tasklet);
 }
 
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
-	struct hso_serial *serial =
-	    container_of(work, struct hso_serial,
-			 retry_unthrottle_workqueue);
-	hso_unthrottle_tasklet(serial);
-}
-
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 		tasklet_init(&serial->unthrottle_tasklet,
 			     (void (*)(unsigned long))hso_unthrottle_tasklet,
 			     (unsigned long)serial);
-		INIT_WORK(&serial->retry_unthrottle_workqueue,
-			  hso_unthrottle_workfunc);
 		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
 		if (result) {
 			hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
 		if (!usb_gone)
 			hso_stop_serial_device(serial->parent);
 		tasklet_kill(&serial->unthrottle_tasklet);
-		cancel_work_sync(&serial->retry_unthrottle_workqueue);
 	}
 
 	if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
 static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
 	struct tty_struct *tty;
-	int write_length_remaining = 0;
-	int curr_write_len;
+	int count;
 
 	/* Sanity check */
 	if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 
 	tty = tty_port_tty_get(&serial->port);
 
+	if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+		tty_kref_put(tty);
+		return -1;
+	}
+
 	/* Push data to tty */
-	write_length_remaining = urb->actual_length -
-		serial->curr_rx_urb_offset;
 	D1("data to push to tty");
-	while (write_length_remaining) {
-		if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
-			tty_kref_put(tty);
-			return -1;
-		}
-		curr_write_len = tty_insert_flip_string(&serial->port,
-			urb->transfer_buffer + serial->curr_rx_urb_offset,
-			write_length_remaining);
-		serial->curr_rx_urb_offset += curr_write_len;
-		write_length_remaining -= curr_write_len;
+	count = tty_buffer_request_room(&serial->port, urb->actual_length);
+	if (count >= urb->actual_length) {
+		tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+				       urb->actual_length);
 		tty_flip_buffer_push(&serial->port);
+	} else {
+		dev_warn(&serial->parent->usb->dev,
+			 "dropping data, %d bytes lost\n", urb->actual_length);
 	}
+
 	tty_kref_put(tty);
 
-	if (write_length_remaining == 0) {
-		serial->curr_rx_urb_offset = 0;
-		serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
-	}
-	return write_length_remaining;
+	serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+	return 0;
 }
 
 
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 			}
 		}
 		serial->curr_rx_urb_idx = 0;
-		serial->curr_rx_urb_offset = 0;
 
 		if (serial->tx_urb)
 			usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
 	  .driver_info = (unsigned long)&huawei_cdc_ncm_info,
 	},
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+	  .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+	},
 
 	/* Terminating entry */
 	{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
 	{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
+	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},	/* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x9054, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9055, 8)},	/* Netgear AirCard 341U */
 	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 25431965a625..3eab74c7c554 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -282,7 +282,7 @@
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK		0x0006
 #define STAT_SPEED_HIGH		0x0000
-#define STAT_SPEED_FULL		0x0001
+#define STAT_SPEED_FULL		0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD	0x03
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
 	struct sk_buff_head seg_list;
 	struct sk_buff *segs, *nskb;
 
-	features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+	features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
 	segs = skb_gso_segment(skb, features);
 	if (IS_ERR(segs) || !segs)
 		goto drop;
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
 	/* rx share fifo credit full threshold */
 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-	ocp_data &= STAT_SPEED_MASK;
-	if (ocp_data == STAT_SPEED_FULL) {
+	if (tp->udev->speed == USB_SPEED_FULL ||
+	    tp->udev->speed == USB_SPEED_LOW) {
 		/* rx share fifo credit near full threshold */
 		ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
 				RXFIFO_THR2_FULL);
@@ -3204,8 +3203,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
 	struct r8152 *tp = netdev_priv(dev);
 	struct tally_counter tally;
 
+	if (usb_autopm_get_interface(tp->intf) < 0)
+		return;
+
 	generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
 
+	usb_autopm_put_interface(tp->intf);
+
 	data[0] = le64_to_cpu(tally.tx_packets);
 	data[1] = le64_to_cpu(tally.rx_packets);
 	data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
 	return ret;
 }
 
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	int ret;
+
+	ret = smsc95xx_reset(dev);
+	if (ret < 0)
+		return ret;
+
+	return smsc95xx_resume(intf);
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
 	skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
 	.probe		= usbnet_probe,
 	.suspend	= smsc95xx_suspend,
 	.resume		= smsc95xx_resume,
-	.reset_resume	= smsc95xx_resume,
+	.reset_resume	= smsc95xx_reset_resume,
 	.disconnect	= usbnet_disconnect,
 	.disable_hub_initiated_lpm = 1,
 	.supports_autosuspend = 1,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ade33ef82823..9f79192c9aa0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
 	ndm->ndm_state = fdb->state;
 	ndm->ndm_ifindex = vxlan->dev->ifindex;
 	ndm->ndm_flags = fdb->flags;
-	ndm->ndm_type = NDA_DST;
+	ndm->ndm_type = RTN_UNICAST;
 
 	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
 		goto nla_put_failure;
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
 	"FarSync TE1"
 };
 
-static void
+static int
 fst_init_card(struct fst_card_info *card)
 {
 	int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
 	 * we'll have to revise it in some way then.
 	 */
 	for (i = 0; i < card->nports; i++) {
 		err = register_hdlc_device(card->ports[i].dev);
 		if (err < 0) {
-			int j;
 			pr_err("Cannot register HDLC device for port %d (errno %d)\n",
 			       i, -err);
-			for (j = i; j < card->nports; j++) {
-				free_netdev(card->ports[j].dev);
-				card->ports[j].dev = NULL;
-			}
-			card->nports = i;
-			break;
-		}
+			while (i--)
+				unregister_hdlc_device(card->ports[i].dev);
+			return err;
+		}
 	}
 
 	pr_info("%s-%s: %s IRQ%d, %d ports\n",
 		port_to_dev(&card->ports[0])->name,
 		port_to_dev(&card->ports[card->nports - 1])->name,
 		type_strings[card->type], card->irq, card->nports);
+	return 0;
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447 /* Try to enable the device */ 2444 /* Try to enable the device */
2448 if ((err = pci_enable_device(pdev)) != 0) { 2445 if ((err = pci_enable_device(pdev)) != 0) {
2449 pr_err("Failed to enable card. Err %d\n", -err); 2446 pr_err("Failed to enable card. Err %d\n", -err);
2450 kfree(card); 2447 goto enable_fail;
2451 return err;
2452 } 2448 }
2453 2449
2454 if ((err = pci_request_regions(pdev, "FarSync")) !=0) { 2450 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2455 pr_err("Failed to allocate regions. Err %d\n", -err); 2451 pr_err("Failed to allocate regions. Err %d\n", -err);
2456 pci_disable_device(pdev); 2452 goto regions_fail;
2457 kfree(card);
2458 return err;
2459 } 2453 }
2460 2454
2461 /* Get virtual addresses of memory regions */ 2455 /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2464 card->phys_ctlmem = pci_resource_start(pdev, 3); 2458 card->phys_ctlmem = pci_resource_start(pdev, 3);
2465 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { 2459 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2466 pr_err("Physical memory remap failed\n"); 2460 pr_err("Physical memory remap failed\n");
2467 pci_release_regions(pdev); 2461 err = -ENODEV;
2468 pci_disable_device(pdev); 2462 goto ioremap_physmem_fail;
2469 kfree(card);
2470 return -ENODEV;
2471 } 2463 }
2472 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { 2464 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2473 pr_err("Control memory remap failed\n"); 2465 pr_err("Control memory remap failed\n");
2474 pci_release_regions(pdev); 2466 err = -ENODEV;
2475 pci_disable_device(pdev); 2467 goto ioremap_ctlmem_fail;
2476 iounmap(card->mem);
2477 kfree(card);
2478 return -ENODEV;
2479 } 2468 }
2480 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); 2469 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
2481 2470
2482 /* Register the interrupt handler */ 2471 /* Register the interrupt handler */
2483 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { 2472 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
2484 pr_err("Unable to register interrupt %d\n", card->irq); 2473 pr_err("Unable to register interrupt %d\n", card->irq);
2485 pci_release_regions(pdev); 2474 err = -ENODEV;
2486 pci_disable_device(pdev); 2475 goto irq_fail;
2487 iounmap(card->ctlmem);
2488 iounmap(card->mem);
2489 kfree(card);
2490 return -ENODEV;
2491 } 2476 }
2492 2477
2493 /* Record info we need */ 2478 /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2513 while (i--) 2498 while (i--)
2514 free_netdev(card->ports[i].dev); 2499 free_netdev(card->ports[i].dev);
2515 pr_err("FarSync: out of memory\n"); 2500 pr_err("FarSync: out of memory\n");
2516 free_irq(card->irq, card); 2501 err = -ENOMEM;
2517 pci_release_regions(pdev); 2502 goto hdlcdev_fail;
2518 pci_disable_device(pdev);
2519 iounmap(card->ctlmem);
2520 iounmap(card->mem);
2521 kfree(card);
2522 return -ENODEV;
2523 } 2503 }
2524 card->ports[i].dev = dev; 2504 card->ports[i].dev = dev;
2525 card->ports[i].card = card; 2505 card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 pci_set_drvdata(pdev, card); 2545 pci_set_drvdata(pdev, card);
2566 2546
2567 /* Remainder of card setup */ 2547 /* Remainder of card setup */
2548 if (no_of_cards_added >= FST_MAX_CARDS) {
2549 pr_err("FarSync: too many cards\n");
2550 err = -ENOMEM;
2551 goto card_array_fail;
2552 }
2568 fst_card_array[no_of_cards_added] = card; 2553 fst_card_array[no_of_cards_added] = card;
2569 card->card_no = no_of_cards_added++; /* Record instance and bump it */ 2554 card->card_no = no_of_cards_added++; /* Record instance and bump it */
2570 fst_init_card(card); 2555 err = fst_init_card(card);
2556 if (err)
2557 goto init_card_fail;
2571 if (card->family == FST_FAMILY_TXU) { 2558 if (card->family == FST_FAMILY_TXU) {
2572 /* 2559 /*
2573 * Allocate a dma buffer for transmit and receives 2560 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2577 &card->rx_dma_handle_card); 2564 &card->rx_dma_handle_card);
2578 if (card->rx_dma_handle_host == NULL) { 2565 if (card->rx_dma_handle_host == NULL) {
2579 pr_err("Could not allocate rx dma buffer\n"); 2566 pr_err("Could not allocate rx dma buffer\n");
2580 fst_disable_intr(card); 2567 err = -ENOMEM;
2581 pci_release_regions(pdev); 2568 goto rx_dma_fail;
2582 pci_disable_device(pdev);
2583 iounmap(card->ctlmem);
2584 iounmap(card->mem);
2585 kfree(card);
2586 return -ENOMEM;
2587 } 2569 }
2588 card->tx_dma_handle_host = 2570 card->tx_dma_handle_host =
2589 pci_alloc_consistent(card->device, FST_MAX_MTU, 2571 pci_alloc_consistent(card->device, FST_MAX_MTU,
2590 &card->tx_dma_handle_card); 2572 &card->tx_dma_handle_card);
2591 if (card->tx_dma_handle_host == NULL) { 2573 if (card->tx_dma_handle_host == NULL) {
2592 pr_err("Could not allocate tx dma buffer\n"); 2574 pr_err("Could not allocate tx dma buffer\n");
2593 fst_disable_intr(card); 2575 err = -ENOMEM;
2594 pci_release_regions(pdev); 2576 goto tx_dma_fail;
2595 pci_disable_device(pdev);
2596 iounmap(card->ctlmem);
2597 iounmap(card->mem);
2598 kfree(card);
2599 return -ENOMEM;
2600 } 2577 }
2601 } 2578 }
2602 return 0; /* Success */ 2579 return 0; /* Success */
2580
2581tx_dma_fail:
2582 pci_free_consistent(card->device, FST_MAX_MTU,
2583 card->rx_dma_handle_host,
2584 card->rx_dma_handle_card);
2585rx_dma_fail:
2586 fst_disable_intr(card);
2587 for (i = 0 ; i < card->nports ; i++)
2588 unregister_hdlc_device(card->ports[i].dev);
2589init_card_fail:
2590 fst_card_array[card->card_no] = NULL;
2591card_array_fail:
2592 for (i = 0 ; i < card->nports ; i++)
2593 free_netdev(card->ports[i].dev);
2594hdlcdev_fail:
2595 free_irq(card->irq, card);
2596irq_fail:
2597 iounmap(card->ctlmem);
2598ioremap_ctlmem_fail:
2599 iounmap(card->mem);
2600ioremap_physmem_fail:
2601 pci_release_regions(pdev);
2602regions_fail:
2603 pci_disable_device(pdev);
2604enable_fail:
2605 kfree(card);
2606 return err;
2603} 2607}
2604 2608
2605/* 2609/*
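
The fst_add_one() rework above replaces the copy-pasted cleanup in every error branch with a single goto-based unwind ladder, and fst_init_card() now returns an error instead of silently trimming nports. A minimal sketch of that unwind idiom, with illustrative names only (this is not the driver's code):

#include <linux/pci.h>
#include <linux/io.h>

/* Illustrative probe-style unwind ladder; device names and sizes are made up. */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *mem;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto regions_fail;

	mem = ioremap(pci_resource_start(pdev, 2), 0x1000);
	if (!mem) {
		err = -ENODEV;
		goto ioremap_fail;
	}

	/* a real driver would save 'mem' in its private state here */
	return 0;			/* success: keep everything */

ioremap_fail:
	pci_release_regions(pdev);	/* undo completed steps in reverse order */
regions_fail:
	pci_disable_device(pdev);
	return err;
}

Each label undoes exactly the steps that succeeded before the failure, so no branch can leak or double-free a resource.
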
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = netdev_priv(dev); 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len;
126 126
127 if (newmtu > 65534)
128 return -EINVAL;
129
130 len = 2 * newmtu;
127 xbuff = kmalloc(len + 4, GFP_ATOMIC); 131 xbuff = kmalloc(len + 4, GFP_ATOMIC);
128 rbuff = kmalloc(len + 4, GFP_ATOMIC); 132 rbuff = kmalloc(len + 4, GFP_ATOMIC);
129 133
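
The x25_asy change above rejects oversized MTUs before computing len = 2 * newmtu, so the later kmalloc(len + 4, GFP_ATOMIC) calls cannot be driven by an absurd or wrapping size. A hedged sketch of the same validate-then-allocate pattern (the helper and bound are illustrative, not the driver's exact code):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Illustrative only: bound the requested MTU before sizing buffers from it. */
static int example_change_mtu(struct net_device *dev, int newmtu)
{
	unsigned char *xbuff;
	int len;

	if (newmtu > 65534)	/* keeps 2 * newmtu + 4 well inside int range */
		return -EINVAL;

	len = 2 * newmtu;
	xbuff = kmalloc(len + 4, GFP_ATOMIC);
	if (!xbuff)
		return -ENOMEM;

	/* ... a real driver would swap the new buffer in under its lock ... */
	kfree(xbuff);		/* sketch only: nothing is kept here */
	dev->mtu = newmtu;
	return 0;
}
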
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 82017f56e661..e6c56c5bb0f6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
795 if (status) 795 if (status)
796 goto err_htc_stop; 796 goto err_htc_stop;
797 797
798 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 798 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
799 ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
800 else
801 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
802
799 INIT_LIST_HEAD(&ar->arvifs); 803 INIT_LIST_HEAD(&ar->arvifs);
800 804
801 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 805 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6c102b1312ff..eebc860c3655 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
312 int msdu_len, msdu_chaining = 0; 312 int msdu_len, msdu_chaining = 0;
313 struct sk_buff *msdu; 313 struct sk_buff *msdu;
314 struct htt_rx_desc *rx_desc; 314 struct htt_rx_desc *rx_desc;
315 bool corrupted = false;
316 315
317 lockdep_assert_held(&htt->rx_ring.lock); 316 lockdep_assert_held(&htt->rx_ring.lock);
318 317
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 438 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
440 RX_MSDU_END_INFO0_LAST_MSDU; 439 RX_MSDU_END_INFO0_LAST_MSDU;
441 440
442 if (msdu_chaining && !last_msdu)
443 corrupted = true;
444
445 if (last_msdu) { 441 if (last_msdu) {
446 msdu->next = NULL; 442 msdu->next = NULL;
447 break; 443 break;
@@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
457 msdu_chaining = -1; 453 msdu_chaining = -1;
458 454
459 /* 455 /*
460 * Apparently FW sometimes reports weird chained MSDU sequences with
461 * more than one rx descriptor. This seems like a bug but needs more
462 * analyzing. For the time being fix it by dropping such sequences to
463 * avoid blowing up the host system.
464 */
465 if (corrupted) {
466 ath10k_warn("failed to pop chained msdus, dropping\n");
467 ath10k_htt_rx_free_msdu_chain(*head_msdu);
468 *head_msdu = NULL;
469 *tail_msdu = NULL;
470 msdu_chaining = -EINVAL;
471 }
472
473 /*
474 * Don't refill the ring yet. 456 * Don't refill the ring yet.
475 * 457 *
476 * First, the elements popped here are still in use - it is not 458 * First, the elements popped here are still in use - it is not
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2cbd9df..7c28cb55610b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
887 887
888 tx_info = IEEE80211_SKB_CB(skb); 888 tx_info = IEEE80211_SKB_CB(skb);
889 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; 889 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
890
891 /*
892 * No aggregation session is running, but there may be frames
893 * from a previous session or a failed attempt in the queue.
894 * Send them out as normal data frames
895 */
896 if (!tid->active)
897 tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
898
890 if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { 899 if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
891 bf->bf_state.bf_type = 0; 900 bf->bf_state.bf_type = 0;
892 return bf; 901 return bf;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 6db51a666f61..d06fcb05adf2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1184 bus->bus_priv.usb = bus_pub; 1184 bus->bus_priv.usb = bus_pub;
1185 dev_set_drvdata(dev, bus); 1185 dev_set_drvdata(dev, bus);
1186 bus->ops = &brcmf_usb_bus_ops; 1186 bus->ops = &brcmf_usb_bus_ops;
1187 bus->chip = bus_pub->devid;
1188 bus->chiprev = bus_pub->chiprev;
1189 bus->proto_type = BRCMF_PROTO_BCDC; 1187 bus->proto_type = BRCMF_PROTO_BCDC;
1190 bus->always_use_fws_queue = true; 1188 bus->always_use_fws_queue = true;
1191 1189
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1194 if (ret) 1192 if (ret)
1195 goto fail; 1193 goto fail;
1196 } 1194 }
1195 bus->chip = bus_pub->devid;
1196 bus->chiprev = bus_pub->chiprev;
1197
1197 /* request firmware here */ 1198 /* request firmware here */
1198 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, 1199 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1199 brcmf_usb_probe_phase2); 1200 brcmf_usb_probe_phase2);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6362ed..6dc5dd3ced44 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1068 /* recalculate basic rates */ 1068 /* recalculate basic rates */
1069 iwl_calc_basic_rates(priv, ctx); 1069 iwl_calc_basic_rates(priv, ctx);
1070 1070
1071 /*
1072 * force CTS-to-self frames protection if RTS-CTS is not preferred
1073 * one aggregation protection method
1074 */
1075 if (!priv->hw_params.use_rts_for_aggregation)
1076 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1077
1078 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 1071 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1079 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 1072 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1080 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 1073 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1480 else 1473 else
1481 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 1474 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1482 1475
1483 if (bss_conf->use_cts_prot)
1484 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1485 else
1486 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1487
1488 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 1476 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
1489 1477
1490 if (vif->type == NL80211_IFTYPE_AP || 1478 if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 0aa7c0085c9f..b1a33322b9ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -88,6 +88,7 @@
88 * P2P client interfaces simultaneously if they are in different bindings. 88 * P2P client interfaces simultaneously if they are in different bindings.
89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and 89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
90 * P2P client interfaces simultaneously if they are in same bindings. 90 * P2P client interfaces simultaneously if they are in same bindings.
91 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
91 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save 92 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
92 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. 93 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
93 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients 94 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8b5302777632..8b79081d4885 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
667 if (vif->bss_conf.qos) 667 if (vif->bss_conf.qos)
668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
669 669
670 if (vif->bss_conf.use_cts_prot) { 670 if (vif->bss_conf.use_cts_prot)
671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); 672
673 }
674 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", 673 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
675 vif->bss_conf.use_cts_prot, 674 vif->bss_conf.use_cts_prot,
676 vif->bss_conf.ht_operation_mode); 675 vif->bss_conf.ht_operation_mode);
@@ -1073,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
1073 /* Fill the common data for all mac context types */ 1072 /* Fill the common data for all mac context types */
1074 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 1073 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
1075 1074
1076 /* Also enable probe requests to pass */ 1075 /*
1077 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); 1076 * pass probe requests and beacons from other APs (needed
1077 * for ht protection)
1078 */
1079 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
1080 MAC_FILTER_IN_BEACON);
1078 1081
1079 /* Fill the data specific for ap mode */ 1082 /* Fill the data specific for ap mode */
1080 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, 1083 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1095,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
1095 /* Fill the common data for all mac context types */ 1098 /* Fill the common data for all mac context types */
1096 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 1099 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
1097 1100
1101 /*
1102 * pass probe requests and beacons from other APs (needed
1103 * for ht protection)
1104 */
1105 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
1106 MAC_FILTER_IN_BEACON);
1107
1098 /* Fill the data specific for GO mode */ 1108 /* Fill the data specific for GO mode */
1099 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, 1109 iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
1100 action == FW_CTXT_ACTION_ADD); 1110 action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7215f5980186..98556d03c1ed 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1159,8 +1159,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1159 1159
1160 bcast_mac = &cmd->macs[mvmvif->id]; 1160 bcast_mac = &cmd->macs[mvmvif->id];
1161 1161
1162 /* enable filtering only for associated stations */ 1162 /*
1163 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) 1163 * enable filtering only for associated stations, but not for P2P
1164 * Clients
1165 */
1166 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1167 !vif->bss_conf.assoc)
1164 return; 1168 return;
1165 1169
1166 bcast_mac->default_discard = 1; 1170 bcast_mac->default_discard = 1;
@@ -1237,10 +1241,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1237 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1241 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1238 return 0; 1242 return 0;
1239 1243
1240 /* bcast filtering isn't supported for P2P client */
1241 if (vif->p2p)
1242 return 0;
1243
1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1245 return 0; 1245 return 0;
1246 1246
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4b6c7d4bd199..eac2b424f6a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
588 struct iwl_scan_offload_cmd *scan, 588 struct iwl_scan_offload_cmd *scan,
589 struct iwl_mvm_scan_params *params) 589 struct iwl_mvm_scan_params *params)
590{ 590{
591 scan->channel_count = 591 scan->channel_count = req->n_channels;
592 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
593 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
594 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); 592 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
595 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); 593 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
596 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; 594 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
669 struct cfg80211_sched_scan_request *req, 667 struct cfg80211_sched_scan_request *req,
670 struct iwl_scan_channel_cfg *channels, 668 struct iwl_scan_channel_cfg *channels,
671 enum ieee80211_band band, 669 enum ieee80211_band band,
672 int *head, int *tail, 670 int *head,
673 u32 ssid_bitmap, 671 u32 ssid_bitmap,
674 struct iwl_mvm_scan_params *params) 672 struct iwl_mvm_scan_params *params)
675{ 673{
676 struct ieee80211_supported_band *s_band; 674 int i, index = 0;
677 int n_channels = req->n_channels;
678 int i, j, index = 0;
679 bool partial;
680 675
681 /* 676 for (i = 0; i < req->n_channels; i++) {
682 * We have to configure all supported channels, even if we don't want to 677 struct ieee80211_channel *chan = req->channels[i];
683 * scan on them, but we have to send channels in the order that we want 678
684 * to scan. So add requested channels to head of the list and others to 679 if (chan->band != band)
685 * the end. 680 continue;
686 */ 681
687 s_band = &mvm->nvm_data->bands[band]; 682 index = *head;
688 683 (*head)++;
689 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { 684
690 partial = false; 685 channels->channel_number[index] = cpu_to_le16(chan->hw_value);
691 for (j = 0; j < n_channels; j++)
692 if (s_band->channels[i].center_freq ==
693 req->channels[j]->center_freq) {
694 index = *head;
695 (*head)++;
696 /*
697 * Channels that came with the request will be
698 * in partial scan .
699 */
700 partial = true;
701 break;
702 }
703 if (!partial) {
704 index = *tail;
705 (*tail)--;
706 }
707 channels->channel_number[index] =
708 cpu_to_le16(ieee80211_frequency_to_channel(
709 s_band->channels[i].center_freq));
710 channels->dwell_time[index][0] = params->dwell[band].active; 686 channels->dwell_time[index][0] = params->dwell[band].active;
711 channels->dwell_time[index][1] = params->dwell[band].passive; 687 channels->dwell_time[index][1] = params->dwell[band].passive;
712 688
713 channels->iter_count[index] = cpu_to_le16(1); 689 channels->iter_count[index] = cpu_to_le16(1);
714 channels->iter_interval[index] = 0; 690 channels->iter_interval[index] = 0;
715 691
716 if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR)) 692 if (!(chan->flags & IEEE80211_CHAN_NO_IR))
717 channels->type[index] |= 693 channels->type[index] |=
718 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); 694 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
719 695
720 channels->type[index] |= 696 channels->type[index] |=
721 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL); 697 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
722 if (partial) 698 IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
723 channels->type[index] |=
724 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
725 699
726 if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40) 700 if (chan->flags & IEEE80211_CHAN_NO_HT40)
727 channels->type[index] |= 701 channels->type[index] |=
728 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); 702 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
729 703
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
740 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 714 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
741 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 715 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
742 int head = 0; 716 int head = 0;
743 int tail = band_2ghz + band_5ghz - 1;
744 u32 ssid_bitmap; 717 u32 ssid_bitmap;
745 int cmd_len; 718 int cmd_len;
746 int ret; 719 int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
772 &scan_cfg->scan_cmd.tx_cmd[0], 745 &scan_cfg->scan_cmd.tx_cmd[0],
773 scan_cfg->data); 746 scan_cfg->data);
774 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 747 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
775 IEEE80211_BAND_2GHZ, &head, &tail, 748 IEEE80211_BAND_2GHZ, &head,
776 ssid_bitmap, &params); 749 ssid_bitmap, &params);
777 } 750 }
778 if (band_5ghz) { 751 if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
782 scan_cfg->data + 755 scan_cfg->data +
783 SCAN_OFFLOAD_PROBE_REQ_SIZE); 756 SCAN_OFFLOAD_PROBE_REQ_SIZE);
784 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 757 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
785 IEEE80211_BAND_5GHZ, &head, &tail, 758 IEEE80211_BAND_5GHZ, &head,
786 ssid_bitmap, &params); 759 ssid_bitmap, &params);
787 } 760 }
788 761
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7091a18d5a72..98950e45c7b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 383 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 387 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 5b32106182f8..fe0f66f73507 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); 185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); 186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
187 187
188 memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
188 tx_info_aggr->bss_type = tx_info_src->bss_type; 189 tx_info_aggr->bss_type = tx_info_src->bss_type;
189 tx_info_aggr->bss_num = tx_info_src->bss_num; 190 tx_info_aggr->bss_num = tx_info_src->bss_num;
190 191
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e95dec91a561..b511613bba2d 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
220 } 220 }
221 221
222 tx_info = MWIFIEX_SKB_TXCB(skb); 222 tx_info = MWIFIEX_SKB_TXCB(skb);
223 memset(tx_info, 0, sizeof(*tx_info));
223 tx_info->bss_num = priv->bss_num; 224 tx_info->bss_num = priv->bss_num;
224 tx_info->bss_type = priv->bss_type; 225 tx_info->bss_type = priv->bss_type;
225 tx_info->pkt_len = pkt_len; 226 tx_info->pkt_len = pkt_len;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8dee6c86f4f1..c161141f6c39 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
453 453
454 if (skb) { 454 if (skb) {
455 rx_info = MWIFIEX_SKB_RXCB(skb); 455 rx_info = MWIFIEX_SKB_RXCB(skb);
456 memset(rx_info, 0, sizeof(*rx_info));
456 rx_info->bss_num = priv->bss_num; 457 rx_info->bss_num = priv->bss_num;
457 rx_info->bss_type = priv->bss_type; 458 rx_info->bss_type = priv->bss_type;
458 } 459 }
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index cbabc12fbda3..e91cd0fa5ca8 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
645 } 645 }
646 646
647 tx_info = MWIFIEX_SKB_TXCB(skb); 647 tx_info = MWIFIEX_SKB_TXCB(skb);
648 memset(tx_info, 0, sizeof(*tx_info));
648 tx_info->bss_num = priv->bss_num; 649 tx_info->bss_num = priv->bss_num;
649 tx_info->bss_type = priv->bss_type; 650 tx_info->bss_type = priv->bss_type;
650 tx_info->pkt_len = skb->len; 651 tx_info->pkt_len = skb->len;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5fce7e78a36e..70eb863c7249 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
150 return -1; 150 return -1;
151 151
152 tx_info = MWIFIEX_SKB_TXCB(skb); 152 tx_info = MWIFIEX_SKB_TXCB(skb);
153 memset(tx_info, 0, sizeof(*tx_info));
153 tx_info->bss_num = priv->bss_num; 154 tx_info->bss_num = priv->bss_num;
154 tx_info->bss_type = priv->bss_type; 155 tx_info->bss_type = priv->bss_type;
155 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); 156 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e73034fbbde9..0e88364e0c67 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
605 } 605 }
606 606
607 tx_info = MWIFIEX_SKB_TXCB(skb); 607 tx_info = MWIFIEX_SKB_TXCB(skb);
608 memset(tx_info, 0, sizeof(*tx_info));
608 tx_info->bss_num = priv->bss_num; 609 tx_info->bss_num = priv->bss_num;
609 tx_info->bss_type = priv->bss_type; 610 tx_info->bss_type = priv->bss_type;
610 611
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
760 skb->priority = MWIFIEX_PRIO_VI; 761 skb->priority = MWIFIEX_PRIO_VI;
761 762
762 tx_info = MWIFIEX_SKB_TXCB(skb); 763 tx_info = MWIFIEX_SKB_TXCB(skb);
764 memset(tx_info, 0, sizeof(*tx_info));
763 tx_info->bss_num = priv->bss_num; 765 tx_info->bss_num = priv->bss_num;
764 tx_info->bss_type = priv->bss_type; 766 tx_info->bss_type = priv->bss_type;
765 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 767 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 37f26afd4314..fd7e5b9b4581 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
55 return -1; 55 return -1;
56 } 56 }
57 57
58 memset(rx_info, 0, sizeof(*rx_info));
58 rx_info->bss_num = priv->bss_num; 59 rx_info->bss_num = priv->bss_num;
59 rx_info->bss_type = priv->bss_type; 60 rx_info->bss_type = priv->bss_type;
60 61
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 9a56bc61cb1d..b0601b91cc4f 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
175 } 175 }
176 176
177 tx_info = MWIFIEX_SKB_TXCB(skb); 177 tx_info = MWIFIEX_SKB_TXCB(skb);
178 memset(tx_info, 0, sizeof(*tx_info));
178 tx_info->bss_num = priv->bss_num; 179 tx_info->bss_num = priv->bss_num;
179 tx_info->bss_type = priv->bss_type; 180 tx_info->bss_type = priv->bss_type;
180 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; 181 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
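
Every mwifiex hunk above zeroes the skb control block before writing bss_num/bss_type, because skb->cb is a scratch area reused by each layer that handles the buffer and may still hold a previous owner's data (stale flags in particular). A sketch of the pattern with an illustrative CB layout, not the driver's real structures:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative control-block layout; not the driver's real structure. */
struct example_txinfo {
	u8 bss_type;
	u8 bss_num;
	u32 pkt_len;
};

#define EXAMPLE_SKB_TXCB(skb) ((struct example_txinfo *)((skb)->cb))

static void example_prepare_tx(struct sk_buff *skb, u8 bss_type, u8 bss_num)
{
	struct example_txinfo *tx_info = EXAMPLE_SKB_TXCB(skb);

	/* skb->cb may hold leftovers from an earlier layer; clear it first. */
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_type = bss_type;
	tx_info->bss_num = bss_num;
	tx_info->pkt_len = skb->len;
}
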
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index e11dab2216c6..832006b5aab1 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
231 */ 231 */
232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) 232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
233{ 233{
234 __le32 reg; 234 __le32 *reg;
235 u32 fw_mode; 235 u32 fw_mode;
236 236
237 reg = kmalloc(sizeof(*reg), GFP_KERNEL);
238 if (reg == NULL)
239 return -ENOMEM;
237 /* cannot use rt2x00usb_register_read here as it uses different 240 /* cannot use rt2x00usb_register_read here as it uses different
238 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the 241 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
239 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the 242 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
241 */ 244 */
242 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, 245 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
243 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, 246 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
244 &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE); 247 reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
245 fw_mode = le32_to_cpu(reg); 248 fw_mode = le32_to_cpu(*reg);
249 kfree(reg);
246 250
247 if ((fw_mode & 0x00000003) == 2) 251 if ((fw_mode & 0x00000003) == 2)
248 return 1; 252 return 1;
@@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
261 int status; 265 int status;
262 u32 offset; 266 u32 offset;
263 u32 length; 267 u32 length;
268 int retval;
264 269
265 /* 270 /*
266 * Check which section of the firmware we need. 271 * Check which section of the firmware we need.
@@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
278 /* 283 /*
279 * Write firmware to device. 284 * Write firmware to device.
280 */ 285 */
281 if (rt2800usb_autorun_detect(rt2x00dev)) { 286 retval = rt2800usb_autorun_detect(rt2x00dev);
287 if (retval < 0)
288 return retval;
289 if (retval) {
282 rt2x00_info(rt2x00dev, 290 rt2x00_info(rt2x00dev,
283 "Firmware loading not required - NIC in AutoRun mode\n"); 291 "Firmware loading not required - NIC in AutoRun mode\n");
284 } else { 292 } else {
@@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
763 */ 771 */
764static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev) 772static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
765{ 773{
766 if (rt2800usb_autorun_detect(rt2x00dev)) 774 int retval;
775
776 retval = rt2800usb_autorun_detect(rt2x00dev);
777 if (retval < 0)
778 return retval;
779 if (retval)
767 return 1; 780 return 1;
768 return rt2800_efuse_detect(rt2x00dev); 781 return rt2800_efuse_detect(rt2x00dev);
769} 782}
@@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
772{ 785{
773 int retval; 786 int retval;
774 787
775 if (rt2800usb_efuse_detect(rt2x00dev)) 788 retval = rt2800usb_efuse_detect(rt2x00dev);
789 if (retval < 0)
790 return retval;
791 if (retval)
776 retval = rt2800_read_eeprom_efuse(rt2x00dev); 792 retval = rt2800_read_eeprom_efuse(rt2x00dev);
777 else 793 else
778 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 794 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
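
The rt2800usb change above moves the vendor-request buffer off the stack: USB transfer buffers may be DMA-mapped, so they must come from kmalloc(), and the helper now also has to report -ENOMEM, which is why its callers start distinguishing negative errors from the boolean result. A hedged sketch of the buffer handling (the request number is a placeholder, not the rt2x00 request):

#include <linux/slab.h>
#include <linux/usb.h>

/*
 * Illustrative sketch: the data buffer of a USB control transfer may be
 * DMA-mapped by the host controller, so it must be kmalloc'd, never the
 * address of a stack variable.
 */
static int example_read_device_word(struct usb_device *udev, u32 *value)
{
	__le32 *reg;
	int ret;

	reg = kmalloc(sizeof(*reg), GFP_KERNEL);
	if (!reg)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01,		/* placeholder bRequest */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, reg, sizeof(*reg), 1000);
	if (ret < 0)
		goto out;

	*value = le32_to_cpu(*reg);
	ret = 0;
out:
	kfree(reg);
	return ret;
}
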
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1030{ 1030{
1031 struct gnttab_map_grant_ref *gop_map = *gopp_map; 1031 struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033 /* This always points to the shinfo of the skb being checked, which
1034 * could be either the first or the one on the frag_list
1035 */
1033 struct skb_shared_info *shinfo = skb_shinfo(skb); 1036 struct skb_shared_info *shinfo = skb_shinfo(skb);
1037 /* If this is non-NULL, we are currently checking the frag_list skb, and
1038 * this points to the shinfo of the first one
1039 */
1040 struct skb_shared_info *first_shinfo = NULL;
1034 int nr_frags = shinfo->nr_frags; 1041 int nr_frags = shinfo->nr_frags;
1042 const bool sharedslot = nr_frags &&
1043 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1035 int i, err; 1044 int i, err;
1036 struct sk_buff *first_skb = NULL;
1037 1045
1038 /* Check status of header. */ 1046 /* Check status of header. */
1039 err = (*gopp_copy)->status; 1047 err = (*gopp_copy)->status;
1040 (*gopp_copy)++;
1041 if (unlikely(err)) { 1048 if (unlikely(err)) {
1042 if (net_ratelimit()) 1049 if (net_ratelimit())
1043 netdev_dbg(queue->vif->dev, 1050 netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1045 (*gopp_copy)->status, 1052 (*gopp_copy)->status,
1046 pending_idx, 1053 pending_idx,
1047 (*gopp_copy)->source.u.ref); 1054 (*gopp_copy)->source.u.ref);
1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1055 /* The first frag might still have this slot mapped */
1056 if (!sharedslot)
1057 xenvif_idx_release(queue, pending_idx,
1058 XEN_NETIF_RSP_ERROR);
1049 } 1059 }
1060 (*gopp_copy)++;
1050 1061
1051check_frags: 1062check_frags:
1052 for (i = 0; i < nr_frags; i++, gop_map++) { 1063 for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
1062 pending_idx, 1073 pending_idx,
1063 gop_map->handle); 1074 gop_map->handle);
1064 /* Had a previous error? Invalidate this fragment. */ 1075 /* Had a previous error? Invalidate this fragment. */
1065 if (unlikely(err)) 1076 if (unlikely(err)) {
1066 xenvif_idx_unmap(queue, pending_idx); 1077 xenvif_idx_unmap(queue, pending_idx);
1078 /* If the mapping of the first frag was OK, but
1079 * the header's copy failed, and they are
1080 * sharing a slot, send an error
1081 */
1082 if (i == 0 && sharedslot)
1083 xenvif_idx_release(queue, pending_idx,
1084 XEN_NETIF_RSP_ERROR);
1085 else
1086 xenvif_idx_release(queue, pending_idx,
1087 XEN_NETIF_RSP_OKAY);
1088 }
1067 continue; 1089 continue;
1068 } 1090 }
1069 1091
@@ -1075,42 +1097,53 @@ check_frags:
1075 gop_map->status, 1097 gop_map->status,
1076 pending_idx, 1098 pending_idx,
1077 gop_map->ref); 1099 gop_map->ref);
1100
1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1101 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1079 1102
1080 /* Not the first error? Preceding frags already invalidated. */ 1103 /* Not the first error? Preceding frags already invalidated. */
1081 if (err) 1104 if (err)
1082 continue; 1105 continue;
1083 /* First error: invalidate preceding fragments. */ 1106
 1107 /* First error: if the header hasn't shared a slot with the
1108 * first frag, release it as well.
1109 */
1110 if (!sharedslot)
1111 xenvif_idx_release(queue,
1112 XENVIF_TX_CB(skb)->pending_idx,
1113 XEN_NETIF_RSP_OKAY);
1114
1115 /* Invalidate preceding fragments of this skb. */
1084 for (j = 0; j < i; j++) { 1116 for (j = 0; j < i; j++) {
1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1117 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1086 xenvif_idx_unmap(queue, pending_idx); 1118 xenvif_idx_unmap(queue, pending_idx);
1119 xenvif_idx_release(queue, pending_idx,
1120 XEN_NETIF_RSP_OKAY);
1121 }
1122
1123 /* And if we found the error while checking the frag_list, unmap
1124 * the first skb's frags
1125 */
1126 if (first_shinfo) {
1127 for (j = 0; j < first_shinfo->nr_frags; j++) {
1128 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129 xenvif_idx_unmap(queue, pending_idx);
1130 xenvif_idx_release(queue, pending_idx,
1131 XEN_NETIF_RSP_OKAY);
1132 }
1087 } 1133 }
1088 1134
1089 /* Remember the error: invalidate all subsequent fragments. */ 1135 /* Remember the error: invalidate all subsequent fragments. */
1090 err = newerr; 1136 err = newerr;
1091 } 1137 }
1092 1138
1093 if (skb_has_frag_list(skb)) { 1139 if (skb_has_frag_list(skb) && !first_shinfo) {
1094 first_skb = skb; 1140 first_shinfo = skb_shinfo(skb);
1095 skb = shinfo->frag_list; 1141 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1096 shinfo = skb_shinfo(skb);
1097 nr_frags = shinfo->nr_frags; 1142 nr_frags = shinfo->nr_frags;
1098 1143
1099 goto check_frags; 1144 goto check_frags;
1100 } 1145 }
1101 1146
1102 /* There was a mapping error in the frag_list skb. We have to unmap
1103 * the first skb's frags
1104 */
1105 if (first_skb && err) {
1106 int j;
1107 shinfo = skb_shinfo(first_skb);
1108 for (j = 0; j < shinfo->nr_frags; j++) {
1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1110 xenvif_idx_unmap(queue, pending_idx);
1111 }
1112 }
1113
1114 *gopp_map = gop_map; 1147 *gopp_map = gop_map;
1115 return err; 1148 return err;
1116} 1149}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
1518 1551
1519 /* Check the remap error code. */ 1552 /* Check the remap error code. */
1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { 1553 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1554 /* If there was an error, xenvif_tx_check_gop is
1555 * expected to release all the frags which were mapped,
1556 * so kfree_skb shouldn't do it again
1557 */
1521 skb_shinfo(skb)->nr_frags = 0; 1558 skb_shinfo(skb)->nr_frags = 0;
1559 if (skb_has_frag_list(skb)) {
1560 struct sk_buff *nskb =
1561 skb_shinfo(skb)->frag_list;
1562 skb_shinfo(nskb)->nr_frags = 0;
1563 }
1522 kfree_skb(skb); 1564 kfree_skb(skb);
1523 continue; 1565 continue;
1524 } 1566 }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1822 tx_unmap_op.status); 1864 tx_unmap_op.status);
1823 BUG(); 1865 BUG();
1824 } 1866 }
1825
1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1827} 1867}
1828 1868
1829static inline int rx_work_todo(struct xenvif_queue *queue) 1869static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2ccb4a02368b..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1439 unsigned int i = 0; 1439 unsigned int i = 0;
1440 unsigned int num_queues = info->netdev->real_num_tx_queues; 1440 unsigned int num_queues = info->netdev->real_num_tx_queues;
1441 1441
1442 netif_carrier_off(info->netdev);
1443
1442 for (i = 0; i < num_queues; ++i) { 1444 for (i = 0; i < num_queues; ++i) {
1443 struct netfront_queue *queue = &info->queues[i]; 1445 struct netfront_queue *queue = &info->queues[i];
1444 1446
1445 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1446 spin_lock_bh(&queue->rx_lock);
1447 spin_lock_irq(&queue->tx_lock);
1448 netif_carrier_off(queue->info->netdev);
1449 spin_unlock_irq(&queue->tx_lock);
1450 spin_unlock_bh(&queue->rx_lock);
1451
1452 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1447 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1453 unbind_from_irqhandler(queue->tx_irq, queue); 1448 unbind_from_irqhandler(queue->tx_irq, queue);
1454 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1449 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1458 queue->tx_evtchn = queue->rx_evtchn = 0; 1453 queue->tx_evtchn = queue->rx_evtchn = 0;
1459 queue->tx_irq = queue->rx_irq = 0; 1454 queue->tx_irq = queue->rx_irq = 0;
1460 1455
1456 napi_synchronize(&queue->napi);
1457
1461 /* End access and free the pages */ 1458 /* End access and free the pages */
1462 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1459 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1463 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1460 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
2046 /* By now, the queue structures have been set up */ 2043 /* By now, the queue structures have been set up */
2047 for (j = 0; j < num_queues; ++j) { 2044 for (j = 0; j < num_queues; ++j) {
2048 queue = &np->queues[j]; 2045 queue = &np->queues[j];
2049 spin_lock_bh(&queue->rx_lock);
2050 spin_lock_irq(&queue->tx_lock);
2051 2046
2052 /* Step 1: Discard all pending TX packet fragments. */ 2047 /* Step 1: Discard all pending TX packet fragments. */
2048 spin_lock_irq(&queue->tx_lock);
2053 xennet_release_tx_bufs(queue); 2049 xennet_release_tx_bufs(queue);
2050 spin_unlock_irq(&queue->tx_lock);
2054 2051
2055 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2052 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
2053 spin_lock_bh(&queue->rx_lock);
2054
2056 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2055 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
2057 skb_frag_t *frag; 2056 skb_frag_t *frag;
2058 const struct page *page; 2057 const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
2076 } 2075 }
2077 2076
2078 queue->rx.req_prod_pvt = requeue_idx; 2077 queue->rx.req_prod_pvt = requeue_idx;
2078
2079 spin_unlock_bh(&queue->rx_lock);
2079 } 2080 }
2080 2081
2081 /* 2082 /*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
2087 netif_carrier_on(np->netdev); 2088 netif_carrier_on(np->netdev);
2088 for (j = 0; j < num_queues; ++j) { 2089 for (j = 0; j < num_queues; ++j) {
2089 queue = &np->queues[j]; 2090 queue = &np->queues[j];
2091
2090 notify_remote_via_irq(queue->tx_irq); 2092 notify_remote_via_irq(queue->tx_irq);
2091 if (queue->tx_irq != queue->rx_irq) 2093 if (queue->tx_irq != queue->rx_irq)
2092 notify_remote_via_irq(queue->rx_irq); 2094 notify_remote_via_irq(queue->rx_irq);
2093 xennet_tx_buf_gc(queue);
2094 xennet_alloc_rx_buffers(queue);
2095 2095
2096 spin_lock_irq(&queue->tx_lock);
2097 xennet_tx_buf_gc(queue);
2096 spin_unlock_irq(&queue->tx_lock); 2098 spin_unlock_irq(&queue->tx_lock);
2099
2100 spin_lock_bh(&queue->rx_lock);
2101 xennet_alloc_rx_buffers(queue);
2097 spin_unlock_bh(&queue->rx_lock); 2102 spin_unlock_bh(&queue->rx_lock);
2098 } 2103 }
2099 2104
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a3bf2122a8d5..401b2453da45 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
182} 182}
183EXPORT_SYMBOL(of_mdiobus_register); 183EXPORT_SYMBOL(of_mdiobus_register);
184 184
185/**
186 * of_mdiobus_link_phydev - Find a device node for a phy
187 * @mdio: pointer to mii_bus structure
188 * @phydev: phydev for which the of_node pointer should be set
189 *
190 * Walk the list of subnodes of a mdio bus and look for a node that matches the
191 * phy's address with its 'reg' property. If found, set the of_node pointer for
192 * the phy. This allows auto-probed pyh devices to be supplied with information
193 * passed in via DT.
194 */
195void of_mdiobus_link_phydev(struct mii_bus *mdio,
196 struct phy_device *phydev)
197{
198 struct device *dev = &phydev->dev;
199 struct device_node *child;
200
201 if (dev->of_node || !mdio->dev.of_node)
202 return;
203
204 for_each_available_child_of_node(mdio->dev.of_node, child) {
205 int addr;
206
207 addr = of_mdio_parse_addr(&mdio->dev, child);
208 if (addr < 0)
209 continue;
210
211 if (addr == phydev->addr) {
212 dev->of_node = child;
213 return;
214 }
215 }
216}
217EXPORT_SYMBOL(of_mdiobus_link_phydev);
218
219/* Helper function for of_phy_find_device */ 185/* Helper function for of_phy_find_device */
220static int of_phy_match(struct device *dev, void *phy_np) 186static int of_phy_match(struct device *dev, void *phy_np)
221{ 187{
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f35..44333bd8f908 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
5# Parport configuration. 5# Parport configuration.
6# 6#
7 7
8config ARCH_MIGHT_HAVE_PC_PARPORT
9 bool
10 help
11 Select this config option from the architecture Kconfig if
12 the architecture might have PC parallel port hardware.
13
8menuconfig PARPORT 14menuconfig PARPORT
9 tristate "Parallel port support" 15 tristate "Parallel port support"
10 depends on HAS_IOMEM 16 depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
31 37
32 If unsure, say Y. 38 If unsure, say Y.
33 39
34config ARCH_MIGHT_HAVE_PC_PARPORT
35 bool
36 help
37 Select this config option from the architecture Kconfig if
38 the architecture might have PC parallel port hardware.
39
40if PARPORT 40if PARPORT
41 41
42config PARPORT_PC 42config PARPORT_PC
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 63a54a340863..1c8592b0e146 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3135 if (probe) 3135 if (probe)
3136 return 0; 3136 return 0;
3137 3137
3138 /* Wait for Transaction Pending bit clean */ 3138 /*
3139 if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3139 * Wait for Transaction Pending bit to clear. A word-aligned test
 3140 * is used, so we use the control offset rather than status and shift
3141 * the test bit to match.
3142 */
3143 if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3144 PCI_AF_STATUS_TP << 8))
3140 goto clear; 3145 goto clear;
3141 3146
3142 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 3147 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
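
The pci_af_flr() fix above avoids a byte-offset read of PCI_AF_STATUS on devices that only handle word-aligned config accesses: it reads from the word-aligned PCI_AF_CTRL offset and shifts the Transaction Pending bit into the upper byte. Roughly, the test that pci_wait_for_pending() repeats then amounts to this (a sketch, not the kernel's exact code):

#include <linux/pci.h>

/*
 * Sketch of the word-aligned AF "Transaction Pending" test described above:
 * read the 16-bit word starting at PCI_AF_CTRL and test the status bit
 * shifted into the upper byte.
 */
static bool example_af_transaction_pending(struct pci_dev *dev, int pos)
{
	u16 word;

	pci_read_config_word(dev, pos + PCI_AF_CTRL, &word);
	return word & (PCI_AF_STATUS_TP << 8);
}
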
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 16a2f067c242..64b98d242ea6 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
112config PHY_SUN4I_USB 112config PHY_SUN4I_USB
113 tristate "Allwinner sunxi SoC USB PHY driver" 113 tristate "Allwinner sunxi SoC USB PHY driver"
114 depends on ARCH_SUNXI && HAS_IOMEM && OF 114 depends on ARCH_SUNXI && HAS_IOMEM && OF
115 depends on RESET_CONTROLLER
115 select GENERIC_PHY 116 select GENERIC_PHY
116 help 117 help
117 Enable this to support the transceiver that is part of Allwinner 118 Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
122 123
123config PHY_SAMSUNG_USB2 124config PHY_SAMSUNG_USB2
124 tristate "Samsung USB 2.0 PHY driver" 125 tristate "Samsung USB 2.0 PHY driver"
126 depends on HAS_IOMEM
125 select GENERIC_PHY 127 select GENERIC_PHY
126 select MFD_SYSCON 128 select MFD_SYSCON
127 help 129 help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index c64a2f3b2d62..49c446530101 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
614 return phy; 614 return phy;
615 615
616put_dev: 616put_dev:
617 put_device(&phy->dev); 617 put_device(&phy->dev); /* calls phy_release() which frees resources */
618 ida_remove(&phy_ida, phy->id); 618 return ERR_PTR(ret);
619
619free_phy: 620free_phy:
620 kfree(phy); 621 kfree(phy);
621 return ERR_PTR(ret); 622 return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
799 800
800 phy = to_phy(dev); 801 phy = to_phy(dev);
801 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); 802 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
802 ida_remove(&phy_ida, phy->id); 803 ida_simple_remove(&phy_ida, phy->id);
803 kfree(phy); 804 kfree(phy);
804} 805}
805 806
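
The phy-core fix above follows the driver-model rule that, once a device has been initialized, cleanup must go through put_device(): the release callback is the single owner of the id and the allocation, so the extra ida_remove() on the old error path was a double free. A hedged sketch of that division of labour, with illustrative names rather than the phy framework's own:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(example_ida);

struct example_obj {
	struct device dev;
	int id;
};

/* The release callback is the one place that frees what the object owns. */
static void example_release(struct device *dev)
{
	struct example_obj *obj = container_of(dev, struct example_obj, dev);

	ida_simple_remove(&example_ida, obj->id);
	kfree(obj);
}

static struct example_obj *example_create(struct device *parent)
{
	struct example_obj *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	if (obj->id < 0) {
		ret = obj->id;
		kfree(obj);		/* release() is not armed yet */
		return ERR_PTR(ret);
	}

	device_initialize(&obj->dev);
	obj->dev.parent = parent;
	obj->dev.release = example_release;
	dev_set_name(&obj->dev, "example%d", obj->id);

	ret = device_add(&obj->dev);
	if (ret) {
		/* From here on put_device() -> example_release() cleans up;
		 * freeing the id or the object again here would double free. */
		put_device(&obj->dev);
		return ERR_PTR(ret);
	}

	return obj;
}
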
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 7007c11fe07d..34b396146c8a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
233 if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { 233 if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
235 phy->phy_base = devm_ioremap_resource(&pdev->dev, res); 235 phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
236 if (!phy->phy_base) 236 if (IS_ERR(phy->phy_base))
237 return -ENOMEM; 237 return PTR_ERR(phy->phy_base);
238 phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT; 238 phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
239 } 239 }
240 240
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
262 otg->phy = &phy->phy; 262 otg->phy = &phy->phy;
263 263
264 platform_set_drvdata(pdev, phy); 264 platform_set_drvdata(pdev, phy);
265 pm_runtime_enable(phy->dev);
266 265
267 generic_phy = devm_phy_create(phy->dev, &ops, NULL); 266 generic_phy = devm_phy_create(phy->dev, &ops, NULL);
268 if (IS_ERR(generic_phy)) 267 if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
270 269
271 phy_set_drvdata(generic_phy, phy); 270 phy_set_drvdata(generic_phy, phy);
272 271
272 pm_runtime_enable(phy->dev);
273 phy_provider = devm_of_phy_provider_register(phy->dev, 273 phy_provider = devm_of_phy_provider_register(phy->dev,
274 of_phy_simple_xlate); 274 of_phy_simple_xlate);
275 if (IS_ERR(phy_provider)) 275 if (IS_ERR(phy_provider)) {
276 pm_runtime_disable(phy->dev);
276 return PTR_ERR(phy_provider); 277 return PTR_ERR(phy_provider);
278 }
277 279
278 phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); 280 phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
279 if (IS_ERR(phy->wkupclk)) { 281 if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
317 if (!IS_ERR(phy->optclk)) 319 if (!IS_ERR(phy->optclk))
318 clk_unprepare(phy->optclk); 320 clk_unprepare(phy->optclk);
319 usb_remove_phy(&phy->phy); 321 usb_remove_phy(&phy->phy);
322 pm_runtime_disable(phy->dev);
320 323
321 return 0; 324 return 0;
322} 325}
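
The first omap-usb2 hunk above corrects a common mix-up: devm_ioremap_resource() reports failure as an ERR_PTR()-encoded pointer, never NULL, so the check must use IS_ERR()/PTR_ERR(). A minimal sketch of the correct pattern (helper name is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: devm_ioremap_resource() reports failure as ERR_PTR(), not NULL. */
static int example_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);	/* e.g. -EINVAL or -EBUSY, never NULL */

	return 0;
}
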
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 8a8c6bc8709a..1e69a32c221d 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
107#endif 107#endif
108 { }, 108 { },
109}; 109};
110MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
110 111
111static int samsung_usb2_phy_probe(struct platform_device *pdev) 112static int samsung_usb2_phy_probe(struct platform_device *pdev)
112{ 113{
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index edf5d2fd2b22..86db2235ab00 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
320 320
321 regmap = dev_get_regmap(&pdev->dev, NULL); 321 regmap = dev_get_regmap(&pdev->dev, NULL);
322 if (!regmap) 322 if (!regmap)
323 return PTR_ERR(regmap); 323 return -ENODEV;
324 324
325 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); 325 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
326 if (!pctrl) 326 if (!pctrl)
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
1431 1431
1432 status = readl(info->irqmux_base); 1432 status = readl(info->irqmux_base);
1433 1433
1434 for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) 1434 for_each_set_bit(n, &status, info->nbanks)
1435 __gpio_irq_handler(&info->banks[n]); 1435 __gpio_irq_handler(&info->banks[n]);
1436 1436
1437 chained_irq_exit(chip, desc); 1437 chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f1ca75e6d7b1..5f38c7f67834 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
211 configlen++; 211 configlen++;
212 212
213 pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); 213 pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
214 if (!pinconfig) {
215 kfree(*map);
216 return -ENOMEM;
217 }
214 218
215 if (!of_property_read_u32(node, "allwinner,drive", &val)) { 219 if (!of_property_read_u32(node, "allwinner,drive", &val)) {
216 u16 strength = (val + 1) * 10; 220 u16 strength = (val + 1) * 10;
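
The sunxi hunk adds the usual unwind step for a multi-allocation path: if the second allocation fails, the first one (*map, handed back through an out parameter) must be released before returning, otherwise the caller leaks it. A small sketch of that shape, with invented names:

    #include <stdlib.h>

    /* Returns 0 on success, negative on failure. Mirrors the pattern in
     * sunxi_pctrl_dt_node_to_map(): if the second allocation fails, the
     * first one must be released before returning the error. */
    static int build_map(char **map, char **pinconfig, size_t n)
    {
        *map = calloc(n, 1);
        if (!*map)
            return -1;

        *pinconfig = calloc(n, 1);
        if (!*pinconfig) {
            free(*map);         /* undo the earlier allocation */
            *map = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        char *map, *pinconfig;

        if (build_map(&map, &pinconfig, 64) == 0) {
            free(pinconfig);
            free(map);
        }
        return 0;
    }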
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
633 } else 633 } else
634 raw3270_writesf_readpart(rp); 634 raw3270_writesf_readpart(rp);
635 memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 635 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
636 memset(&rp->init_data, 0, sizeof(rp->init_data));
637} 636}
638 637
639static int 638static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
901 int rc; 901 int rc;
902 902
903 ap_dev->drv = ap_drv; 903 ap_dev->drv = ap_drv;
904
905 spin_lock_bh(&ap_device_list_lock);
906 list_add(&ap_dev->list, &ap_device_list);
907 spin_unlock_bh(&ap_device_list_lock);
908
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 909 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) { 910 if (rc) {
906 spin_lock_bh(&ap_device_list_lock); 911 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list); 912 list_del_init(&ap_dev->list);
908 spin_unlock_bh(&ap_device_list_lock); 913 spin_unlock_bh(&ap_device_list_lock);
909 } 914 }
910 return rc; 915 return rc;
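
The ap_bus change reorders registration and probing: the device goes onto ap_device_list before the driver's probe callback runs, and is taken off again only if probing fails, so anything that runs during probe already sees the device published. A simplified single-threaded sketch of that add-then-roll-back pattern (the list and probe routine are invented; the real code holds ap_device_list_lock around the list operations):

    #include <stdio.h>

    struct dev {
        struct dev *next;
        int id;
    };

    static struct dev *dev_list;

    static void list_add(struct dev *d) { d->next = dev_list; dev_list = d; }

    static void list_del(struct dev *d)
    {
        struct dev **p;

        for (p = &dev_list; *p; p = &(*p)->next)
            if (*p == d) {
                *p = d->next;
                d->next = NULL;
                return;
            }
    }

    static int probe(struct dev *d) { return d->id ? 0 : -1; /* id 0 fails */ }

    /* Mirrors ap_device_probe(): publish the device first, roll the
     * addition back if the driver probe fails. */
    static int device_probe(struct dev *d)
    {
        int rc;

        list_add(d);
        rc = probe(d);
        if (rc)
            list_del(d);
        return rc;
    }

    int main(void)
    {
        struct dev good = { .next = NULL, .id = 1 };
        struct dev bad = { .next = NULL, .id = 0 };
        int rc;

        rc = device_probe(&good);
        printf("good probe: %d, list head id: %d\n", rc, dev_list->id);
        rc = device_probe(&bad);
        printf("bad probe: %d, list head id: %d\n", rc, dev_list->id);
        return 0;
    }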
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_OMAP4 1config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 ---help--- 5 ---help---
6 Driver for an OMAP 4 ISS controller. 6 Driver for an OMAP 4 ISS controller.
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a99c63152b8d..2c516f2eebed 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
306{ 306{
307 struct imx_thermal_data *data = platform_get_drvdata(pdev); 307 struct imx_thermal_data *data = platform_get_drvdata(pdev);
308 struct regmap *map; 308 struct regmap *map;
309 int t1, t2, n1, n2; 309 int t1, n1;
310 int ret; 310 int ret;
311 u32 val; 311 u32 val;
312 u64 temp64; 312 u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
333 /* 333 /*
334 * Sensor data layout: 334 * Sensor data layout:
335 * [31:20] - sensor value @ 25C 335 * [31:20] - sensor value @ 25C
336 * [19:8] - sensor value of hot
337 * [7:0] - hot temperature value
338 * Use universal formula now and only need sensor value @ 25C 336 * Use universal formula now and only need sensor value @ 25C
339 * slope = 0.4297157 - (0.0015976 * 25C fuse) 337 * slope = 0.4297157 - (0.0015976 * 25C fuse)
340 */ 338 */
341 n1 = val >> 20; 339 n1 = val >> 20;
342 n2 = (val & 0xfff00) >> 8;
343 t2 = val & 0xff;
344 t1 = 25; /* t1 always 25C */ 340 t1 = 25; /* t1 always 25C */
345 341
346 /* 342 /*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
366 data->c2 = n1 * data->c1 + 1000 * t1; 362 data->c2 = n1 * data->c1 + 1000 * t1;
367 363
368 /* 364 /*
369 * Set the default passive cooling trip point to 20 °C below the 365 * Set the default passive cooling trip point; it

370 * maximum die temperature. Can be changed from userspace. 366 * can be changed from userspace.
371 */ 367 */
372 data->temp_passive = 1000 * (t2 - 20); 368 data->temp_passive = IMX_TEMP_PASSIVE;
373 369
374 /* 370 /*
375 * The maximum die temperature is t2, let's give 5 °C cushion 371 * The maximum die temperature is set to 20 °C higher than
376 * for noise and possible temperature rise between measurements. 372 * IMX_TEMP_PASSIVE.
377 */ 373 */
378 data->temp_critical = 1000 * (t2 - 5); 374 data->temp_critical = 1000 * 20 + data->temp_passive;
379 375
380 return 0; 376 return 0;
381} 377}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 04b1be7fa018..4b2b999b7611 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
156 156
157 ret = thermal_zone_bind_cooling_device(thermal, 157 ret = thermal_zone_bind_cooling_device(thermal,
158 tbp->trip_id, cdev, 158 tbp->trip_id, cdev,
159 tbp->min, 159 tbp->max,
160 tbp->max); 160 tbp->min);
161 if (ret) 161 if (ret)
162 return ret; 162 return ret;
163 } 163 }
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
712 } 712 }
713 713
714 i = 0; 714 i = 0;
715 for_each_child_of_node(child, gchild) 715 for_each_child_of_node(child, gchild) {
716 ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++], 716 ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
717 tz->trips, tz->ntrips); 717 tz->trips, tz->ntrips);
718 if (ret) 718 if (ret)
719 goto free_tbps; 719 goto free_tbps;
720 }
720 721
721finish: 722finish:
722 of_node_put(child); 723 of_node_put(child);
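
Two things change in of-thermal above: the cooling-device limits are passed to thermal_zone_bind_cooling_device() in the order the fixed code uses (max, then min), and the for_each_child_of_node() body gains braces. The braces matter because for_each_child_of_node() is a for-loop macro: without them only the first statement is the loop body, so the "if (ret) goto free_tbps;" check would run once, after the loop. A tiny sketch of the macro pitfall (the macro and populate() are invented):

    #include <stdio.h>

    /* A for-loop macro in the style of for_each_child_of_node(): whatever
     * statement follows it is the loop body, so a two-statement body needs
     * braces. */
    #define for_each_item(i, n) for ((i) = 0; (i) < (n); (i)++)

    static int populate(int i) { return i == 1 ? -1 : 0; /* element 1 fails */ }

    int main(void)
    {
        int i, ret = 0;

        for_each_item(i, 3) {
            ret = populate(i);
            if (ret)        /* without the braces this check would run only
                             * once, after the loop had finished */
                break;
        }
        printf("stopped with ret = %d at i = %d\n", ret, i);
        return 0;
    }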
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index fdb07199d9c2..1967bee4f076 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
140 return NULL; 140 return NULL;
141} 141}
142 142
143static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
144{
145 unsigned long temp;
146 return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
147}
148
143int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) 149int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
144{ 150{
145 struct thermal_hwmon_device *hwmon; 151 struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
189 if (result) 195 if (result)
190 goto free_temp_mem; 196 goto free_temp_mem;
191 197
192 if (tz->ops->get_crit_temp) { 198 if (thermal_zone_crit_temp_valid(tz)) {
193 unsigned long temperature; 199 snprintf(temp->temp_crit.name,
194 if (!tz->ops->get_crit_temp(tz, &temperature)) { 200 sizeof(temp->temp_crit.name),
195 snprintf(temp->temp_crit.name,
196 sizeof(temp->temp_crit.name),
197 "temp%d_crit", hwmon->count); 201 "temp%d_crit", hwmon->count);
198 temp->temp_crit.attr.attr.name = temp->temp_crit.name; 202 temp->temp_crit.attr.attr.name = temp->temp_crit.name;
199 temp->temp_crit.attr.attr.mode = 0444; 203 temp->temp_crit.attr.attr.mode = 0444;
200 temp->temp_crit.attr.show = temp_crit_show; 204 temp->temp_crit.attr.show = temp_crit_show;
201 sysfs_attr_init(&temp->temp_crit.attr.attr); 205 sysfs_attr_init(&temp->temp_crit.attr.attr);
202 result = device_create_file(hwmon->device, 206 result = device_create_file(hwmon->device,
203 &temp->temp_crit.attr); 207 &temp->temp_crit.attr);
204 if (result) 208 if (result)
205 goto unregister_input; 209 goto unregister_input;
206 }
207 } 210 }
208 211
209 mutex_lock(&thermal_hwmon_list_lock); 212 mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
250 } 253 }
251 254
252 device_remove_file(hwmon->device, &temp->temp_input.attr); 255 device_remove_file(hwmon->device, &temp->temp_input.attr);
253 if (tz->ops->get_crit_temp) 256 if (thermal_zone_crit_temp_valid(tz))
254 device_remove_file(hwmon->device, &temp->temp_crit.attr); 257 device_remove_file(hwmon->device, &temp->temp_crit.attr);
255 258
256 mutex_lock(&thermal_hwmon_list_lock); 259 mutex_lock(&thermal_hwmon_list_lock);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index a1271b55103a..634b6ce0e63a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
1155 /* register shadow for context save and restore */ 1155 /* register shadow for context save and restore */
1156 bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * 1156 bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
1157 bgp->conf->sensor_count, GFP_KERNEL); 1157 bgp->conf->sensor_count, GFP_KERNEL);
1158 if (!bgp) { 1158 if (!bgp->regval) {
1159 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); 1159 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
1160 return ERR_PTR(-ENOMEM); 1160 return ERR_PTR(-ENOMEM);
1161 } 1161 }
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index c9f5c9dcc15c..008c223eaf26 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
177 uart->port.icount.tx++; 177 uart->port.icount.tx++;
178 uart->port.x_char = 0; 178 uart->port.x_char = 0;
179 sent = 1; 179 sent = 1;
180 } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */ 180 } else if (!uart_circ_empty(xmit)) {
181 ch = xmit->buf[xmit->tail]; 181 ch = xmit->buf[xmit->tail];
182 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 182 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
183 uart->port.icount.tx++; 183 uart->port.icount.tx++;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e2f93874989b..044e86d528ae 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
567 struct imx_port *sport = (struct imx_port *)port; 567 struct imx_port *sport = (struct imx_port *)port;
568 unsigned long temp; 568 unsigned long temp;
569 569
570 if (uart_circ_empty(&port->state->xmit))
571 return;
572
570 if (USE_IRDA(sport)) { 573 if (USE_IRDA(sport)) {
571 /* half duplex in IrDA mode; have to disable receive mode */ 574 /* half duplex in IrDA mode; have to disable receive mode */
572 temp = readl(sport->port.membase + UCR4); 575 temp = readl(sport->port.membase + UCR4);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 1efd4c36ba0c..99b7b8697861 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
603 } else { 603 } else {
604 struct circ_buf *xmit = &port->state->xmit; 604 struct circ_buf *xmit = &port->state->xmit;
605 605
606 if (uart_circ_empty(xmit))
607 return;
606 writeb(xmit->buf[xmit->tail], &channel->data); 608 writeb(xmit->buf[xmit->tail], &channel->data);
607 ZSDELAY(); 609 ZSDELAY();
608 ZS_WSYNC(channel); 610 ZS_WSYNC(channel);
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 68f2c53e0b54..5702828fb62e 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
266 if (!(up->ier & UART_IER_THRI)) { 266 if (!(up->ier & UART_IER_THRI)) {
267 up->ier |= UART_IER_THRI; 267 up->ier |= UART_IER_THRI;
268 serial_out(up, UART_IER, up->ier); 268 serial_out(up, UART_IER, up->ier);
269 serial_out(up, UART_TX, xmit->buf[xmit->tail]); 269 if (!uart_circ_empty(xmit)) {
270 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 270 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
271 up->port.icount.tx++; 271 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
272 up->port.icount.tx++;
273 }
272 } 274 }
273 while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY); 275 while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
274#else 276#else
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 8193635103ee..f7ad5b903055 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
653 } else { 653 } else {
654 struct circ_buf *xmit = &port->state->xmit; 654 struct circ_buf *xmit = &port->state->xmit;
655 655
656 if (uart_circ_empty(xmit))
657 goto out;
656 write_zsdata(uap, xmit->buf[xmit->tail]); 658 write_zsdata(uap, xmit->buf[xmit->tail]);
657 zssync(uap); 659 zssync(uap);
658 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 660 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
661 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 663 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
662 uart_write_wakeup(&uap->port); 664 uart_write_wakeup(&uap->port);
663 } 665 }
666 out:
664 pmz_debug("pmz: start_tx() done.\n"); 667 pmz_debug("pmz: start_tx() done.\n");
665} 668}
666 669
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 80a58eca785b..2f57df9a71d9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
427 struct circ_buf *xmit = &up->port.state->xmit; 427 struct circ_buf *xmit = &up->port.state->xmit;
428 int i; 428 int i;
429 429
430 if (uart_circ_empty(xmit))
431 return;
432
430 up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); 433 up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
431 writeb(up->interrupt_mask1, &up->regs->w.imr1); 434 writeb(up->interrupt_mask1, &up->regs->w.imr1);
432 435
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index a85db8b87156..02df3940b95e 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
703 } else { 703 } else {
704 struct circ_buf *xmit = &port->state->xmit; 704 struct circ_buf *xmit = &port->state->xmit;
705 705
706 if (uart_circ_empty(xmit))
707 return;
706 writeb(xmit->buf[xmit->tail], &channel->data); 708 writeb(xmit->buf[xmit->tail], &channel->data);
707 ZSDELAY(); 709 ZSDELAY();
708 ZS_WSYNC(channel); 710 ZS_WSYNC(channel);
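
The run of serial driver hunks above (arc_uart, imx, ip22zilog, m32r_sio, pmac_zilog, sunsab, sunzilog) all add the same guard: start_tx() must not dequeue from the transmit ring when it is empty, otherwise a stale byte goes out and the tail index walks away from the head. uart_circ_empty() is essentially a head == tail comparison on the circular buffer; the arc_uart hunk is the same idea expressed the other way around, replacing the open-coded test with the helper. A minimal userspace sketch of the guarded dequeue (sizes and contents invented):

    #include <stdio.h>

    #define BUF_SIZE 16         /* power of two, like UART_XMIT_SIZE */

    struct circ_buf {
        unsigned char buf[BUF_SIZE];
        int head;               /* producer index */
        int tail;               /* consumer index */
    };

    static int circ_empty(const struct circ_buf *c) { return c->head == c->tail; }

    /* Guarded start_tx() pattern: never dequeue unless the ring actually
     * holds data. */
    static int tx_one(struct circ_buf *c)
    {
        unsigned char ch;

        if (circ_empty(c))
            return -1;
        ch = c->buf[c->tail];
        c->tail = (c->tail + 1) & (BUF_SIZE - 1);
        printf("sent 0x%02x\n", ch);
        return 0;
    }

    int main(void)
    {
        struct circ_buf c = { .head = 0, .tail = 0 };

        c.buf[c.head] = 'A';
        c.head = (c.head + 1) & (BUF_SIZE - 1);

        tx_one(&c);                     /* sends 'A' */
        if (tx_one(&c) < 0)
            printf("buffer empty, nothing sent\n");
        return 0;
    }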
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9d2b673f90e3..b8125aa64ad8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
1169 1169
1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) 1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1171 cap |= QH_IOS; 1171 cap |= QH_IOS;
1172 if (hwep->num) 1172
1173 cap |= QH_ZLT; 1173 cap |= QH_ZLT;
1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; 1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1175 /* 1175 /*
1176 * For ISO-TX, we set mult at QH as the largest value, and use 1176 * For ISO-TX, we set mult at QH as the largest value, and use
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4b4082..0e950ad8cb25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
889 if (!hub_is_superspeed(hub->hdev)) 889 if (!hub_is_superspeed(hub->hdev))
890 return -EINVAL; 890 return -EINVAL;
891 891
892 ret = hub_port_status(hub, port1, &portstatus, &portchange);
893 if (ret < 0)
894 return ret;
895
896 /*
897 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
898 * Controller [1022:7814] misbehaves when the usb 3.0 port link state is
899 * set to Disabled: a usb 3.0 device hotplugged afterwards is routed to
900 * the 2.0 root hub and recognized as a high-speed device. Since the
901 * port is already in the USB_SS_PORT_LS_RX_DETECT state, check the
902 * state here to avoid the bug.
903 */
904 if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
905 USB_SS_PORT_LS_RX_DETECT) {
906 dev_dbg(&hub->ports[port1 - 1]->dev,
907 "Not disabling port; link state is RxDetect\n");
908 return ret;
909 }
910
892 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); 911 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
893 if (ret) 912 if (ret)
894 return ret; 913 return ret;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 762e4a5f5ae9..330df5ce435b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
153 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 153 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
154 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 154 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
155 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 155 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
156 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
156 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 157 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
157 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 158 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
158 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ 159 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 115662c16dcc..8a3813be1b28 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
720 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, 720 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
721 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, 721 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
722 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, 722 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
723 { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, 723 { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
724 { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
724 { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, 725 { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
725 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, 726 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
726 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, 727 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
944 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, 945 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
945 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, 946 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
946 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, 947 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
948 /* Infineon Devices */
949 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
947 { } /* Terminating entry */ 950 { } /* Terminating entry */
948}; 951};
949 952
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 500474c48f4b..c4777bc6aee0 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,12 @@
584#define RATOC_PRODUCT_ID_USB60F 0xb020 584#define RATOC_PRODUCT_ID_USB60F 0xb020
585 585
586/* 586/*
587 * Infineon Technologies
588 */
589#define INFINEON_VID 0x058b
590#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
591
592/*
587 * Acton Research Corp. 593 * Acton Research Corp.
588 */ 594 */
589#define ACTON_VID 0x0647 /* Vendor ID */ 595#define ACTON_VID 0x0647 /* Vendor ID */
@@ -798,7 +804,8 @@
798 * Submitted by Colin Leroy 804 * Submitted by Colin Leroy
799 */ 805 */
800#define TESTO_VID 0x128D 806#define TESTO_VID 0x128D
801#define TESTO_USB_INTERFACE_PID 0x0001 807#define TESTO_1_PID 0x0001
808#define TESTO_3_PID 0x0003
802 809
803/* 810/*
804 * Mobility Electronics products. 811 * Mobility Electronics products.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ac73f49cd9f0..a9688940543d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1487,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
1487 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1487 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1488 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ 1488 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
1489 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1489 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1490 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1491 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1490 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1492 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1491 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1493 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1492 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, 1494 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f2bb14..5c660c77f03b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
426 * p2m are consistent. 426 * p2m are consistent.
427 */ 427 */
428 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 428 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
429 unsigned long p;
430 struct page *scratch_page = get_balloon_scratch_page();
431
432 if (!PageHighMem(page)) { 429 if (!PageHighMem(page)) {
430 struct page *scratch_page = get_balloon_scratch_page();
431
433 ret = HYPERVISOR_update_va_mapping( 432 ret = HYPERVISOR_update_va_mapping(
434 (unsigned long)__va(pfn << PAGE_SHIFT), 433 (unsigned long)__va(pfn << PAGE_SHIFT),
435 pfn_pte(page_to_pfn(scratch_page), 434 pfn_pte(page_to_pfn(scratch_page),
436 PAGE_KERNEL_RO), 0); 435 PAGE_KERNEL_RO), 0);
437 BUG_ON(ret); 436 BUG_ON(ret);
438 }
439 p = page_to_pfn(scratch_page);
440 __set_phys_to_machine(pfn, pfn_to_mfn(p));
441 437
442 put_balloon_scratch_page(); 438 put_balloon_scratch_page();
439 }
440 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
443 } 441 }
444#endif 442#endif
445 443
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b202f2f..5f1e1f3cd186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
88 88
89 if (!si->cancelled) { 89 if (!si->cancelled) {
90 xen_irq_resume(); 90 xen_irq_resume();
91 xen_console_resume();
92 xen_timer_resume(); 91 xen_timer_resume();
93 } 92 }
94 93
@@ -135,6 +134,10 @@ static void do_suspend(void)
135 134
136 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 135 err = stop_machine(xen_suspend, &si, cpumask_of(0));
137 136
137 /* Resume console as early as possible. */
138 if (!si.cancelled)
139 xen_console_resume();
140
138 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 141 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
139 142
140 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 143 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/firmware/Makefile b/firmware/Makefile
index 5747417069ca..0862d34cf7d1 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -219,6 +219,12 @@ $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep)
219obj-y += $(patsubst %,%.gen.o, $(fw-external-y)) 219obj-y += $(patsubst %,%.gen.o, $(fw-external-y))
220obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y)) 220obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y))
221 221
222ifeq ($(KBUILD_SRC),)
223# Makefile.build only creates subdirectories for O= builds, but external
224# firmware might live outside the kernel source tree
225_dummy := $(foreach d,$(addprefix $(obj)/,$(dir $(fw-external-y))), $(shell [ -d $(d) ] || mkdir -p $(d)))
226endif
227
222# Remove .S files and binaries created from ihex 228# Remove .S files and binaries created from ihex
223# (during 'make clean' .config isn't included so they're all in $(fw-shipped-)) 229# (during 'make clean' .config isn't included so they're all in $(fw-shipped-))
224targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \ 230targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e499ed8..35de0c04729f 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
55 afs_uuid.time_low = uuidtime; 55 afs_uuid.time_low = uuidtime;
56 afs_uuid.time_mid = uuidtime >> 32; 56 afs_uuid.time_mid = uuidtime >> 32;
57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK; 57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
58 afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME; 58 afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
59 59
60 get_random_bytes(&clockseq, 2); 60 get_random_bytes(&clockseq, 2);
61 afs_uuid.clock_seq_low = clockseq; 61 afs_uuid.clock_seq_low = clockseq;
62 afs_uuid.clock_seq_hi_and_reserved = 62 afs_uuid.clock_seq_hi_and_reserved =
63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK; 63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
64 afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD; 64 afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
65 65
66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", 66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
67 afs_uuid.time_low, 67 afs_uuid.time_low,
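
The fs/afs fix is an "=" versus "|=" slip: assigning AFS_UUID_VERSION_TIME (and AFS_UUID_VARIANT_STD) overwrote the time- and clock-derived bits computed on the line just above, while OR-ing merges the version/variant field into them. The fs/coredump.c hunk further down ("tsk->flags |= PF_DUMPCORE") fixes the same class of bug. A sketch with placeholder constants (the real mask and version values are not shown in the diff):

    #include <stdio.h>

    #define VERSION_TIME 0x1000     /* placeholder version field */
    #define TIMEHI_MASK  0x0fff     /* placeholder mask for the time bits */

    int main(void)
    {
        unsigned int time_hi = 0x0abc & TIMEHI_MASK;
        unsigned int wrong, right;

        wrong = time_hi;
        wrong = VERSION_TIME;           /* '=' discards the time bits */

        right = time_hi;
        right |= VERSION_TIME;          /* '|=' keeps them, adds the version */

        printf("wrong = 0x%04x, right = 0x%04x\n", wrong, right);
        return 0;
    }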
diff --git a/fs/aio.c b/fs/aio.c
index 955947ef3e02..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
830static void put_reqs_available(struct kioctx *ctx, unsigned nr) 830static void put_reqs_available(struct kioctx *ctx, unsigned nr)
831{ 831{
832 struct kioctx_cpu *kcpu; 832 struct kioctx_cpu *kcpu;
833 unsigned long flags;
833 834
834 preempt_disable(); 835 preempt_disable();
835 kcpu = this_cpu_ptr(ctx->cpu); 836 kcpu = this_cpu_ptr(ctx->cpu);
836 837
838 local_irq_save(flags);
837 kcpu->reqs_available += nr; 839 kcpu->reqs_available += nr;
840
838 while (kcpu->reqs_available >= ctx->req_batch * 2) { 841 while (kcpu->reqs_available >= ctx->req_batch * 2) {
839 kcpu->reqs_available -= ctx->req_batch; 842 kcpu->reqs_available -= ctx->req_batch;
840 atomic_add(ctx->req_batch, &ctx->reqs_available); 843 atomic_add(ctx->req_batch, &ctx->reqs_available);
841 } 844 }
842 845
846 local_irq_restore(flags);
843 preempt_enable(); 847 preempt_enable();
844} 848}
845 849
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
847{ 851{
848 struct kioctx_cpu *kcpu; 852 struct kioctx_cpu *kcpu;
849 bool ret = false; 853 bool ret = false;
854 unsigned long flags;
850 855
851 preempt_disable(); 856 preempt_disable();
852 kcpu = this_cpu_ptr(ctx->cpu); 857 kcpu = this_cpu_ptr(ctx->cpu);
853 858
859 local_irq_save(flags);
854 if (!kcpu->reqs_available) { 860 if (!kcpu->reqs_available) {
855 int old, avail = atomic_read(&ctx->reqs_available); 861 int old, avail = atomic_read(&ctx->reqs_available);
856 862
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
869 ret = true; 875 ret = true;
870 kcpu->reqs_available--; 876 kcpu->reqs_available--;
871out: 877out:
878 local_irq_restore(flags);
872 preempt_enable(); 879 preempt_enable();
873 return ret; 880 return ret;
874} 881}
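
The fs/aio.c hunks wrap the per-cpu reqs_available manipulation in local_irq_save()/local_irq_restore(): preempt_disable() keeps the task on one CPU, but it does not stop a local interrupt from running code that touches the same counter in the middle of the read-modify-write. A userspace analogue of the pattern, using signal masking in place of interrupt masking (the counter and the SIGALRM arrangement are purely illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t reqs_available;

    static void on_alarm(int sig)
    {
        (void)sig;
        reqs_available += 1;    /* the "interrupt" side of the race */
    }

    int main(void)
    {
        struct sigaction sa;
        sigset_t block, old;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        alarm(1);

        /* Userspace analogue of local_irq_save()/local_irq_restore():
         * mask the asynchronous path around the read-modify-write. */
        sigprocmask(SIG_BLOCK, &block, &old);
        reqs_available += 4;
        sigprocmask(SIG_SETMASK, &old, NULL);

        sleep(2);               /* let the pending SIGALRM handler run */
        printf("reqs_available = %d\n", (int)reqs_available);
        return 0;
    }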
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
484 log_list); 484 log_list);
485 list_del_init(&ordered->log_list); 485 list_del_init(&ordered->log_list);
486 spin_unlock_irq(&log->log_extents_lock[index]); 486 spin_unlock_irq(&log->log_extents_lock[index]);
487
488 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
489 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
490 struct inode *inode = ordered->inode;
491 u64 start = ordered->file_offset;
492 u64 end = ordered->file_offset + ordered->len - 1;
493
494 WARN_ON(!inode);
495 filemap_fdatawrite_range(inode->i_mapping, start, end);
496 }
487 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, 497 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
488 &ordered->flags)); 498 &ordered->flags));
499
489 btrfs_put_ordered_extent(ordered); 500 btrfs_put_ordered_extent(ordered);
490 spin_lock_irq(&log->log_extents_lock[index]); 501 spin_lock_irq(&log->log_extents_lock[index]);
491 } 502 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676857f5..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1682 1682
1683 if (device->bdev) 1683 if (device->bdev) {
1684 device->fs_devices->open_devices--; 1684 device->fs_devices->open_devices--;
1685 1685 /* remove sysfs entry */
1686 /* remove sysfs entry */ 1686 btrfs_kobj_rm_device(root->fs_info, device);
1687 btrfs_kobj_rm_device(root->fs_info, device); 1687 }
1688 1688
1689 call_rcu(&device->rcu, free_device); 1689 call_rcu(&device->rcu, free_device);
1690 1690
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306 if (unlikely(nr < 0)) 306 if (unlikely(nr < 0))
307 return nr; 307 return nr;
308 308
309 tsk->flags = PF_DUMPCORE; 309 tsk->flags |= PF_DUMPCORE;
310 if (atomic_read(&mm->mm_users) == nr + 1) 310 if (atomic_read(&mm->mm_users) == nr + 1)
311 goto done; 311 goto done;
312 /* 312 /*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..194d0d122cae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
198 * L1 cache. 198 * L1 cache.
199 */ 199 */
200static inline struct page *dio_get_page(struct dio *dio, 200static inline struct page *dio_get_page(struct dio *dio,
201 struct dio_submit *sdio, size_t *from, size_t *to) 201 struct dio_submit *sdio)
202{ 202{
203 int n;
204 if (dio_pages_present(sdio) == 0) { 203 if (dio_pages_present(sdio) == 0) {
205 int ret; 204 int ret;
206 205
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
209 return ERR_PTR(ret); 208 return ERR_PTR(ret);
210 BUG_ON(dio_pages_present(sdio) == 0); 209 BUG_ON(dio_pages_present(sdio) == 0);
211 } 210 }
212 n = sdio->head++; 211 return dio->pages[sdio->head];
213 *from = n ? 0 : sdio->from;
214 *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
215 return dio->pages[n];
216} 212}
217 213
218/** 214/**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
911 while (sdio->block_in_file < sdio->final_block_in_request) { 907 while (sdio->block_in_file < sdio->final_block_in_request) {
912 struct page *page; 908 struct page *page;
913 size_t from, to; 909 size_t from, to;
914 page = dio_get_page(dio, sdio, &from, &to); 910
911 page = dio_get_page(dio, sdio);
915 if (IS_ERR(page)) { 912 if (IS_ERR(page)) {
916 ret = PTR_ERR(page); 913 ret = PTR_ERR(page);
917 goto out; 914 goto out;
918 } 915 }
916 from = sdio->head ? 0 : sdio->from;
917 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
918 sdio->head++;
919 919
920 while (from < to) { 920 while (from < to) {
921 unsigned this_chunk_bytes; /* # of bytes mapped */ 921 unsigned this_chunk_bytes; /* # of bytes mapped */
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 3f5c188953a4..0b7e28e7eaa4 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -966,10 +966,10 @@ retry:
966 continue; 966 continue;
967 } 967 }
968 968
969 if (ei->i_es_lru_nr == 0 || ei == locked_ei) 969 if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
970 !write_trylock(&ei->i_es_lock))
970 continue; 971 continue;
971 972
972 write_lock(&ei->i_es_lock);
973 shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); 973 shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
974 if (ei->i_es_lru_nr == 0) 974 if (ei->i_es_lru_nr == 0)
975 list_del_init(&ei->i_es_lru); 975 list_del_init(&ei->i_es_lru);
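
The extents_status change converts the LRU scan to write_trylock(): the shrinker runs under memory pressure, so instead of sleeping on a contended i_es_lock it simply skips that inode and moves on. A minimal pthread sketch of the skip-if-busy scan (build with -pthread; the three-entry array stands in for the LRU list):

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_rwlock_t locks[3];
        int i;

        for (i = 0; i < 3; i++)
            pthread_rwlock_init(&locks[i], NULL);

        /* Pretend someone else holds entry 1 for reading. */
        pthread_rwlock_rdlock(&locks[1]);

        for (i = 0; i < 3; i++) {
            if (pthread_rwlock_trywrlock(&locks[i]) != 0) {
                printf("entry %d busy, skipped\n", i);
                continue;
            }
            printf("entry %d reclaimed\n", i);
            pthread_rwlock_unlock(&locks[i]);
        }

        pthread_rwlock_unlock(&locks[1]);
        for (i = 0; i < 3; i++)
            pthread_rwlock_destroy(&locks[i]);
        return 0;
    }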
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index a87455df38bc..5b87fc36aab8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -338,7 +338,7 @@ out:
338 fatal = err; 338 fatal = err;
339 } else { 339 } else {
340 ext4_error(sb, "bit already cleared for inode %lu", ino); 340 ext4_error(sb, "bit already cleared for inode %lu", ino);
341 if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) { 341 if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
342 int count; 342 int count;
343 count = ext4_free_inodes_count(sb, gdp); 343 count = ext4_free_inodes_count(sb, gdp);
344 percpu_counter_sub(&sbi->s_freeinodes_counter, 344 percpu_counter_sub(&sbi->s_freeinodes_counter,
@@ -874,6 +874,13 @@ got:
874 goto out; 874 goto out;
875 } 875 }
876 876
877 BUFFER_TRACE(group_desc_bh, "get_write_access");
878 err = ext4_journal_get_write_access(handle, group_desc_bh);
879 if (err) {
880 ext4_std_error(sb, err);
881 goto out;
882 }
883
877 /* We may have to initialize the block bitmap if it isn't already */ 884 /* We may have to initialize the block bitmap if it isn't already */
878 if (ext4_has_group_desc_csum(sb) && 885 if (ext4_has_group_desc_csum(sb) &&
879 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 886 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -910,13 +917,6 @@ got:
910 } 917 }
911 } 918 }
912 919
913 BUFFER_TRACE(group_desc_bh, "get_write_access");
914 err = ext4_journal_get_write_access(handle, group_desc_bh);
915 if (err) {
916 ext4_std_error(sb, err);
917 goto out;
918 }
919
920 /* Update the relevant bg descriptor fields */ 920 /* Update the relevant bg descriptor fields */
921 if (ext4_has_group_desc_csum(sb)) { 921 if (ext4_has_group_desc_csum(sb)) {
922 int free; 922 int free;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 7f72f50a8fa7..2dcb936be90e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -752,8 +752,8 @@ void ext4_mb_generate_buddy(struct super_block *sb,
752 752
753 if (free != grp->bb_free) { 753 if (free != grp->bb_free) {
754 ext4_grp_locked_error(sb, group, 0, 0, 754 ext4_grp_locked_error(sb, group, 0, 0,
755 "%u clusters in bitmap, %u in gd; " 755 "block bitmap and bg descriptor "
756 "block bitmap corrupt.", 756 "inconsistent: %u vs %u free clusters",
757 free, grp->bb_free); 757 free, grp->bb_free);
758 /* 758 /*
759 * If we intend to continue, we consider group descriptor 759 * If we intend to continue, we consider group descriptor
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b9b9aabfb4d2..6df7bc611dbd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1525 arg = JBD2_DEFAULT_MAX_COMMIT_AGE; 1525 arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
1526 sbi->s_commit_interval = HZ * arg; 1526 sbi->s_commit_interval = HZ * arg;
1527 } else if (token == Opt_max_batch_time) { 1527 } else if (token == Opt_max_batch_time) {
1528 if (arg == 0)
1529 arg = EXT4_DEF_MAX_BATCH_TIME;
1530 sbi->s_max_batch_time = arg; 1528 sbi->s_max_batch_time = arg;
1531 } else if (token == Opt_min_batch_time) { 1529 } else if (token == Opt_min_batch_time) {
1532 sbi->s_min_batch_time = arg; 1530 sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
2809 es = sbi->s_es; 2807 es = sbi->s_es;
2810 2808
2811 if (es->s_error_count) 2809 if (es->s_error_count)
2812 ext4_msg(sb, KERN_NOTICE, "error count: %u", 2810 /* fsck newer than v1.41.13 is needed to clean this condition. */
2811 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
2813 le32_to_cpu(es->s_error_count)); 2812 le32_to_cpu(es->s_error_count));
2814 if (es->s_first_error_time) { 2813 if (es->s_first_error_time) {
2815 printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", 2814 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
2816 sb->s_id, le32_to_cpu(es->s_first_error_time), 2815 sb->s_id, le32_to_cpu(es->s_first_error_time),
2817 (int) sizeof(es->s_first_error_func), 2816 (int) sizeof(es->s_first_error_func),
2818 es->s_first_error_func, 2817 es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
2826 printk("\n"); 2825 printk("\n");
2827 } 2826 }
2828 if (es->s_last_error_time) { 2827 if (es->s_last_error_time) {
2829 printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", 2828 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
2830 sb->s_id, le32_to_cpu(es->s_last_error_time), 2829 sb->s_id, le32_to_cpu(es->s_last_error_time),
2831 (int) sizeof(es->s_last_error_func), 2830 (int) sizeof(es->s_last_error_func),
2832 es->s_last_error_func, 2831 es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3880 goto failed_mount2; 3879 goto failed_mount2;
3881 } 3880 }
3882 } 3881 }
3883
3884 /*
3885 * set up enough so that it can read an inode,
3886 * and create new inode for buddy allocator
3887 */
3888 sbi->s_gdb_count = db_count;
3889 if (!test_opt(sb, NOLOAD) &&
3890 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
3891 sb->s_op = &ext4_sops;
3892 else
3893 sb->s_op = &ext4_nojournal_sops;
3894
3895 ext4_ext_init(sb);
3896 err = ext4_mb_init(sb);
3897 if (err) {
3898 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
3899 err);
3900 goto failed_mount2;
3901 }
3902
3903 if (!ext4_check_descriptors(sb, &first_not_zeroed)) { 3882 if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
3904 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 3883 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
3905 goto failed_mount2a; 3884 goto failed_mount2;
3906 } 3885 }
3907 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) 3886 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
3908 if (!ext4_fill_flex_info(sb)) { 3887 if (!ext4_fill_flex_info(sb)) {
3909 ext4_msg(sb, KERN_ERR, 3888 ext4_msg(sb, KERN_ERR,
3910 "unable to initialize " 3889 "unable to initialize "
3911 "flex_bg meta info!"); 3890 "flex_bg meta info!");
3912 goto failed_mount2a; 3891 goto failed_mount2;
3913 } 3892 }
3914 3893
3894 sbi->s_gdb_count = db_count;
3915 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3895 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3916 spin_lock_init(&sbi->s_next_gen_lock); 3896 spin_lock_init(&sbi->s_next_gen_lock);
3917 3897
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3946 sbi->s_stripe = ext4_get_stripe_size(sbi); 3926 sbi->s_stripe = ext4_get_stripe_size(sbi);
3947 sbi->s_extent_max_zeroout_kb = 32; 3927 sbi->s_extent_max_zeroout_kb = 32;
3948 3928
3929 /*
3930 * set up enough so that it can read an inode
3931 */
3932 if (!test_opt(sb, NOLOAD) &&
3933 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
3934 sb->s_op = &ext4_sops;
3935 else
3936 sb->s_op = &ext4_nojournal_sops;
3949 sb->s_export_op = &ext4_export_ops; 3937 sb->s_export_op = &ext4_export_ops;
3950 sb->s_xattr = ext4_xattr_handlers; 3938 sb->s_xattr = ext4_xattr_handlers;
3951#ifdef CONFIG_QUOTA 3939#ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
4135 if (err) { 4123 if (err) {
4136 ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for " 4124 ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
4137 "reserved pool", ext4_calculate_resv_clusters(sb)); 4125 "reserved pool", ext4_calculate_resv_clusters(sb));
4138 goto failed_mount5; 4126 goto failed_mount4a;
4139 } 4127 }
4140 4128
4141 err = ext4_setup_system_zone(sb); 4129 err = ext4_setup_system_zone(sb);
4142 if (err) { 4130 if (err) {
4143 ext4_msg(sb, KERN_ERR, "failed to initialize system " 4131 ext4_msg(sb, KERN_ERR, "failed to initialize system "
4144 "zone (%d)", err); 4132 "zone (%d)", err);
4133 goto failed_mount4a;
4134 }
4135
4136 ext4_ext_init(sb);
4137 err = ext4_mb_init(sb);
4138 if (err) {
4139 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
4140 err);
4145 goto failed_mount5; 4141 goto failed_mount5;
4146 } 4142 }
4147 4143
@@ -4218,8 +4214,11 @@ failed_mount8:
4218failed_mount7: 4214failed_mount7:
4219 ext4_unregister_li_request(sb); 4215 ext4_unregister_li_request(sb);
4220failed_mount6: 4216failed_mount6:
4221 ext4_release_system_zone(sb); 4217 ext4_mb_release(sb);
4222failed_mount5: 4218failed_mount5:
4219 ext4_ext_release(sb);
4220 ext4_release_system_zone(sb);
4221failed_mount4a:
4223 dput(sb->s_root); 4222 dput(sb->s_root);
4224 sb->s_root = NULL; 4223 sb->s_root = NULL;
4225failed_mount4: 4224failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
4243 percpu_counter_destroy(&sbi->s_extent_cache_cnt); 4242 percpu_counter_destroy(&sbi->s_extent_cache_cnt);
4244 if (sbi->s_mmp_tsk) 4243 if (sbi->s_mmp_tsk)
4245 kthread_stop(sbi->s_mmp_tsk); 4244 kthread_stop(sbi->s_mmp_tsk);
4246failed_mount2a:
4247 ext4_mb_release(sb);
4248failed_mount2: 4245failed_mount2:
4249 for (i = 0; i < db_count; i++) 4246 for (i = 0; i < db_count; i++)
4250 brelse(sbi->s_group_desc[i]); 4247 brelse(sbi->s_group_desc[i]);
4251 ext4_kvfree(sbi->s_group_desc); 4248 ext4_kvfree(sbi->s_group_desc);
4252failed_mount: 4249failed_mount:
4253 ext4_ext_release(sb);
4254 if (sbi->s_chksum_driver) 4250 if (sbi->s_chksum_driver)
4255 crypto_free_shash(sbi->s_chksum_driver); 4251 crypto_free_shash(sbi->s_chksum_driver);
4256 if (sbi->s_proc) { 4252 if (sbi->s_proc) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0924521306b4..f8cf619edb5f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
608 * b. do not use extent cache for better performance 608 * b. do not use extent cache for better performance
609 * c. give the block addresses to blockdev 609 * c. give the block addresses to blockdev
610 */ 610 */
611static int get_data_block(struct inode *inode, sector_t iblock, 611static int __get_data_block(struct inode *inode, sector_t iblock,
612 struct buffer_head *bh_result, int create) 612 struct buffer_head *bh_result, int create, bool fiemap)
613{ 613{
614 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 614 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
615 unsigned int blkbits = inode->i_sb->s_blocksize_bits; 615 unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
637 err = 0; 637 err = 0;
638 goto unlock_out; 638 goto unlock_out;
639 } 639 }
640 if (dn.data_blkaddr == NEW_ADDR) 640 if (dn.data_blkaddr == NEW_ADDR && !fiemap)
641 goto put_out; 641 goto put_out;
642 642
643 if (dn.data_blkaddr != NULL_ADDR) { 643 if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
671 err = 0; 671 err = 0;
672 goto unlock_out; 672 goto unlock_out;
673 } 673 }
674 if (dn.data_blkaddr == NEW_ADDR) 674 if (dn.data_blkaddr == NEW_ADDR && !fiemap)
675 goto put_out; 675 goto put_out;
676 676
677 end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); 677 end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
708 return err; 708 return err;
709} 709}
710 710
711static int get_data_block(struct inode *inode, sector_t iblock,
712 struct buffer_head *bh_result, int create)
713{
714 return __get_data_block(inode, iblock, bh_result, create, false);
715}
716
717static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
718 struct buffer_head *bh_result, int create)
719{
720 return __get_data_block(inode, iblock, bh_result, create, true);
721}
722
711int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 723int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
712 u64 start, u64 len) 724 u64 start, u64 len)
713{ 725{
714 return generic_block_fiemap(inode, fieinfo, start, len, get_data_block); 726 return generic_block_fiemap(inode, fieinfo,
727 start, len, get_data_block_fiemap);
715} 728}
716 729
717static int f2fs_read_data_page(struct file *file, struct page *page) 730static int f2fs_read_data_page(struct file *file, struct page *page)
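
The f2fs/data.c change keeps the get_block_t-style callback signature intact by moving the real work into __get_data_block(), which takes an extra fiemap flag, and exposing two thin wrappers that differ only in that flag. A small sketch of the wrapper shape (all names and the block logic are invented; only the structure mirrors the patch):

    #include <stdio.h>

    /* Callback signature fixed by the caller (think get_block_t): there is
     * no room for an extra "fiemap" argument, so a shared helper takes the
     * flag and two thin wrappers adapt it. */
    typedef int (*get_block_fn)(int block, int create);

    static int __get_block(int block, int create, int fiemap)
    {
        if (block < 0)
            return -1;
        /* fiemap callers are allowed to see freshly reserved blocks */
        if (block == 0 && !fiemap)
            return -1;
        return create ? block + 1 : block;
    }

    static int get_block(int block, int create)
    {
        return __get_block(block, create, 0);
    }

    static int get_block_fiemap(int block, int create)
    {
        return __get_block(block, create, 1);
    }

    static int walk(get_block_fn fn)
    {
        return fn(0, 0);
    }

    int main(void)
    {
        printf("normal path: %d\n", walk(get_block));
        printf("fiemap path: %d\n", walk(get_block_fiemap));
        return 0;
    }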
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 966acb039e3b..a4addd72ebbd 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
376 376
377put_error: 377put_error:
378 f2fs_put_page(page, 1); 378 f2fs_put_page(page, 1);
379error:
379 /* once the failed inode becomes a bad inode, i_mode is S_IFREG */ 380 /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
380 truncate_inode_pages(&inode->i_data, 0); 381 truncate_inode_pages(&inode->i_data, 0);
381 truncate_blocks(inode, 0); 382 truncate_blocks(inode, 0);
382 remove_dirty_dir_inode(inode); 383 remove_dirty_dir_inode(inode);
383error:
384 remove_inode_page(inode); 384 remove_inode_page(inode);
385 return ERR_PTR(err); 385 return ERR_PTR(err);
386} 386}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e51c732b0dd9..58df97e174d0 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
342 struct dirty_seglist_info *dirty_info; /* dirty segment information */ 342 struct dirty_seglist_info *dirty_info; /* dirty segment information */
343 struct curseg_info *curseg_array; /* active segment information */ 343 struct curseg_info *curseg_array; /* active segment information */
344 344
345 struct list_head wblist_head; /* list of under-writeback pages */
346 spinlock_t wblist_lock; /* lock for checkpoint */
347
348 block_t seg0_blkaddr; /* block address of 0'th segment */ 345 block_t seg0_blkaddr; /* block address of 0'th segment */
349 block_t main_blkaddr; /* start block address of main area */ 346 block_t main_blkaddr; /* start block address of main area */
350 block_t ssa_blkaddr; /* start block address of SSA area */ 347 block_t ssa_blkaddr; /* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
644 */ 641 */
645static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) 642static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
646{ 643{
647 WARN_ON((nid >= NM_I(sbi)->max_nid)); 644 if (unlikely(nid < F2FS_ROOT_INO(sbi)))
645 return -EINVAL;
648 if (unlikely(nid >= NM_I(sbi)->max_nid)) 646 if (unlikely(nid >= NM_I(sbi)->max_nid))
649 return -EINVAL; 647 return -EINVAL;
650 return 0; 648 return 0;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c58e33075719..7d8b96275092 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
659 off_start = offset & (PAGE_CACHE_SIZE - 1); 659 off_start = offset & (PAGE_CACHE_SIZE - 1);
660 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); 660 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
661 661
662 f2fs_lock_op(sbi);
663
662 for (index = pg_start; index <= pg_end; index++) { 664 for (index = pg_start; index <= pg_end; index++) {
663 struct dnode_of_data dn; 665 struct dnode_of_data dn;
664 666
665 f2fs_lock_op(sbi); 667 if (index == pg_end && !off_end)
668 goto noalloc;
669
666 set_new_dnode(&dn, inode, NULL, NULL, 0); 670 set_new_dnode(&dn, inode, NULL, NULL, 0);
667 ret = f2fs_reserve_block(&dn, index); 671 ret = f2fs_reserve_block(&dn, index);
668 f2fs_unlock_op(sbi);
669 if (ret) 672 if (ret)
670 break; 673 break;
671 674noalloc:
672 if (pg_start == pg_end) 675 if (pg_start == pg_end)
673 new_size = offset + len; 676 new_size = offset + len;
674 else if (index == pg_start && off_start) 677 else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
683 i_size_read(inode) < new_size) { 686 i_size_read(inode) < new_size) {
684 i_size_write(inode, new_size); 687 i_size_write(inode, new_size);
685 mark_inode_dirty(inode); 688 mark_inode_dirty(inode);
686 f2fs_write_inode(inode, NULL); 689 update_inode_page(inode);
687 } 690 }
691 f2fs_unlock_op(sbi);
688 692
689 return ret; 693 return ret;
690} 694}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index adc622c6bdce..2cf6962f6cc8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
78 if (check_nid_range(sbi, inode->i_ino)) { 78 if (check_nid_range(sbi, inode->i_ino)) {
79 f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu", 79 f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
80 (unsigned long) inode->i_ino); 80 (unsigned long) inode->i_ino);
81 WARN_ON(1);
81 return -EINVAL; 82 return -EINVAL;
82 } 83 }
83 84
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9138c32aa698..a6bdddc33ce2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
417 } 417 }
418 418
419 f2fs_set_link(new_dir, new_entry, new_page, old_inode); 419 f2fs_set_link(new_dir, new_entry, new_page, old_inode);
420 down_write(&F2FS_I(old_inode)->i_sem);
421 F2FS_I(old_inode)->i_pino = new_dir->i_ino;
422 up_write(&F2FS_I(old_inode)->i_sem);
423 420
424 new_inode->i_ctime = CURRENT_TIME; 421 new_inode->i_ctime = CURRENT_TIME;
425 down_write(&F2FS_I(new_inode)->i_sem); 422 down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
448 } 445 }
449 } 446 }
450 447
448 down_write(&F2FS_I(old_inode)->i_sem);
449 file_lost_pino(old_inode);
450 up_write(&F2FS_I(old_inode)->i_sem);
451
451 old_inode->i_ctime = CURRENT_TIME; 452 old_inode->i_ctime = CURRENT_TIME;
452 mark_inode_dirty(old_inode); 453 mark_inode_dirty(old_inode);
453 454
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
457 if (old_dir != new_dir) { 458 if (old_dir != new_dir) {
458 f2fs_set_link(old_inode, old_dir_entry, 459 f2fs_set_link(old_inode, old_dir_entry,
459 old_dir_page, new_dir); 460 old_dir_page, new_dir);
460 down_write(&F2FS_I(old_inode)->i_sem);
461 F2FS_I(old_inode)->i_pino = new_dir->i_ino;
462 up_write(&F2FS_I(old_inode)->i_sem);
463 update_inode_page(old_inode); 461 update_inode_page(old_inode);
464 } else { 462 } else {
465 kunmap(old_dir_page); 463 kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
474 return 0; 472 return 0;
475 473
476put_out_dir: 474put_out_dir:
477 f2fs_put_page(new_page, 1); 475 kunmap(new_page);
476 f2fs_put_page(new_page, 0);
478out_dir: 477out_dir:
479 if (old_dir_entry) { 478 if (old_dir_entry) {
480 kunmap(old_dir_page); 479 kunmap(old_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9dfb9a042fd2..4b697ccc9b0c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; 42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); 43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
44 } else if (type == DIRTY_DENTS) { 44 } else if (type == DIRTY_DENTS) {
45 if (sbi->sb->s_bdi->dirty_exceeded)
46 return false;
45 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 47 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
46 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); 48 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
47 } 49 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f25f0e07e26f..d04613df710a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
272 return -ENOMEM; 272 return -ENOMEM;
273 spin_lock_init(&fcc->issue_lock); 273 spin_lock_init(&fcc->issue_lock);
274 init_waitqueue_head(&fcc->flush_wait_queue); 274 init_waitqueue_head(&fcc->flush_wait_queue);
275 sbi->sm_info->cmd_control_info = fcc;
275 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 276 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
276 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 277 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
277 if (IS_ERR(fcc->f2fs_issue_flush)) { 278 if (IS_ERR(fcc->f2fs_issue_flush)) {
278 err = PTR_ERR(fcc->f2fs_issue_flush); 279 err = PTR_ERR(fcc->f2fs_issue_flush);
279 kfree(fcc); 280 kfree(fcc);
281 sbi->sm_info->cmd_control_info = NULL;
280 return err; 282 return err;
281 } 283 }
282 sbi->sm_info->cmd_control_info = fcc;
283 284
284 return err; 285 return err;
285} 286}
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
1885 1886
1886 /* init sm info */ 1887 /* init sm info */
1887 sbi->sm_info = sm_info; 1888 sbi->sm_info = sm_info;
1888 INIT_LIST_HEAD(&sm_info->wblist_head);
1889 spin_lock_init(&sm_info->wblist_lock);
1890 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1889 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1891 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1890 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1892 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 1891 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b2b18637cb9e..8f96d9372ade 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
689 struct f2fs_sb_info *sbi = F2FS_SB(sb); 689 struct f2fs_sb_info *sbi = F2FS_SB(sb);
690 struct inode *inode; 690 struct inode *inode;
691 691
692 if (unlikely(ino < F2FS_ROOT_INO(sbi))) 692 if (check_nid_range(sbi, ino))
693 return ERR_PTR(-ESTALE);
694 if (unlikely(ino >= NM_I(sbi)->max_nid))
695 return ERR_PTR(-ESTALE); 693 return ERR_PTR(-ESTALE);
696 694
697 /* 695 /*
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
643 unsigned long seglen; 643 unsigned long seglen;
644 unsigned long addr; 644 unsigned long addr;
645 struct page *pg; 645 struct page *pg;
646 void *mapaddr;
647 void *buf;
648 unsigned len; 646 unsigned len;
647 unsigned offset;
649 unsigned move_pages:1; 648 unsigned move_pages:1;
650}; 649};
651 650
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
666 if (cs->currbuf) { 665 if (cs->currbuf) {
667 struct pipe_buffer *buf = cs->currbuf; 666 struct pipe_buffer *buf = cs->currbuf;
668 667
669 if (!cs->write) { 668 if (cs->write)
670 kunmap_atomic(cs->mapaddr);
671 } else {
672 kunmap_atomic(cs->mapaddr);
673 buf->len = PAGE_SIZE - cs->len; 669 buf->len = PAGE_SIZE - cs->len;
674 }
675 cs->currbuf = NULL; 670 cs->currbuf = NULL;
676 cs->mapaddr = NULL; 671 } else if (cs->pg) {
677 } else if (cs->mapaddr) {
678 kunmap_atomic(cs->mapaddr);
679 if (cs->write) { 672 if (cs->write) {
680 flush_dcache_page(cs->pg); 673 flush_dcache_page(cs->pg);
681 set_page_dirty_lock(cs->pg); 674 set_page_dirty_lock(cs->pg);
682 } 675 }
683 put_page(cs->pg); 676 put_page(cs->pg);
684 cs->mapaddr = NULL;
685 } 677 }
678 cs->pg = NULL;
686} 679}
687 680
688/* 681/*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
691 */ 684 */
692static int fuse_copy_fill(struct fuse_copy_state *cs) 685static int fuse_copy_fill(struct fuse_copy_state *cs)
693{ 686{
694 unsigned long offset; 687 struct page *page;
695 int err; 688 int err;
696 689
697 unlock_request(cs->fc, cs->req); 690 unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
706 699
707 BUG_ON(!cs->nr_segs); 700 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf; 701 cs->currbuf = buf;
709 cs->mapaddr = kmap_atomic(buf->page); 702 cs->pg = buf->page;
703 cs->offset = buf->offset;
710 cs->len = buf->len; 704 cs->len = buf->len;
711 cs->buf = cs->mapaddr + buf->offset;
712 cs->pipebufs++; 705 cs->pipebufs++;
713 cs->nr_segs--; 706 cs->nr_segs--;
714 } else { 707 } else {
715 struct page *page;
716
717 if (cs->nr_segs == cs->pipe->buffers) 708 if (cs->nr_segs == cs->pipe->buffers)
718 return -EIO; 709 return -EIO;
719 710
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
726 buf->len = 0; 717 buf->len = 0;
727 718
728 cs->currbuf = buf; 719 cs->currbuf = buf;
729 cs->mapaddr = kmap_atomic(page); 720 cs->pg = page;
730 cs->buf = cs->mapaddr; 721 cs->offset = 0;
731 cs->len = PAGE_SIZE; 722 cs->len = PAGE_SIZE;
732 cs->pipebufs++; 723 cs->pipebufs++;
733 cs->nr_segs++; 724 cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
740 cs->iov++; 731 cs->iov++;
741 cs->nr_segs--; 732 cs->nr_segs--;
742 } 733 }
743 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); 734 err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
744 if (err < 0) 735 if (err < 0)
745 return err; 736 return err;
746 BUG_ON(err != 1); 737 BUG_ON(err != 1);
747 offset = cs->addr % PAGE_SIZE; 738 cs->pg = page;
748 cs->mapaddr = kmap_atomic(cs->pg); 739 cs->offset = cs->addr % PAGE_SIZE;
749 cs->buf = cs->mapaddr + offset; 740 cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
750 cs->len = min(PAGE_SIZE - offset, cs->seglen);
751 cs->seglen -= cs->len; 741 cs->seglen -= cs->len;
752 cs->addr += cs->len; 742 cs->addr += cs->len;
753 } 743 }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
760{ 750{
761 unsigned ncpy = min(*size, cs->len); 751 unsigned ncpy = min(*size, cs->len);
762 if (val) { 752 if (val) {
753 void *pgaddr = kmap_atomic(cs->pg);
754 void *buf = pgaddr + cs->offset;
755
763 if (cs->write) 756 if (cs->write)
764 memcpy(cs->buf, *val, ncpy); 757 memcpy(buf, *val, ncpy);
765 else 758 else
766 memcpy(*val, cs->buf, ncpy); 759 memcpy(*val, buf, ncpy);
760
761 kunmap_atomic(pgaddr);
767 *val += ncpy; 762 *val += ncpy;
768 } 763 }
769 *size -= ncpy; 764 *size -= ncpy;
770 cs->len -= ncpy; 765 cs->len -= ncpy;
771 cs->buf += ncpy; 766 cs->offset += ncpy;
772 return ncpy; 767 return ncpy;
773} 768}
774 769
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
874out_fallback_unlock: 869out_fallback_unlock:
875 unlock_page(newpage); 870 unlock_page(newpage);
876out_fallback: 871out_fallback:
877 cs->mapaddr = kmap_atomic(buf->page); 872 cs->pg = buf->page;
878 cs->buf = cs->mapaddr + buf->offset; 873 cs->offset = buf->offset;
879 874
880 err = lock_request(cs->fc, cs->req); 875 err = lock_request(cs->fc, cs->req);
881 if (err) 876 if (err)
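
The fuse/dev.c changes above stop caching a kmap_atomic() address in the copy state and instead carry (page, offset), mapping the page only around each memcpy(). The sketch below shows the same refactor shape in plain C: resolve the destination pointer at copy time from a base and offset rather than holding a long-lived pointer. All names are hypothetical userspace stand-ins, not fuse code.

#include <stdio.h>
#include <string.h>

struct copy_state {
    char   *base;       /* stands in for cs->pg */
    size_t  offset;     /* stands in for cs->offset */
    size_t  len;        /* bytes left in this segment */
};

static size_t copy_do(struct copy_state *cs, const void *val, size_t size)
{
    size_t ncpy = size < cs->len ? size : cs->len;
    /* resolve the pointer only around the copy, not when the segment was
     * set up -- the analogue of kmap_atomic()/kunmap_atomic() moving into
     * fuse_copy_do() in the hunk above */
    char *buf = cs->base + cs->offset;

    memcpy(buf, val, ncpy);
    cs->offset += ncpy;
    cs->len -= ncpy;
    return ncpy;
}

int main(void)
{
    char page[16] = { 0 };
    struct copy_state cs = { .base = page, .offset = 4, .len = 11 };

    copy_do(&cs, "hello", 6);           /* copy the NUL too */
    printf("page+4 = \"%s\", next offset = %zu\n", page + 4, cs.offset);
    return 0;
}
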
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
198 inode = ACCESS_ONCE(entry->d_inode); 198 inode = ACCESS_ONCE(entry->d_inode);
199 if (inode && is_bad_inode(inode)) 199 if (inode && is_bad_inode(inode))
200 goto invalid; 200 goto invalid;
201 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 201 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
202 (flags & LOOKUP_REVAL)) {
202 int err; 203 int err;
203 struct fuse_entry_out outarg; 204 struct fuse_entry_out outarg;
204 struct fuse_req *req; 205 struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
814 return err; 815 return err;
815} 816}
816 817
817static int fuse_rename(struct inode *olddir, struct dentry *oldent,
818 struct inode *newdir, struct dentry *newent)
819{
820 return fuse_rename_common(olddir, oldent, newdir, newent, 0,
821 FUSE_RENAME, sizeof(struct fuse_rename_in));
822}
823
824static int fuse_rename2(struct inode *olddir, struct dentry *oldent, 818static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
825 struct inode *newdir, struct dentry *newent, 819 struct inode *newdir, struct dentry *newent,
826 unsigned int flags) 820 unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
831 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 825 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
832 return -EINVAL; 826 return -EINVAL;
833 827
834 if (fc->no_rename2 || fc->minor < 23) 828 if (flags) {
835 return -EINVAL; 829 if (fc->no_rename2 || fc->minor < 23)
830 return -EINVAL;
836 831
837 err = fuse_rename_common(olddir, oldent, newdir, newent, flags, 832 err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
838 FUSE_RENAME2, sizeof(struct fuse_rename2_in)); 833 FUSE_RENAME2,
839 if (err == -ENOSYS) { 834 sizeof(struct fuse_rename2_in));
840 fc->no_rename2 = 1; 835 if (err == -ENOSYS) {
841 err = -EINVAL; 836 fc->no_rename2 = 1;
837 err = -EINVAL;
838 }
839 } else {
840 err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
841 FUSE_RENAME,
842 sizeof(struct fuse_rename_in));
842 } 843 }
844
843 return err; 845 return err;
846}
844 847
848static int fuse_rename(struct inode *olddir, struct dentry *oldent,
849 struct inode *newdir, struct dentry *newent)
850{
851 return fuse_rename2(olddir, oldent, newdir, newent, 0);
845} 852}
846 853
847static int fuse_link(struct dentry *entry, struct inode *newdir, 854static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
985 int err; 992 int err;
986 bool r; 993 bool r;
987 994
988 if (fi->i_time < get_jiffies_64()) { 995 if (time_before64(fi->i_time, get_jiffies_64())) {
989 r = true; 996 r = true;
990 err = fuse_do_getattr(inode, stat, file); 997 err = fuse_do_getattr(inode, stat, file);
991 } else { 998 } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
1171 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1178 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1172 struct fuse_inode *fi = get_fuse_inode(inode); 1179 struct fuse_inode *fi = get_fuse_inode(inode);
1173 1180
1174 if (fi->i_time < get_jiffies_64()) { 1181 if (time_before64(fi->i_time, get_jiffies_64())) {
1175 refreshed = true; 1182 refreshed = true;
1176 1183
1177 err = fuse_perm_getattr(inode, mask); 1184 err = fuse_perm_getattr(inode, mask);
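
The fuse/dir.c hunks replace plain "<" comparisons of 64-bit jiffies values with time_before64(), which stays correct when the counter wraps. A small stand-alone illustration of the signed-difference trick; this is a sketch, not the kernel macro, and it assumes the usual two's-complement conversion:

#include <stdint.h>
#include <stdio.h>

/* true if a is earlier than b, even across a counter wrap */
static int before64(uint64_t a, uint64_t b)
{
    return (int64_t)(a - b) < 0;
}

int main(void)
{
    uint64_t pre_wrap  = UINT64_MAX - 5;   /* stamped just before the wrap */
    uint64_t post_wrap = 10;               /* stamped just after the wrap */

    /* a naive "<" claims the older stamp is not earlier -- wrong */
    printf("naive    : %d\n", pre_wrap < post_wrap);            /* 0 */
    /* the signed difference gets both directions right */
    printf("before64 : %d\n", before64(pre_wrap, post_wrap));   /* 1 */
    printf("before64 : %d\n", before64(post_wrap, pre_wrap));   /* 0 */
    return 0;
}
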
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
1687 error = -EIO; 1687 error = -EIO;
1688 req->ff = fuse_write_file_get(fc, fi); 1688 req->ff = fuse_write_file_get(fc, fi);
1689 if (!req->ff) 1689 if (!req->ff)
1690 goto err_free; 1690 goto err_nofile;
1691 1691
1692 fuse_write_fill(req, req->ff, page_offset(page), 0); 1692 fuse_write_fill(req, req->ff, page_offset(page), 0);
1693 1693
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
1715 1715
1716 return 0; 1716 return 0;
1717 1717
1718err_nofile:
1719 __free_page(tmp_page);
1718err_free: 1720err_free:
1719 fuse_request_free(req); 1721 fuse_request_free(req);
1720err: 1722err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
1955 data.ff = NULL; 1957 data.ff = NULL;
1956 1958
1957 err = -ENOMEM; 1959 err = -ENOMEM;
1958 data.orig_pages = kzalloc(sizeof(struct page *) * 1960 data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
1959 FUSE_MAX_PAGES_PER_REQ, 1961 sizeof(struct page *),
1960 GFP_NOFS); 1962 GFP_NOFS);
1961 if (!data.orig_pages) 1963 if (!data.orig_pages)
1962 goto out; 1964 goto out;
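
The fuse_writepages() hunk swaps kzalloc(sizeof(ptr) * N, ...) for kcalloc(N, sizeof(ptr), ...), whose multiplication is overflow-checked. A hypothetical userspace equivalent of that checked array allocation, assuming the GCC/Clang __builtin_mul_overflow builtin:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* calloc-style helper: refuse to allocate if n * size would wrap */
static void *zalloc_array(size_t n, size_t size)
{
    size_t bytes;

    if (__builtin_mul_overflow(n, size, &bytes))
        return NULL;                    /* would have wrapped silently */
    void *p = malloc(bytes);
    if (p)
        memset(p, 0, bytes);
    return p;
}

int main(void)
{
    void *ok  = zalloc_array(128, sizeof(void *));      /* fine */
    void *bad = zalloc_array((size_t)-1 / 2, 16);       /* refused */

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}
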
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
478 {OPT_ERR, NULL} 478 {OPT_ERR, NULL}
479}; 479};
480 480
481static int fuse_match_uint(substring_t *s, unsigned int *res)
482{
483 int err = -ENOMEM;
484 char *buf = match_strdup(s);
485 if (buf) {
486 err = kstrtouint(buf, 10, res);
487 kfree(buf);
488 }
489 return err;
490}
491
481static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) 492static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
482{ 493{
483 char *p; 494 char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
488 while ((p = strsep(&opt, ",")) != NULL) { 499 while ((p = strsep(&opt, ",")) != NULL) {
489 int token; 500 int token;
490 int value; 501 int value;
502 unsigned uv;
491 substring_t args[MAX_OPT_ARGS]; 503 substring_t args[MAX_OPT_ARGS];
492 if (!*p) 504 if (!*p)
493 continue; 505 continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
511 break; 523 break;
512 524
513 case OPT_USER_ID: 525 case OPT_USER_ID:
514 if (match_int(&args[0], &value)) 526 if (fuse_match_uint(&args[0], &uv))
515 return 0; 527 return 0;
516 d->user_id = make_kuid(current_user_ns(), value); 528 d->user_id = make_kuid(current_user_ns(), uv);
517 if (!uid_valid(d->user_id)) 529 if (!uid_valid(d->user_id))
518 return 0; 530 return 0;
519 d->user_id_present = 1; 531 d->user_id_present = 1;
520 break; 532 break;
521 533
522 case OPT_GROUP_ID: 534 case OPT_GROUP_ID:
523 if (match_int(&args[0], &value)) 535 if (fuse_match_uint(&args[0], &uv))
524 return 0; 536 return 0;
525 d->group_id = make_kgid(current_user_ns(), value); 537 d->group_id = make_kgid(current_user_ns(), uv);
526 if (!gid_valid(d->group_id)) 538 if (!gid_valid(d->group_id))
527 return 0; 539 return 0;
528 d->group_id_present = 1; 540 d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
895 fc->writeback_cache = 1; 907 fc->writeback_cache = 1;
896 if (arg->time_gran && arg->time_gran <= 1000000000) 908 if (arg->time_gran && arg->time_gran <= 1000000000)
897 fc->sb->s_time_gran = arg->time_gran; 909 fc->sb->s_time_gran = arg->time_gran;
898 else
899 fc->sb->s_time_gran = 1000000000;
900
901 } else { 910 } else {
902 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 911 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
903 fc->no_lock = 1; 912 fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
926 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 935 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
927 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 936 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
928 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 937 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
929 FUSE_WRITEBACK_CACHE; 938 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
930 req->in.h.opcode = FUSE_INIT; 939 req->in.h.opcode = FUSE_INIT;
931 req->in.numargs = 1; 940 req->in.numargs = 1;
932 req->in.args[0].size = sizeof(*arg); 941 req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1006 1015
1007 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1016 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
1008 1017
1009 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 1018 if (!parse_fuse_opt(data, &d, is_bdev))
1010 goto err; 1019 goto err;
1011 1020
1012 if (is_bdev) { 1021 if (is_bdev) {
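
parse_fuse_opt() now routes user_id/group_id through fuse_match_uint() so the value is parsed as an unsigned integer rather than through match_int(), which would accept a sign. A rough userspace sketch of the same idea, using strtoul as a stand-in for kstrtouint (all names are made up):

#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint(const char *s, unsigned int *res)
{
    char *end;
    unsigned long v;

    if (!isdigit((unsigned char)*s))    /* reject '-', '+' and blanks */
        return -EINVAL;
    errno = 0;
    v = strtoul(s, &end, 10);
    if (errno || *end != '\0' || v > UINT_MAX)
        return -ERANGE;
    *res = (unsigned int)v;
    return 0;
}

int main(void)
{
    unsigned int uv = 0;
    int err = parse_uint("4294967294", &uv);

    printf("4294967294 -> err=%d uv=%u\n", err, uv);
    printf("-1         -> err=%d\n", parse_uint("-1", &uv));
    return 0;
}
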
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
981 int error = 0; 981 int error = 0;
982 982
983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; 984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
985 985
986 mutex_lock(&fp->f_fl_mutex); 986 mutex_lock(&fp->f_fl_mutex);
987 987
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
991 goto out; 991 goto out;
992 flock_lock_file_wait(file, 992 flock_lock_file_wait(file,
993 &(struct file_lock){.fl_type = F_UNLCK}); 993 &(struct file_lock){.fl_type = F_UNLCK});
994 gfs2_glock_dq_wait(fl_gh); 994 gfs2_glock_dq(fl_gh);
995 gfs2_holder_reinit(state, flags, fl_gh); 995 gfs2_holder_reinit(state, flags, fl_gh);
996 } else { 996 } else {
997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, 997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
731 cachep = gfs2_glock_aspace_cachep; 731 cachep = gfs2_glock_aspace_cachep;
732 else 732 else
733 cachep = gfs2_glock_cachep; 733 cachep = gfs2_glock_cachep;
734 gl = kmem_cache_alloc(cachep, GFP_KERNEL); 734 gl = kmem_cache_alloc(cachep, GFP_NOFS);
735 if (!gl) 735 if (!gl)
736 return -ENOMEM; 736 return -ENOMEM;
737 737
738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
739 739
740 if (glops->go_flags & GLOF_LVB) { 740 if (glops->go_flags & GLOF_LVB) {
741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); 741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
742 if (!gl->gl_lksb.sb_lvbptr) { 742 if (!gl->gl_lksb.sb_lvbptr) {
743 kmem_cache_free(cachep, gl); 743 kmem_cache_free(cachep, gl);
744 return -ENOMEM; 744 return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1405 list_del_init(&gl->gl_lru); 1405 list_del_init(&gl->gl_lru);
1406 if (!spin_trylock(&gl->gl_spin)) { 1406 if (!spin_trylock(&gl->gl_spin)) {
1407add_back_to_lru:
1407 list_add(&gl->gl_lru, &lru_list); 1408 list_add(&gl->gl_lru, &lru_list);
1408 atomic_inc(&lru_count); 1409 atomic_inc(&lru_count);
1409 continue; 1410 continue;
1410 } 1411 }
1412 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1413 spin_unlock(&gl->gl_spin);
1414 goto add_back_to_lru;
1415 }
1411 clear_bit(GLF_LRU, &gl->gl_flags); 1416 clear_bit(GLF_LRU, &gl->gl_flags);
1412 spin_unlock(&lru_lock);
1413 gl->gl_lockref.count++; 1417 gl->gl_lockref.count++;
1414 if (demote_ok(gl)) 1418 if (demote_ok(gl))
1415 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1419 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
1417 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1421 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1418 gl->gl_lockref.count--; 1422 gl->gl_lockref.count--;
1419 spin_unlock(&gl->gl_spin); 1423 spin_unlock(&gl->gl_spin);
1420 spin_lock(&lru_lock); 1424 cond_resched_lock(&lru_lock);
1421 } 1425 }
1422} 1426}
1423 1427
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
1442 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1446 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1443 1447
1444 /* Test for being demotable */ 1448 /* Test for being demotable */
1445 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1449 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1446 list_move(&gl->gl_lru, &dispose); 1450 list_move(&gl->gl_lru, &dispose);
1447 atomic_dec(&lru_count); 1451 atomic_dec(&lru_count);
1448 freed++; 1452 freed++;
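
The gfs2 LRU disposal hunk replaces an unconditional re-take of lru_lock with cond_resched_lock(), so a long disposal pass can yield the lock to other waiters. The sketch below shows the general lock-breaking idea with a pthread mutex; it drops the lock on a fixed interval rather than only when needed, which is a simplification, and every name is hypothetical.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void dispose_items(int nr_items)
{
    pthread_mutex_lock(&list_lock);
    for (int i = 0; i < nr_items; i++) {
        /* ... dispose one item while holding list_lock ... */

        if ((i & 63) == 63) {               /* every 64 items */
            pthread_mutex_unlock(&list_lock);
            sched_yield();                  /* let other lockers in */
            pthread_mutex_lock(&list_lock);
        }
    }
    pthread_mutex_unlock(&list_lock);
    printf("disposed %d items\n", nr_items);
}

int main(void)
{
    dispose_items(1000);
    return 0;
}
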
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
234 * inode_go_inval - prepare a inode glock to be released 234 * inode_go_inval - prepare a inode glock to be released
235 * @gl: the glock 235 * @gl: the glock
236 * @flags: 236 * @flags:
237 * 237 *
238 * Normally we invlidate everything, but if we are moving into 238 * Normally we invalidate everything, but if we are moving into
239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
240 * can keep hold of the metadata, since it won't have changed. 240 * can keep hold of the metadata, since it won't have changed.
241 * 241 *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1036 1036
1037 new_size = old_size + RECOVER_SIZE_INC; 1037 new_size = old_size + RECOVER_SIZE_INC;
1038 1038
1039 submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1039 submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1040 result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1040 result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1041 if (!submit || !result) { 1041 if (!submit || !result) {
1042 kfree(submit); 1042 kfree(submit);
1043 kfree(result); 1043 kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
337 337
338/** 338/**
339 * gfs2_free_extlen - Return extent length of free blocks 339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rbm: Starting position 340 * @rrbm: Starting position
341 * @len: Max length to check 341 * @len: Max length to check
342 * 342 *
343 * Starting at the block specified by the rbm, see how many free blocks 343 * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2522 2522
2523/** 2523/**
2524 * gfs2_rlist_free - free a resource group list 2524 * gfs2_rlist_free - free a resource group list
2525 * @list: the list of resource groups 2525 * @rlist: the list of resource groups
2526 * 2526 *
2527 */ 2527 */
2528 2528
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 38cfcf5f6fce..6f0f590cc5a3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle)
1588 * to perform a synchronous write. We do this to detect the 1588 * to perform a synchronous write. We do this to detect the
1589 * case where a single process is doing a stream of sync 1589 * case where a single process is doing a stream of sync
1590 * writes. No point in waiting for joiners in that case. 1590 * writes. No point in waiting for joiners in that case.
1591 *
1592 * Setting max_batch_time to 0 disables this completely.
1591 */ 1593 */
1592 pid = current->pid; 1594 pid = current->pid;
1593 if (handle->h_sync && journal->j_last_sync_writer != pid) { 1595 if (handle->h_sync && journal->j_last_sync_writer != pid &&
1596 journal->j_max_batch_time) {
1594 u64 commit_time, trans_time; 1597 u64 commit_time, trans_time;
1595 1598
1596 journal->j_last_sync_writer = pid; 1599 journal->j_last_sync_writer = pid;
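
The jbd2_journal_stop() hunk makes a max_batch_time of 0 disable the sync-writer batching wait entirely. Below is a loose, hypothetical sketch of that decision in plain C; the real code compares measured transaction and commit times, and this only mirrors the shape of the test:

#include <stdio.h>

struct journal_stats {
    unsigned long last_sync_writer;   /* pid of the previous sync committer */
    unsigned long max_batch_time;     /* usecs; 0 disables batching */
    unsigned long avg_commit_time;    /* measured commit cost, usecs */
};

static unsigned long batch_wait_usecs(const struct journal_stats *j,
                                      unsigned long pid, int sync)
{
    if (!sync || pid == j->last_sync_writer || !j->max_batch_time)
        return 0;                       /* same writer, or batching off */
    /* never wait longer than a commit costs, nor past the admin's cap */
    return j->avg_commit_time < j->max_batch_time ?
           j->avg_commit_time : j->max_batch_time;
}

int main(void)
{
    struct journal_stats j = {
        .last_sync_writer = 1,
        .max_batch_time   = 15000,
        .avg_commit_time  = 2000,
    };

    printf("new sync writer: wait %lu us\n", batch_wait_usecs(&j, 2, 1));
    j.max_batch_time = 0;
    printf("batching off   : wait %lu us\n", batch_wait_usecs(&j, 2, 1));
    return 0;
}
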
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index d171b98a6cdd..f973ae9b05f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
211 kernfs_put(root_kn); 211 kernfs_put(root_kn);
212} 212}
213 213
214/**
215 * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
216 * @kernfs_root: the kernfs_root in question
217 * @ns: the namespace tag
218 *
219 * Pin the superblock so the superblock won't be destroyed in subsequent
220 * operations. This can be used to block ->kill_sb() which may be useful
221 * for kernfs users which dynamically manage superblocks.
222 *
223 * Returns NULL if there's no superblock associated to this kernfs_root, or
224 * -EINVAL if the superblock is being freed.
225 */
226struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
227{
228 struct kernfs_super_info *info;
229 struct super_block *sb = NULL;
230
231 mutex_lock(&kernfs_mutex);
232 list_for_each_entry(info, &root->supers, node) {
233 if (info->ns == ns) {
234 sb = info->sb;
235 if (!atomic_inc_not_zero(&info->sb->s_active))
236 sb = ERR_PTR(-EINVAL);
237 break;
238 }
239 }
240 mutex_unlock(&kernfs_mutex);
241 return sb;
242}
243
214void __init kernfs_init(void) 244void __init kernfs_init(void)
215{ 245{
216 kernfs_node_cache = kmem_cache_create("kernfs_node_cache", 246 kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
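
kernfs_pin_sb() above pins a superblock with atomic_inc_not_zero(&sb->s_active), i.e. it takes a reference only if the count has not already hit zero. A self-contained C11-atomics sketch of that "get unless zero" idiom (hypothetical names, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object { atomic_int refs; };

/* take a reference only if the object is still alive (refs > 0) */
static bool get_unless_zero(struct object *obj)
{
    int old = atomic_load(&obj->refs);

    while (old != 0) {
        /* try to bump old -> old + 1; "old" is refreshed if we race */
        if (atomic_compare_exchange_weak(&obj->refs, &old, old + 1))
            return true;
    }
    return false;               /* already on its way to destruction */
}

int main(void)
{
    struct object live = { .refs = 1 };
    struct object dead = { .refs = 0 };

    printf("pin live object: %d\n", get_unless_zero(&live));    /* 1 */
    printf("pin dead object: %d\n", get_unless_zero(&dead));    /* 0 */
    return 0;
}
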
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f368485..9eb787e5c167 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
2256 goto out; 2256 goto out;
2257 } 2257 }
2258 path->dentry = dentry; 2258 path->dentry = dentry;
2259 path->mnt = mntget(nd->path.mnt); 2259 path->mnt = nd->path.mnt;
2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) 2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2261 return 1; 2261 return 1;
2262 mntget(path->mnt);
2262 follow_mount(path); 2263 follow_mount(path);
2263 error = 0; 2264 error = 0;
2264out: 2265out:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
756 spin_unlock(&dreq->lock); 756 spin_unlock(&dreq->lock);
757 757
758 while (!list_empty(&hdr->pages)) { 758 while (!list_empty(&hdr->pages)) {
759 bool do_destroy = true;
760 759
761 req = nfs_list_entry(hdr->pages.next); 760 req = nfs_list_entry(hdr->pages.next);
762 nfs_list_remove_request(req); 761 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
765 case NFS_IOHDR_NEED_COMMIT: 764 case NFS_IOHDR_NEED_COMMIT:
766 kref_get(&req->wb_kref); 765 kref_get(&req->wb_kref);
767 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 766 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
768 do_destroy = false;
769 } 767 }
770 nfs_unlock_and_release_request(req); 768 nfs_unlock_and_release_request(req);
771 } 769 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); 244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, 245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
246 const struct rpc_call_ops *, int, int); 246 const struct rpc_call_ops *, int, int);
247void nfs_free_request(struct nfs_page *req);
247 248
248static inline void nfs_iocounter_init(struct nfs_io_counter *c) 249static inline void nfs_iocounter_init(struct nfs_io_counter *c)
249{ 250{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
247 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
248 NULL, 248 NULL,
249}; 249};
250
251static int
252nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
253 size_t size, ssize_t *result)
254{
255 struct posix_acl *acl;
256 char *p = data + *result;
257
258 acl = get_acl(inode, type);
259 if (!acl)
260 return 0;
261
262 posix_acl_release(acl);
263
264 *result += strlen(name);
265 *result += 1;
266 if (!size)
267 return 0;
268 if (*result > size)
269 return -ERANGE;
270
271 strcpy(p, name);
272 return 0;
273}
274
275ssize_t
276nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
277{
278 struct inode *inode = dentry->d_inode;
279 ssize_t result = 0;
280 int error;
281
282 error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
283 POSIX_ACL_XATTR_ACCESS, data, size, &result);
284 if (error)
285 return error;
286
287 error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
288 POSIX_ACL_XATTR_DEFAULT, data, size, &result);
289 if (error)
290 return error;
291 return result;
292}
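
nfs3_listxattr() above follows the usual listxattr contract: append each name NUL-terminated, treat size == 0 as a pure length probe, and return -ERANGE when the caller's buffer is too small. A hypothetical userspace sketch of that buffer convention:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static int list_one(const char *name, char *data, size_t size,
                    ssize_t *result)
{
    char *p = data + *result;

    *result += strlen(name) + 1;        /* the name plus its NUL */
    if (!size)
        return 0;                       /* size probe: just count */
    if ((size_t)*result > size)
        return -ERANGE;                 /* caller's buffer too small */
    strcpy(p, name);
    return 0;
}

int main(void)
{
    char buf[64];
    ssize_t len = 0;

    if (list_one("system.posix_acl_access", buf, sizeof(buf), &len) ||
        list_one("system.posix_acl_default", buf, sizeof(buf), &len))
        return 1;
    printf("list uses %zd bytes (two NUL-terminated names)\n", len);
    return 0;
}
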
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
885 .getattr = nfs_getattr, 885 .getattr = nfs_getattr,
886 .setattr = nfs_setattr, 886 .setattr = nfs_setattr,
887#ifdef CONFIG_NFS_V3_ACL 887#ifdef CONFIG_NFS_V3_ACL
888 .listxattr = generic_listxattr, 888 .listxattr = nfs3_listxattr,
889 .getxattr = generic_getxattr, 889 .getxattr = generic_getxattr,
890 .setxattr = generic_setxattr, 890 .setxattr = generic_setxattr,
891 .removexattr = generic_removexattr, 891 .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
899 .getattr = nfs_getattr, 899 .getattr = nfs_getattr,
900 .setattr = nfs_setattr, 900 .setattr = nfs_setattr,
901#ifdef CONFIG_NFS_V3_ACL 901#ifdef CONFIG_NFS_V3_ACL
902 .listxattr = generic_listxattr, 902 .listxattr = nfs3_listxattr,
903 .getxattr = generic_getxattr, 903 .getxattr = generic_getxattr,
904 .setxattr = generic_setxattr, 904 .setxattr = generic_setxattr,
905 .removexattr = generic_removexattr, 905 .removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
29static struct kmem_cache *nfs_page_cachep; 29static struct kmem_cache *nfs_page_cachep;
30static const struct rpc_call_ops nfs_pgio_common_ops; 30static const struct rpc_call_ops nfs_pgio_common_ops;
31 31
32static void nfs_free_request(struct nfs_page *);
33
34static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) 32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
35{ 33{
36 p->npages = pagecount; 34 p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
239 WARN_ON_ONCE(prev == req); 237 WARN_ON_ONCE(prev == req);
240 238
241 if (!prev) { 239 if (!prev) {
240 /* a head request */
242 req->wb_head = req; 241 req->wb_head = req;
243 req->wb_this_page = req; 242 req->wb_this_page = req;
244 } else { 243 } else {
244 /* a subrequest */
245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); 245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); 246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
247 req->wb_head = prev->wb_head; 247 req->wb_head = prev->wb_head;
248 req->wb_this_page = prev->wb_this_page; 248 req->wb_this_page = prev->wb_this_page;
249 prev->wb_this_page = req; 249 prev->wb_this_page = req;
250 250
251 /* All subrequests take a ref on the head request until
252 * nfs_page_group_destroy is called */
253 kref_get(&req->wb_head->wb_kref);
254
251 /* grab extra ref if head request has extra ref from 255 /* grab extra ref if head request has extra ref from
252 * the write/commit path to handle handoff between write 256 * the write/commit path to handle handoff between write
253 * and commit lists */ 257 * and commit lists */
254 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) 258 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
259 set_bit(PG_INODE_REF, &req->wb_flags);
255 kref_get(&req->wb_kref); 260 kref_get(&req->wb_kref);
261 }
256 } 262 }
257} 263}
258 264
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 275 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
270 struct nfs_page *tmp, *next; 276 struct nfs_page *tmp, *next;
271 277
278 /* subrequests must release the ref on the head request */
279 if (req->wb_head != req)
280 nfs_release_request(req->wb_head);
281
272 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) 282 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
273 return; 283 return;
274 284
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
394 * 404 *
395 * Note: Should never be called with the spinlock held! 405 * Note: Should never be called with the spinlock held!
396 */ 406 */
397static void nfs_free_request(struct nfs_page *req) 407void nfs_free_request(struct nfs_page *req)
398{ 408{
399 WARN_ON_ONCE(req->wb_this_page != req); 409 WARN_ON_ONCE(req->wb_this_page != req);
400 410
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
925 nfs_pageio_doio(desc); 935 nfs_pageio_doio(desc);
926 if (desc->pg_error < 0) 936 if (desc->pg_error < 0)
927 return 0; 937 return 0;
928 desc->pg_moreio = 0;
929 if (desc->pg_recoalesce) 938 if (desc->pg_recoalesce)
930 return 0; 939 return 0;
931 /* retry add_request for this subreq */ 940 /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
972 desc->pg_count = 0; 981 desc->pg_count = 0;
973 desc->pg_base = 0; 982 desc->pg_base = 0;
974 desc->pg_recoalesce = 0; 983 desc->pg_recoalesce = 0;
984 desc->pg_moreio = 0;
975 985
976 while (!list_empty(&head)) { 986 while (!list_empty(&head)) {
977 struct nfs_page *req; 987 struct nfs_page *req;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 47static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48static const struct nfs_rw_ops nfs_rw_write_ops; 48static const struct nfs_rw_ops nfs_rw_write_ops;
49static void nfs_clear_request_commit(struct nfs_page *req);
49 50
50static struct kmem_cache *nfs_wdata_cachep; 51static struct kmem_cache *nfs_wdata_cachep;
51static mempool_t *nfs_wdata_mempool; 52static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
91 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 92 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
92} 93}
93 94
95/*
96 * nfs_page_find_head_request_locked - find head request associated with @page
97 *
98 * must be called while holding the inode lock.
99 *
100 * returns matching head request with reference held, or NULL if not found.
101 */
94static struct nfs_page * 102static struct nfs_page *
95nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 103nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
96{ 104{
97 struct nfs_page *req = NULL; 105 struct nfs_page *req = NULL;
98 106
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
104 /* Linearly search the commit list for the correct req */ 112 /* Linearly search the commit list for the correct req */
105 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 113 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
106 if (freq->wb_page == page) { 114 if (freq->wb_page == page) {
107 req = freq; 115 req = freq->wb_head;
108 break; 116 break;
109 } 117 }
110 } 118 }
111 } 119 }
112 120
113 if (req) 121 if (req) {
122 WARN_ON_ONCE(req->wb_head != req);
123
114 kref_get(&req->wb_kref); 124 kref_get(&req->wb_kref);
125 }
115 126
116 return req; 127 return req;
117} 128}
118 129
119static struct nfs_page *nfs_page_find_request(struct page *page) 130/*
131 * nfs_page_find_head_request - find head request associated with @page
132 *
133 * returns matching head request with reference held, or NULL if not found.
134 */
135static struct nfs_page *nfs_page_find_head_request(struct page *page)
120{ 136{
121 struct inode *inode = page_file_mapping(page)->host; 137 struct inode *inode = page_file_mapping(page)->host;
122 struct nfs_page *req = NULL; 138 struct nfs_page *req = NULL;
123 139
124 spin_lock(&inode->i_lock); 140 spin_lock(&inode->i_lock);
125 req = nfs_page_find_request_locked(NFS_I(inode), page); 141 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
126 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
127 return req; 143 return req;
128} 144}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
274 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 290 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
275} 291}
276 292
277static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 293
294/* nfs_page_group_clear_bits
295 * @req - an nfs request
296 * clears all page group related bits from @req
297 */
298static void
299nfs_page_group_clear_bits(struct nfs_page *req)
300{
301 clear_bit(PG_TEARDOWN, &req->wb_flags);
302 clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
303 clear_bit(PG_UPTODATE, &req->wb_flags);
304 clear_bit(PG_WB_END, &req->wb_flags);
305 clear_bit(PG_REMOVE, &req->wb_flags);
306}
307
308
309/*
310 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
311 *
312 * this is a helper function for nfs_lock_and_join_requests
313 *
314 * @inode - inode associated with request page group, must be holding inode lock
315 * @head - head request of page group, must be holding head lock
316 * @req - request that couldn't lock and needs to wait on the req bit lock
317 * @nonblock - if true, don't actually wait
318 *
319 * NOTE: this must be called holding page_group bit lock and inode spin lock
320 * and BOTH will be released before returning.
321 *
322 * returns 0 on success, < 0 on error.
323 */
324static int
325nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
326 struct nfs_page *req, bool nonblock)
327 __releases(&inode->i_lock)
328{
329 struct nfs_page *tmp;
330 int ret;
331
332 /* relinquish all the locks successfully grabbed this run */
333 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
334 nfs_unlock_request(tmp);
335
336 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
337
338 /* grab a ref on the request that will be waited on */
339 kref_get(&req->wb_kref);
340
341 nfs_page_group_unlock(head);
342 spin_unlock(&inode->i_lock);
343
344 /* release ref from nfs_page_find_head_request_locked */
345 nfs_release_request(head);
346
347 if (!nonblock)
348 ret = nfs_wait_on_request(req);
349 else
350 ret = -EAGAIN;
351 nfs_release_request(req);
352
353 return ret;
354}
355
356/*
357 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
358 *
359 * @destroy_list - request list (using wb_this_page) terminated by @old_head
360 * @old_head - the old head of the list
361 *
362 * All subrequests must be locked and removed from all lists, so at this point
363 * they are only "active" in this function, and possibly in nfs_wait_on_request
364 * with a reference held by some other context.
365 */
366static void
367nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
368 struct nfs_page *old_head)
369{
370 while (destroy_list) {
371 struct nfs_page *subreq = destroy_list;
372
373 destroy_list = (subreq->wb_this_page == old_head) ?
374 NULL : subreq->wb_this_page;
375
376 WARN_ON_ONCE(old_head != subreq->wb_head);
377
378 /* make sure old group is not used */
379 subreq->wb_head = subreq;
380 subreq->wb_this_page = subreq;
381
382 nfs_clear_request_commit(subreq);
383
384 /* subreq is now totally disconnected from page group or any
385 * write / commit lists. last chance to wake any waiters */
386 nfs_unlock_request(subreq);
387
388 if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
389 /* release ref on old head request */
390 nfs_release_request(old_head);
391
392 nfs_page_group_clear_bits(subreq);
393
394 /* release the PG_INODE_REF reference */
395 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
396 nfs_release_request(subreq);
397 else
398 WARN_ON_ONCE(1);
399 } else {
400 WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
401 /* zombie requests have already released the last
402 * reference and were waiting on the rest of the
403 * group to complete. Since it's no longer part of a
404 * group, simply free the request */
405 nfs_page_group_clear_bits(subreq);
406 nfs_free_request(subreq);
407 }
408 }
409}
410
411/*
412 * nfs_lock_and_join_requests - join all subreqs to the head req and return
413 * a locked reference, cancelling any pending
414 * operations for this page.
415 *
416 * @page - the page used to lookup the "page group" of nfs_page structures
417 * @nonblock - if true, don't block waiting for request locks
418 *
419 * This function joins all sub requests to the head request by first
420 * locking all requests in the group, cancelling any pending operations
421 * and finally updating the head request to cover the whole range covered by
422 * the (former) group. All subrequests are removed from any write or commit
423 * lists, unlinked from the group and destroyed.
424 *
425 * Returns a locked, referenced pointer to the head request - which after
426 * this call is guaranteed to be the only request associated with the page.
427 * Returns NULL if no requests are found for @page, or a ERR_PTR if an
428 * error was encountered.
429 */
430static struct nfs_page *
431nfs_lock_and_join_requests(struct page *page, bool nonblock)
278{ 432{
279 struct inode *inode = page_file_mapping(page)->host; 433 struct inode *inode = page_file_mapping(page)->host;
280 struct nfs_page *req; 434 struct nfs_page *head, *subreq;
435 struct nfs_page *destroy_list = NULL;
436 unsigned int total_bytes;
281 int ret; 437 int ret;
282 438
439try_again:
440 total_bytes = 0;
441
442 WARN_ON_ONCE(destroy_list);
443
283 spin_lock(&inode->i_lock); 444 spin_lock(&inode->i_lock);
284 for (;;) { 445
285 req = nfs_page_find_request_locked(NFS_I(inode), page); 446 /*
286 if (req == NULL) 447 * A reference is taken only on the head request which acts as a
287 break; 448 * reference to the whole page group - the group will not be destroyed
288 if (nfs_lock_request(req)) 449 * until the head reference is released.
289 break; 450 */
290 /* Note: If we hold the page lock, as is the case in nfs_writepage, 451 head = nfs_page_find_head_request_locked(NFS_I(inode), page);
291 * then the call to nfs_lock_request() will always 452
292 * succeed provided that someone hasn't already marked the 453 if (!head) {
293 * request as dirty (in which case we don't care).
294 */
295 spin_unlock(&inode->i_lock); 454 spin_unlock(&inode->i_lock);
296 if (!nonblock) 455 return NULL;
297 ret = nfs_wait_on_request(req); 456 }
298 else 457
299 ret = -EAGAIN; 458 /* lock each request in the page group */
300 nfs_release_request(req); 459 nfs_page_group_lock(head);
301 if (ret != 0) 460 subreq = head;
461 do {
462 /*
463 * Subrequests are always contiguous, non overlapping
464 * and in order. If not, it's a programming error.
465 */
466 WARN_ON_ONCE(subreq->wb_offset !=
467 (head->wb_offset + total_bytes));
468
469 /* keep track of how many bytes this group covers */
470 total_bytes += subreq->wb_bytes;
471
472 if (!nfs_lock_request(subreq)) {
473 /* releases page group bit lock and
474 * inode spin lock and all references */
475 ret = nfs_unroll_locks_and_wait(inode, head,
476 subreq, nonblock);
477
478 if (ret == 0)
479 goto try_again;
480
302 return ERR_PTR(ret); 481 return ERR_PTR(ret);
303 spin_lock(&inode->i_lock); 482 }
483
484 subreq = subreq->wb_this_page;
485 } while (subreq != head);
486
487 /* Now that all requests are locked, make sure they aren't on any list.
488 * Commit list removal accounting is done after locks are dropped */
489 subreq = head;
490 do {
491 nfs_list_remove_request(subreq);
492 subreq = subreq->wb_this_page;
493 } while (subreq != head);
494
495 /* unlink subrequests from head, destroy them later */
496 if (head->wb_this_page != head) {
497 /* destroy list will be terminated by head */
498 destroy_list = head->wb_this_page;
499 head->wb_this_page = head;
500
501 /* change head request to cover whole range that
502 * the former page group covered */
503 head->wb_bytes = total_bytes;
304 } 504 }
505
506 /*
507 * prepare head request to be added to new pgio descriptor
508 */
509 nfs_page_group_clear_bits(head);
510
511 /*
512 * some part of the group was still on the inode list - otherwise
513 * the group wouldn't be involved in async write.
514 * grab a reference for the head request, iff it needs one.
515 */
516 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
517 kref_get(&head->wb_kref);
518
519 nfs_page_group_unlock(head);
520
521 /* drop lock to clear_request_commit the head req and clean up
522 * requests on destroy list */
305 spin_unlock(&inode->i_lock); 523 spin_unlock(&inode->i_lock);
306 return req; 524
525 nfs_destroy_unlinked_subrequests(destroy_list, head);
526
527 /* clean up commit list state */
528 nfs_clear_request_commit(head);
529
530 /* still holds ref on head from nfs_page_find_head_request_locked
531 * and still has lock on head from lock loop */
532 return head;
307} 533}
308 534
309/* 535/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
316 struct nfs_page *req; 542 struct nfs_page *req;
317 int ret = 0; 543 int ret = 0;
318 544
319 req = nfs_find_and_lock_request(page, nonblock); 545 req = nfs_lock_and_join_requests(page, nonblock);
320 if (!req) 546 if (!req)
321 goto out; 547 goto out;
322 ret = PTR_ERR(req); 548 ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
448 set_page_private(req->wb_page, (unsigned long)req); 674 set_page_private(req->wb_page, (unsigned long)req);
449 } 675 }
450 nfsi->npages++; 676 nfsi->npages++;
451 set_bit(PG_INODE_REF, &req->wb_flags); 677 /* this a head request for a page group - mark it as having an
678 * extra reference so sub groups can follow suit */
679 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
452 kref_get(&req->wb_kref); 680 kref_get(&req->wb_kref);
453 spin_unlock(&inode->i_lock); 681 spin_unlock(&inode->i_lock);
454} 682}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
474 nfsi->npages--; 702 nfsi->npages--;
475 spin_unlock(&inode->i_lock); 703 spin_unlock(&inode->i_lock);
476 } 704 }
477 nfs_release_request(req); 705
706 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
707 nfs_release_request(req);
478} 708}
479 709
480static void 710static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
638{ 868{
639 struct nfs_commit_info cinfo; 869 struct nfs_commit_info cinfo;
640 unsigned long bytes = 0; 870 unsigned long bytes = 0;
641 bool do_destroy;
642 871
643 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 872 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
644 goto out; 873 goto out;
@@ -668,7 +897,6 @@ remove_req:
668next: 897next:
669 nfs_unlock_request(req); 898 nfs_unlock_request(req);
670 nfs_end_page_writeback(req); 899 nfs_end_page_writeback(req);
671 do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
672 nfs_release_request(req); 900 nfs_release_request(req);
673 } 901 }
674out: 902out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
769 spin_lock(&inode->i_lock); 997 spin_lock(&inode->i_lock);
770 998
771 for (;;) { 999 for (;;) {
772 req = nfs_page_find_request_locked(NFS_I(inode), page); 1000 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
773 if (req == NULL) 1001 if (req == NULL)
774 goto out_unlock; 1002 goto out_unlock;
775 1003
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
877 * dropped page. 1105 * dropped page.
878 */ 1106 */
879 do { 1107 do {
880 req = nfs_page_find_request(page); 1108 req = nfs_page_find_head_request(page);
881 if (req == NULL) 1109 if (req == NULL)
882 return 0; 1110 return 0;
883 l_ctx = req->wb_lock_context; 1111 l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1569 struct nfs_page *req; 1797 struct nfs_page *req;
1570 int ret = 0; 1798 int ret = 0;
1571 1799
1572 for (;;) { 1800 wait_on_page_writeback(page);
1573 wait_on_page_writeback(page); 1801
1574 req = nfs_page_find_request(page); 1802 /* blocking call to cancel all requests and join to a single (head)
1575 if (req == NULL) 1803 * request */
1576 break; 1804 req = nfs_lock_and_join_requests(page, false);
1577 if (nfs_lock_request(req)) { 1805
1578 nfs_clear_request_commit(req); 1806 if (IS_ERR(req)) {
1579 nfs_inode_remove_request(req); 1807 ret = PTR_ERR(req);
1580 /* 1808 } else if (req) {
1581 * In case nfs_inode_remove_request has marked the 1809 /* all requests from this page have been cancelled by
1582 * page as being dirty 1810 * nfs_lock_and_join_requests, so just remove the head
1583 */ 1811 * request from the inode / page_private pointer and
1584 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1812 * release it */
1585 nfs_unlock_and_release_request(req); 1813 nfs_inode_remove_request(req);
1586 break; 1814 /*
1587 } 1815 * In case nfs_inode_remove_request has marked the
1588 ret = nfs_wait_on_request(req); 1816 * page as being dirty
1589 nfs_release_request(req); 1817 */
1590 if (ret < 0) 1818 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1591 break; 1819 nfs_unlock_and_release_request(req);
1592 } 1820 }
1821
1593 return ret; 1822 return ret;
1594} 1823}
1595 1824
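
nfs_lock_and_join_requests() above try-locks every request in a page group and, via nfs_unroll_locks_and_wait(), releases everything it grabbed and retries when one member is busy. The sketch below shows that lock-all-or-unroll shape with pthread trylock; it is single-threaded, so the busy path never actually triggers, and all names are invented.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define GROUP_SIZE 4

static pthread_mutex_t group[GROUP_SIZE];

static void lock_group(void)
{
    for (;;) {
        int i;

        for (i = 0; i < GROUP_SIZE; i++) {
            if (pthread_mutex_trylock(&group[i]) == 0)
                continue;
            /* member i is busy: unroll everything grabbed so far */
            while (--i >= 0)
                pthread_mutex_unlock(&group[i]);
            sched_yield();      /* stands in for waiting on the busy one */
            break;
        }
        if (i == GROUP_SIZE)
            return;             /* the whole group is now locked */
    }
}

int main(void)
{
    for (int i = 0; i < GROUP_SIZE; i++)
        pthread_mutex_init(&group[i], NULL);

    lock_group();
    printf("locked all %d group members\n", GROUP_SIZE);
    for (int i = 0; i < GROUP_SIZE; i++)
        pthread_mutex_unlock(&group[i]);
    return 0;
}
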
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2fc7abebeb9b..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2641,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
2641{ 2641{
2642 __be32 *p; 2642 __be32 *p;
2643 2643
2644 p = xdr_reserve_space(xdr, 6); 2644 p = xdr_reserve_space(xdr, 20);
2645 if (!p) 2645 if (!p)
2646 return NULL; 2646 return NULL;
2647 *p++ = htonl(2); 2647 *p++ = htonl(2);
@@ -2879,6 +2879,7 @@ again:
2879 * return the conflicting open: 2879 * return the conflicting open:
2880 */ 2880 */
2881 if (conf->len) { 2881 if (conf->len) {
2882 kfree(conf->data);
2882 conf->len = 0; 2883 conf->len = 0;
2883 conf->data = NULL; 2884 conf->data = NULL;
2884 goto again; 2885 goto again;
@@ -2891,6 +2892,7 @@ again:
2891 if (conf->len) { 2892 if (conf->len) {
2892 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); 2893 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
2893 p = xdr_encode_opaque(p, conf->data, conf->len); 2894 p = xdr_encode_opaque(p, conf->data, conf->len);
2895 kfree(conf->data);
2894 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ 2896 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
2895 p = xdr_encode_hyper(p, (u64)0); /* clientid */ 2897 p = xdr_encode_hyper(p, (u64)0); /* clientid */
2896 *p++ = cpu_to_be32(0); /* length of owner name */ 2898 *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
2907 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); 2909 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
2908 else if (nfserr == nfserr_denied) 2910 else if (nfserr == nfserr_denied)
2909 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); 2911 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
2910 kfree(lock->lk_denied.ld_owner.data); 2912
2911 return nfserr; 2913 return nfserr;
2912} 2914}
2913 2915
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63715c0..7f30bdc57d13 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
702 struct dquot *dquot; 702 struct dquot *dquot;
703 unsigned long freed = 0; 703 unsigned long freed = 0;
704 704
705 spin_lock(&dq_list_lock);
705 head = free_dquots.prev; 706 head = free_dquots.prev;
706 while (head != &free_dquots && sc->nr_to_scan) { 707 while (head != &free_dquots && sc->nr_to_scan) {
707 dquot = list_entry(head, struct dquot, dq_free); 708 dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
713 freed++; 714 freed++;
714 head = free_dquots.prev; 715 head = free_dquots.prev;
715 } 716 }
717 spin_unlock(&dq_list_lock);
716 return freed; 718 return freed;
717} 719}
718 720
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843 843
844 /* wrap around? */ 844 /* wrap around? */
845 len = sizeof(*new_xattr) + size; 845 len = sizeof(*new_xattr) + size;
846 if (len <= sizeof(*new_xattr)) 846 if (len < sizeof(*new_xattr))
847 return NULL; 847 return NULL;
848 848
849 new_xattr = kmalloc(len, GFP_KERNEL); 849 new_xattr = kmalloc(len, GFP_KERNEL);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
4298} 4298}
4299 4299
4300 4300
4301int 4301static int
4302__xfs_bmapi_allocate( 4302xfs_bmapi_allocate(
4303 struct xfs_bmalloca *bma) 4303 struct xfs_bmalloca *bma)
4304{ 4304{
4305 struct xfs_mount *mp = bma->ip->i_mount; 4305 struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
4578 bma.flist = flist; 4578 bma.flist = flist;
4579 bma.firstblock = firstblock; 4579 bma.firstblock = firstblock;
4580 4580
4581 if (flags & XFS_BMAPI_STACK_SWITCH)
4582 bma.stack_switch = 1;
4583
4584 while (bno < end && n < *nmap) { 4581 while (bno < end && n < *nmap) {
4585 inhole = eof || bma.got.br_startoff > bno; 4582 inhole = eof || bma.got.br_startoff > bno;
4586 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4583 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
77 * from written to unwritten, otherwise convert from unwritten to written. 77 * from written to unwritten, otherwise convert from unwritten to written.
78 */ 78 */
79#define XFS_BMAPI_CONVERT 0x040 79#define XFS_BMAPI_CONVERT 0x040
80#define XFS_BMAPI_STACK_SWITCH 0x080
81 80
82#define XFS_BMAPI_FLAGS \ 81#define XFS_BMAPI_FLAGS \
83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 82 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
86 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
87 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
88 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \
89 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 88 { XFS_BMAPI_CONVERT, "CONVERT" }
90 { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
91 89
92 90
93static inline int xfs_bmapi_aflag(int w) 91static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
249} 249}
250 250
251/* 251/*
252 * Stack switching interfaces for allocation
253 */
254static void
255xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257{
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261 unsigned long new_pflags = PF_FSTRANS;
262
263 /*
264 * we are in a transaction context here, but may also be doing work
265 * in kswapd context, and hence we may need to inherit that state
266 * temporarily to ensure that we don't block waiting for memory reclaim
267 * in any way.
268 */
269 if (args->kswapd)
270 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
271
272 current_set_flags_nested(&pflags, new_pflags);
273
274 args->result = __xfs_bmapi_allocate(args);
275 complete(args->done);
276
277 current_restore_flags_nested(&pflags, new_pflags);
278}
279
280/*
281 * Some allocation requests often come in with little stack to work on. Push
282 * them off to a worker thread so there is lots of stack to use. Otherwise just
283 * call directly to avoid the context switch overhead here.
284 */
285int
286xfs_bmapi_allocate(
287 struct xfs_bmalloca *args)
288{
289 DECLARE_COMPLETION_ONSTACK(done);
290
291 if (!args->stack_switch)
292 return __xfs_bmapi_allocate(args);
293
294
295 args->done = &done;
296 args->kswapd = current_is_kswapd();
297 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
298 queue_work(xfs_alloc_wq, &args->work);
299 wait_for_completion(&done);
300 destroy_work_on_stack(&args->work);
301 return args->result;
302}
303
304/*
305 * Check if the endoff is outside the last extent. If so the caller will grow 252 * Check if the endoff is outside the last extent. If so the caller will grow
306 * the allocation to a stripe unit boundary. All offsets are considered outside 253 * the allocation to a stripe unit boundary. All offsets are considered outside
307 * the end of file for an empty fork, so 1 is returned in *eof in that case. 254 * the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
55 bool userdata;/* set if is user data */ 55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */ 56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */ 57 bool conv; /* overwriting unwritten extents */
58 bool stack_switch;
59 bool kswapd; /* allocation in kswapd context */
60 int flags; 58 int flags;
61 struct completion *done; 59 struct completion *done;
62 struct work_struct work; 60 struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
66int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, 64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
67 int *committed); 65 int *committed);
68int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
69int xfs_bmapi_allocate(struct xfs_bmalloca *args);
70int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
71int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
72 int whichfork, int *eof); 68 int whichfork, int *eof);
73int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, 69int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
33#include "xfs_error.h" 33#include "xfs_error.h"
34#include "xfs_trace.h" 34#include "xfs_trace.h"
35#include "xfs_cksum.h" 35#include "xfs_cksum.h"
36#include "xfs_alloc.h"
36 37
37/* 38/*
38 * Cursor allocation zone. 39 * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
2323 * record (to be inserted into parent). 2324 * record (to be inserted into parent).
2324 */ 2325 */
2325STATIC int /* error */ 2326STATIC int /* error */
2326xfs_btree_split( 2327__xfs_btree_split(
2327 struct xfs_btree_cur *cur, 2328 struct xfs_btree_cur *cur,
2328 int level, 2329 int level,
2329 union xfs_btree_ptr *ptrp, 2330 union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
2503 return error; 2504 return error;
2504} 2505}
2505 2506
2507struct xfs_btree_split_args {
2508 struct xfs_btree_cur *cur;
2509 int level;
2510 union xfs_btree_ptr *ptrp;
2511 union xfs_btree_key *key;
2512 struct xfs_btree_cur **curp;
2513 int *stat; /* success/failure */
2514 int result;
2515 bool kswapd; /* allocation in kswapd context */
2516 struct completion *done;
2517 struct work_struct work;
2518};
2519
2520/*
2521 * Stack switching interfaces for allocation
2522 */
2523static void
2524xfs_btree_split_worker(
2525 struct work_struct *work)
2526{
2527 struct xfs_btree_split_args *args = container_of(work,
2528 struct xfs_btree_split_args, work);
2529 unsigned long pflags;
2530 unsigned long new_pflags = PF_FSTRANS;
2531
2532 /*
2533 * we are in a transaction context here, but may also be doing work
2534 * in kswapd context, and hence we may need to inherit that state
2535 * temporarily to ensure that we don't block waiting for memory reclaim
2536 * in any way.
2537 */
2538 if (args->kswapd)
2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2540
2541 current_set_flags_nested(&pflags, new_pflags);
2542
2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2544 args->key, args->curp, args->stat);
2545 complete(args->done);
2546
2547 current_restore_flags_nested(&pflags, new_pflags);
2548}
2549
2550/*
2551 * BMBT split requests often come in with little stack to work on. Push
2552 * them off to a worker thread so there is lots of stack to use. For the other
2553 * btree types, just call directly to avoid the context switch overhead here.
2554 */
2555STATIC int /* error */
2556xfs_btree_split(
2557 struct xfs_btree_cur *cur,
2558 int level,
2559 union xfs_btree_ptr *ptrp,
2560 union xfs_btree_key *key,
2561 struct xfs_btree_cur **curp,
2562 int *stat) /* success/failure */
2563{
2564 struct xfs_btree_split_args args;
2565 DECLARE_COMPLETION_ONSTACK(done);
2566
2567 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2569
2570 args.cur = cur;
2571 args.level = level;
2572 args.ptrp = ptrp;
2573 args.key = key;
2574 args.curp = curp;
2575 args.stat = stat;
2576 args.done = &done;
2577 args.kswapd = current_is_kswapd();
2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2579 queue_work(xfs_alloc_wq, &args.work);
2580 wait_for_completion(&done);
2581 destroy_work_on_stack(&args.work);
2582 return args.result;
2583}
2584
2585
2506/* 2586/*
2507 * Copy the old inode root contents into a real block and make the 2587 * Copy the old inode root contents into a real block and make the
2508 * broot point to it. 2588 * broot point to it.
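
Note on the xfs_btree.c hunk above: BMBT split work is pushed onto the xfs_alloc_wq workqueue purely so that __xfs_btree_split() runs on a worker's fresh stack, while the caller parks on a completion (and the worker temporarily inherits kswapd-related PF_* flags). As a rough userspace analogue only, the sketch below reproduces the same hand-off-and-wait shape with POSIX threads; every name in it (split_args, do_split, split_on_big_stack) is invented for the example and is not a kernel interface.

/* Illustration: run a stack-hungry job on a helper thread with its own
 * large stack, then wait for the result. Not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct split_args {
    int level;      /* input handed to the worker */
    int result;     /* filled in by the worker */
};

/* Stand-in for the real work that needs lots of stack. */
static int do_split(int level)
{
    return level * 2;
}

static void *split_worker(void *arg)
{
    struct split_args *args = arg;

    args->result = do_split(args->level);
    return NULL;
}

static int split_on_big_stack(int level)
{
    struct split_args args = { .level = level };
    pthread_attr_t attr;
    pthread_t tid;

    /* Give the helper a 1 MiB stack of its own. */
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);

    if (pthread_create(&tid, &attr, split_worker, &args) != 0) {
        perror("pthread_create");
        exit(1);
    }
    pthread_attr_destroy(&attr);

    /* The caller simply blocks until the helper is done. */
    pthread_join(tid, NULL);
    return args.result;
}

int main(void)
{
    printf("result = %d\n", split_on_big_stack(21));
    return 0;
}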
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
749 * pointer that the caller gave to us. 749 * pointer that the caller gave to us.
750 */ 750 */
751 error = xfs_bmapi_write(tp, ip, map_start_fsb, 751 error = xfs_bmapi_write(tp, ip, map_start_fsb,
752 count_fsb, 752 count_fsb, 0,
753 XFS_BMAPI_STACK_SWITCH,
754 &first_block, 1, 753 &first_block, 1,
755 imap, &nimaps, &free_list); 754 imap, &nimaps, &free_list);
756 if (error) 755 if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
483 } 483 }
484 484
485 /* 485 /*
486 * GQUOTINO and PQUOTINO cannot be used together in versions 486 * GQUOTINO and PQUOTINO cannot be used together in versions of
487 * of superblock that do not have pquotino. from->sb_flags 487 * superblock that do not have pquotino. from->sb_flags tells us which
488 * tells us which quota is active and should be copied to 488 * quota is active and should be copied to disk. If neither are active,
489 * disk. 489 * make sure we write NULLFSINO to the sb_gquotino field as a quota
490 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
491 * bit is set.
492 *
493 * Note that we don't need to handle the sb_uquotino or sb_pquotino here
494 * as they do not require any translation. Hence the main sb field loop
495 * will write them appropriately from the in-core superblock.
490 */ 496 */
491 if ((*fields & XFS_SB_GQUOTINO) && 497 if ((*fields & XFS_SB_GQUOTINO) &&
492 (from->sb_qflags & XFS_GQUOTA_ACCT)) 498 (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
494 else if ((*fields & XFS_SB_PQUOTINO) && 500 else if ((*fields & XFS_SB_PQUOTINO) &&
495 (from->sb_qflags & XFS_PQUOTA_ACCT)) 501 (from->sb_qflags & XFS_PQUOTA_ACCT))
496 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 502 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
503 else {
504 /*
505 * We can't rely on just the fields being logged to tell us
506 * that it is safe to write NULLFSINO - we should only do that
507 * if quotas are not actually enabled. Hence only write
508 * NULLFSINO if both in-core quota inodes are NULL.
509 */
510 if (from->sb_gquotino == NULLFSINO &&
511 from->sb_pquotino == NULLFSINO)
512 to->sb_gquotino = cpu_to_be64(NULLFSINO);
513 }
497 514
498 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 515 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
499} 516}
diff --git a/include/acpi/video.h b/include/acpi/video.h
index ea4c7bbded4d..843ef1adfbfa 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -22,6 +22,7 @@ extern void acpi_video_unregister(void);
22extern void acpi_video_unregister_backlight(void); 22extern void acpi_video_unregister_backlight(void);
23extern int acpi_video_get_edid(struct acpi_device *device, int type, 23extern int acpi_video_get_edid(struct acpi_device *device, int type,
24 int device_id, void **edid); 24 int device_id, void **edid);
25extern bool acpi_video_verify_backlight_support(void);
25#else 26#else
26static inline int acpi_video_register(void) { return 0; } 27static inline int acpi_video_register(void) { return 0; }
27static inline void acpi_video_unregister(void) { return; } 28static inline void acpi_video_unregister(void) { return; }
@@ -31,6 +32,7 @@ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
31{ 32{
32 return -ENODEV; 33 return -ENODEV;
33} 34}
35static inline bool acpi_video_verify_backlight_support(void) { return false; }
34#endif 36#endif
35 37
36#endif 38#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 471ba48c7ae4..c1c0b0cf39b4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -693,7 +693,7 @@
693 . = ALIGN(PAGE_SIZE); \ 693 . = ALIGN(PAGE_SIZE); \
694 *(.data..percpu..page_aligned) \ 694 *(.data..percpu..page_aligned) \
695 . = ALIGN(cacheline); \ 695 . = ALIGN(cacheline); \
696 *(.data..percpu..readmostly) \ 696 *(.data..percpu..read_mostly) \
697 . = ALIGN(cacheline); \ 697 . = ALIGN(cacheline); \
698 *(.data..percpu) \ 698 *(.data..percpu) \
699 *(.data..percpu..shared_aligned) \ 699 *(.data..percpu..shared_aligned) \
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 97dcb89d37d3..21d51ae1d242 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -63,7 +63,6 @@
63#define CLK_SCLK_MPHY_IXTAL24 161 63#define CLK_SCLK_MPHY_IXTAL24 161
64 64
65/* gate clocks */ 65/* gate clocks */
66#define CLK_ACLK66_PERIC 256
67#define CLK_UART0 257 66#define CLK_UART0 257
68#define CLK_UART1 258 67#define CLK_UART1 258
69#define CLK_UART2 259 68#define CLK_UART2 259
@@ -203,6 +202,8 @@
203#define CLK_MOUT_G3D 641 202#define CLK_MOUT_G3D 641
204#define CLK_MOUT_VPLL 642 203#define CLK_MOUT_VPLL 642
205#define CLK_MOUT_MAUDIO0 643 204#define CLK_MOUT_MAUDIO0 643
205#define CLK_MOUT_USER_ACLK333 644
206#define CLK_MOUT_SW_ACLK333 645
206 207
207/* divider clocks */ 208/* divider clocks */
208#define CLK_DOUT_PIXEL 768 209#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a2855c046..3d33794e4f3e 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
30#define MUX_MODE14 0xe 30#define MUX_MODE14 0xe
31#define MUX_MODE15 0xf 31#define MUX_MODE15 0xf
32 32
33#define PULL_ENA (1 << 16) 33#define PULL_ENA (0 << 16)
34#define PULL_DIS (1 << 16)
34#define PULL_UP (1 << 17) 35#define PULL_UP (1 << 17)
35#define INPUT_EN (1 << 18) 36#define INPUT_EN (1 << 18)
36#define SLEWCONTROL (1 << 19) 37#define SLEWCONTROL (1 << 19)
@@ -38,10 +39,10 @@
38#define WAKEUP_EVENT (1 << 25) 39#define WAKEUP_EVENT (1 << 25)
39 40
40/* Active pin states */ 41/* Active pin states */
41#define PIN_OUTPUT 0 42#define PIN_OUTPUT (0 | PULL_DIS)
42#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) 43#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
43#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) 44#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
44#define PIN_INPUT INPUT_EN 45#define PIN_INPUT (INPUT_EN | PULL_DIS)
45#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) 46#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
46#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) 47#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
47#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) 48#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
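
Note on the dra.h hunk above: PULL_ENA becomes the bit-16-clear state (0 << 16) and a new PULL_DIS bit marks "pull disabled", which PIN_OUTPUT and PIN_INPUT now set explicitly. A throwaway host-side check, with the macro values copied from the hunk and the printed results noted in comments, makes the numeric effect visible; it asserts nothing about the pad hardware beyond what the macro names say.

/* Illustration only: print the pin-state values after this change. */
#include <stdio.h>

#define PULL_ENA    (0 << 16)   /* new: pulls enabled when bit 16 is clear */
#define PULL_DIS    (1 << 16)   /* new: explicit "pull disabled" bit */
#define PULL_UP     (1 << 17)
#define INPUT_EN    (1 << 18)

#define PIN_OUTPUT          (0 | PULL_DIS)
#define PIN_INPUT           (INPUT_EN | PULL_DIS)
#define PIN_INPUT_PULLUP    (PULL_ENA | INPUT_EN | PULL_UP)

int main(void)
{
    printf("PIN_OUTPUT       = 0x%05x\n", (unsigned)PIN_OUTPUT);       /* 0x10000 */
    printf("PIN_INPUT        = 0x%05x\n", (unsigned)PIN_INPUT);        /* 0x50000 */
    printf("PIN_INPUT_PULLUP = 0x%05x\n", (unsigned)PIN_INPUT_PULLUP); /* 0x60000 */
    return 0;
}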
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
482 *********************************************************************/ 482 *********************************************************************/
483 483
484/* Special Values of .frequency field */ 484/* Special Values of .frequency field */
485#define CPUFREQ_ENTRY_INVALID ~0 485#define CPUFREQ_ENTRY_INVALID ~0u
486#define CPUFREQ_TABLE_END ~1 486#define CPUFREQ_TABLE_END ~1u
487/* Special Values of .flags field */ 487/* Special Values of .flags field */
488#define CPUFREQ_BOOST_FREQ (1 << 0) 488#define CPUFREQ_BOOST_FREQ (1 << 0)
489 489
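
Note on the cpufreq.h tweak above: the added 'u' suffix makes CPUFREQ_ENTRY_INVALID and CPUFREQ_TABLE_END unsigned constants, matching the unsigned .frequency field they are compared against. Whether the suffix matters depends on how the constant is widened; the two-line standalone check below (not kernel code, output noted for an LP64 target) shows the difference.

/* ~0 is a signed int holding -1; ~0u is an unsigned int holding UINT_MAX.
 * The bit patterns only agree until the value is widened. */
#include <stdio.h>

int main(void)
{
    unsigned long as_long_signed   = ~0;    /* sign-extends: 0xffffffffffffffff on LP64 */
    unsigned long as_long_unsigned = ~0u;   /* stays 32-bit: 0x00000000ffffffff */

    printf("~0  widened to unsigned long: %#lx\n", as_long_signed);
    printf("~0u widened to unsigned long: %#lx\n", as_long_unsigned);
    return 0;
}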
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 145375ea0bd9..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -305,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
305 struct kernfs_root *root, unsigned long magic, 305 struct kernfs_root *root, unsigned long magic,
306 bool *new_sb_created, const void *ns); 306 bool *new_sb_created, const void *ns);
307void kernfs_kill_sb(struct super_block *sb); 307void kernfs_kill_sb(struct super_block *sb);
308struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
308 309
309void kernfs_init(void); 310void kernfs_init(void);
310 311
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
593 struct device *dev; 593 struct device *dev;
594 void __iomem * const *iomap; 594 void __iomem * const *iomap;
595 unsigned int n_ports; 595 unsigned int n_ports;
596 unsigned int n_tags; /* nr of NCQ tags */
596 void *private_data; 597 void *private_data;
597 struct ata_port_operations *ops; 598 struct ata_port_operations *ops;
598 unsigned long flags; 599 unsigned long flags;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
578 u32 cons_index; 578 u32 cons_index;
579 579
580 u16 irq; 580 u16 irq;
581 bool irq_affinity_change;
582
583 __be32 *set_ci_db; 581 __be32 *set_ci_db;
584 __be32 *arm_db; 582 __be32 *arm_db;
585 int arm_sn; 583 int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1167 int *vector); 1165 int *vector);
1168void mlx4_release_eq(struct mlx4_dev *dev, int vec); 1166void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1169 1167
1168int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1169
1170int mlx4_get_phys_port_id(struct mlx4_dev *dev); 1170int mlx4_get_phys_port_id(struct mlx4_dev *dev);
1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); 1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); 1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <linux/osq_lock.h>
20 21
21/* 22/*
22 * Simple, straightforward mutexes with strict semantics: 23 * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
46 * - detects multi-task circular deadlocks and prints out all affected 47 * - detects multi-task circular deadlocks and prints out all affected
47 * locks and tasks (and only those tasks) 48 * locks and tasks (and only those tasks)
48 */ 49 */
49struct optimistic_spin_queue;
50struct mutex { 50struct mutex {
51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
52 atomic_t count; 52 atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
59 struct optimistic_spin_queue *osq; /* Spinner MCS lock */ 59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
60#endif 60#endif
61#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
62 const char *name; 62 const char *name;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
25 25
26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
27 27
28extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
29 struct phy_device *phydev);
30
31#else /* CONFIG_OF */ 28#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 29static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 30{
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
63{ 60{
64 return NULL; 61 return NULL;
65} 62}
66
67static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
68 struct phy_device *phydev)
69{
70}
71#endif /* CONFIG_OF */ 63#endif /* CONFIG_OF */
72 64
73#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) 65#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
1#ifndef __LINUX_OSQ_LOCK_H
2#define __LINUX_OSQ_LOCK_H
3
4/*
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
7 */
8
9#define OSQ_UNLOCKED_VAL (0)
10
11struct optimistic_spin_queue {
12 /*
13 * Stores an encoded value of the CPU # of the tail node in the queue.
14 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
15 */
16 atomic_t tail;
17};
18
19/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21
22static inline void osq_lock_init(struct optimistic_spin_queue *lock)
23{
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25}
26
27#endif
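
Note on the new osq_lock.h above: the OSQ lock word shrinks to a single atomic int, where 0 means nobody is queued and any other value is the tail CPU's number plus one. Purely as an illustration, the standalone C11 sketch below shows the encoding round-trip and the uncontended acquire/release steps; osq_try_lock and osq_try_unlock are invented helpers, and the per-CPU node handling of the real lock is left out.

/* Minimal sketch of "encode CPU number + 1 into an atomic tail".
 * Standalone C11; no real queueing or spinning is implemented. */
#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0

struct osq {
    atomic_int tail;    /* encoded CPU # of the tail node, or 0 if empty */
};

static int encode_cpu(int cpu)
{
    return cpu + 1;     /* reserve 0 for "no CPU" */
}

static int decode_cpu(int encoded)
{
    return encoded - 1;
}

/* Uncontended fast paths only. */
static int osq_try_lock(struct osq *lock, int cpu)
{
    int expected = OSQ_UNLOCKED_VAL;

    return atomic_compare_exchange_strong(&lock->tail, &expected,
                                          encode_cpu(cpu));
}

static int osq_try_unlock(struct osq *lock, int cpu)
{
    int expected = encode_cpu(cpu);

    return atomic_compare_exchange_strong(&lock->tail, &expected,
                                          OSQ_UNLOCKED_VAL);
}

int main(void)
{
    struct osq lock = { .tail = ATOMIC_VAR_INIT(OSQ_UNLOCKED_VAL) };
    int cpu = 3;

    printf("lock on cpu %d: %s\n", cpu,
           osq_try_lock(&lock, cpu) ? "ok" : "busy");
    printf("tail decodes back to cpu %d\n",
           decode_cpu(atomic_load(&lock.tail)));
    printf("unlock: %s\n", osq_try_unlock(&lock, cpu) ? "ok" : "not owner");
    return 0;
}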
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
399} 399}
400 400
401/* 401/*
402 * Get the offset in PAGE_SIZE.
403 * (TODO: hugepage should have ->index in PAGE_SIZE)
404 */
405static inline pgoff_t page_to_pgoff(struct page *page)
406{
407 if (unlikely(PageHeadHuge(page)))
408 return page->index << compound_order(page);
409 else
410 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411}
412
413/*
402 * Return byte-offset into filesystem object for page. 414 * Return byte-offset into filesystem object for page.
403 */ 415 */
404static inline loff_t page_offset(struct page *page) 416static inline loff_t page_offset(struct page *page)
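
Note on the pagemap.h hunk above: page_to_pgoff() normalises page->index to PAGE_SIZE units. Hugetlb head pages keep their index in huge-page units, so it is scaled up by the compound order, while ordinary page-cache pages need no scaling since PAGE_CACHE_SHIFT equals PAGE_SHIFT. A tiny worked example, assuming 4 KiB base pages and a 2 MiB huge page, and using plain arithmetic rather than kernel structures:

/* Worked example for the index scaling done by page_to_pgoff(). */
#include <stdio.h>

int main(void)
{
    unsigned long huge_index = 3;       /* ->index of a head huge page, in 2 MiB units */
    unsigned int compound_order = 9;    /* 2 MiB / 4 KiB = 512 = 1 << 9 */
    unsigned long pgoff = huge_index << compound_order;

    printf("pgoff in 4 KiB pages: %lu\n", pgoff);   /* 1536 */
    printf("byte offset: %lu\n", pgoff * 4096UL);   /* 6291456 = 6 MiB */
    return 0;
}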
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..dec01d6c3f80 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -146,10 +146,10 @@
146 * Declaration/definition used for per-CPU variables that must be read mostly. 146 * Declaration/definition used for per-CPU variables that must be read mostly.
147 */ 147 */
148#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ 148#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
149 DECLARE_PER_CPU_SECTION(type, name, "..readmostly") 149 DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
150 150
151#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ 151#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
152 DEFINE_PER_CPU_SECTION(type, name, "..readmostly") 152 DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
153 153
154/* 154/*
155 * Intermodule exports for per-CPU variables. sparse forgets about 155 * Intermodule exports for per-CPU variables. sparse forgets about
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
48#include <asm/barrier.h> 47#include <asm/barrier.h>
49 48
50extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
301 300
302/* 301/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
328/*
329 * Report quiscent states to RCU if it is time to do so.
330 */
331static inline void rcu_cond_resched(void)
332{
333 if (unlikely(rcu_should_resched()))
334 rcu_resched();
335}
336
337/*
338 * Infrastructure to implement the synchronize_() primitives in 302 * Infrastructure to implement the synchronize_() primitives in
339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 303 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
340 */ 304 */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
358 * initialization. 322 * initialization.
359 */ 323 */
360#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 324#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
325void init_rcu_head(struct rcu_head *head);
326void destroy_rcu_head(struct rcu_head *head);
361void init_rcu_head_on_stack(struct rcu_head *head); 327void init_rcu_head_on_stack(struct rcu_head *head);
362void destroy_rcu_head_on_stack(struct rcu_head *head); 328void destroy_rcu_head_on_stack(struct rcu_head *head);
363#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 329#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
330static inline void init_rcu_head(struct rcu_head *head)
331{
332}
333
334static inline void destroy_rcu_head(struct rcu_head *head)
335{
336}
337
364static inline void init_rcu_head_on_stack(struct rcu_head *head) 338static inline void init_rcu_head_on_stack(struct rcu_head *head)
365{ 339{
366} 340}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16/* 16/*
17 * the rw-semaphore definition 17 * the rw-semaphore definition
18 * - if activity is 0 then there are no active readers or writers 18 * - if count is 0 then there are no active readers or writers
19 * - if activity is +ve then that is the number of active readers 19 * - if count is +ve then that is the number of active readers
20 * - if activity is -1 then there is one active writer 20 * - if count is -1 then there is one active writer
21 * - if wait_list is not empty, then there are processes waiting for the semaphore 21 * - if wait_list is not empty, then there are processes waiting for the semaphore
22 */ 22 */
23struct rw_semaphore { 23struct rw_semaphore {
24 __s32 activity; 24 __s32 count;
25 raw_spinlock_t wait_lock; 25 raw_spinlock_t wait_lock;
26 struct list_head wait_list; 26 struct list_head wait_list;
27#ifdef CONFIG_DEBUG_LOCK_ALLOC 27#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16
17#include <linux/atomic.h> 16#include <linux/atomic.h>
17#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
18#include <linux/osq_lock.h>
19#endif
18 20
19struct optimistic_spin_queue;
20struct rw_semaphore; 21struct rw_semaphore;
21 22
22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 23#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
25/* All arch specific implementations share the same struct */ 26/* All arch specific implementations share the same struct */
26struct rw_semaphore { 27struct rw_semaphore {
27 long count; 28 long count;
28 raw_spinlock_t wait_lock;
29 struct list_head wait_list; 29 struct list_head wait_list;
30#ifdef CONFIG_SMP 30 raw_spinlock_t wait_lock;
31#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
31 /* 33 /*
32 * Write owner. Used as a speculative check to see 34 * Write owner. Used as a speculative check to see
33 * if the owner is running on the cpu. 35 * if the owner is running on the cpu.
34 */ 36 */
35 struct task_struct *owner; 37 struct task_struct *owner;
36 struct optimistic_spin_queue *osq; /* spinner MCS lock */
37#endif 38#endif
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 39#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 40 struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
64# define __RWSEM_DEP_MAP_INIT(lockname) 65# define __RWSEM_DEP_MAP_INIT(lockname)
65#endif 66#endif
66 67
67#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) 68#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
68#define __RWSEM_INITIALIZER(name) \ 69#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
69 { RWSEM_UNLOCKED_VALUE, \
70 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
71 LIST_HEAD_INIT((name).wait_list), \
72 NULL, /* owner */ \
73 NULL /* mcs lock */ \
74 __RWSEM_DEP_MAP_INIT(name) }
75#else 70#else
76#define __RWSEM_INITIALIZER(name) \ 71#define __RWSEM_OPT_INIT(lockname)
77 { RWSEM_UNLOCKED_VALUE, \
78 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEP_MAP_INIT(name) }
81#endif 72#endif
82 73
74#define __RWSEM_INITIALIZER(name) \
75 { .count = RWSEM_UNLOCKED_VALUE, \
76 .wait_list = LIST_HEAD_INIT((name).wait_list), \
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
78 __RWSEM_OPT_INIT(name) \
79 __RWSEM_DEP_MAP_INIT(name) }
80
83#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85 83
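
Note on the rwsem.h hunk above: __RWSEM_INITIALIZER switches to designated initializers and folds the spin-on-owner fields in through __RWSEM_OPT_INIT, which expands to nothing when the feature is configured out, so one initializer body serves both configurations. The standalone sketch below shows the same macro-composition shape with invented names (struct sem, SEM_INITIALIZER, HAVE_SPIN_ON_OWNER); it is not the kernel definition.

/* Sketch of a base initializer macro plus an optional-members macro. */
#include <stdio.h>

#define HAVE_SPIN_ON_OWNER 1    /* flip to 0 and it still compiles */

struct sem {
    long count;
    const char *name;
#if HAVE_SPIN_ON_OWNER
    void *owner;
#endif
};

#if HAVE_SPIN_ON_OWNER
#define __SEM_OPT_INIT  , .owner = NULL
#else
#define __SEM_OPT_INIT
#endif

#define SEM_INITIALIZER(n) \
    { .count = 0, .name = #n __SEM_OPT_INIT }

static struct sem my_sem = SEM_INITIALIZER(my_sem);

int main(void)
{
    printf("%s starts with count %ld\n", my_sem.name, my_sem.count);
    return 0;
}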
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..0376b054a0d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
872#define SD_NUMA 0x4000 /* cross-node balancing */ 872#define SD_NUMA 0x4000 /* cross-node balancing */
873 873
874#ifdef CONFIG_SCHED_SMT 874#ifdef CONFIG_SCHED_SMT
875static inline const int cpu_smt_flags(void) 875static inline int cpu_smt_flags(void)
876{ 876{
877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
878} 878}
879#endif 879#endif
880 880
881#ifdef CONFIG_SCHED_MC 881#ifdef CONFIG_SCHED_MC
882static inline const int cpu_core_flags(void) 882static inline int cpu_core_flags(void)
883{ 883{
884 return SD_SHARE_PKG_RESOURCES; 884 return SD_SHARE_PKG_RESOURCES;
885} 885}
886#endif 886#endif
887 887
888#ifdef CONFIG_NUMA 888#ifdef CONFIG_NUMA
889static inline const int cpu_numa_flags(void) 889static inline int cpu_numa_flags(void)
890{ 890{
891 return SD_NUMA; 891 return SD_NUMA;
892} 892}
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
999bool cpus_share_cache(int this_cpu, int that_cpu); 999bool cpus_share_cache(int this_cpu, int that_cpu);
1000 1000
1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002typedef const int (*sched_domain_flags_f)(void); 1002typedef int (*sched_domain_flags_f)(void);
1003 1003
1004#define SDTL_OVERLAP 0x01 1004#define SDTL_OVERLAP 0x01
1005 1005
diff --git a/include/net/ip.h b/include/net/ip.h
index 0e795df05ec9..7596eb22e1ce 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -309,16 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
309 } 309 }
310} 310}
311 311
312#define IP_IDENTS_SZ 2048u 312u32 ip_idents_reserve(u32 hash, int segs);
313extern atomic_t *ip_idents;
314
315static inline u32 ip_idents_reserve(u32 hash, int segs)
316{
317 atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
318
319 return atomic_add_return(segs, id_ptr) - segs;
320}
321
322void __ip_select_ident(struct iphdr *iph, int segs); 313void __ip_select_ident(struct iphdr *iph, int segs);
323 314
324static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) 315static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
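
Note on the ip.h hunk above: ip_idents_reserve() is un-inlined but keeps the same contract, atomically claiming `segs` consecutive identifiers and returning the first one. A standalone C11 equivalent of that contract is simply a fetch-and-add; the name reserve_ids is invented and the per-hash bucket array is omitted.

/* Reserve a block of sequential IDs with one atomic fetch-and-add. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint next_ident;

static uint32_t reserve_ids(uint32_t segs)
{
    /* fetch_add returns the value *before* the add, i.e. the first ID,
     * which matches atomic_add_return(segs, ...) - segs. */
    return atomic_fetch_add(&next_ident, segs);
}

int main(void)
{
    uint32_t a = reserve_ids(3);    /* IDs a, a+1, a+2 */
    uint32_t b = reserve_ids(1);    /* never overlaps the block above */

    printf("first block starts at %u, next single ID is %u\n", a, b);
    return 0;
}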
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7277caf3743d..47f425464f84 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -203,7 +203,6 @@ struct neigh_table {
203 void (*proxy_redo)(struct sk_buff *skb); 203 void (*proxy_redo)(struct sk_buff *skb);
204 char *id; 204 char *id;
205 struct neigh_parms parms; 205 struct neigh_parms parms;
206 /* HACK. gc_* should follow parms without a gap! */
207 int gc_interval; 206 int gc_interval;
208 int gc_thresh1; 207 int gc_thresh1;
209 int gc_thresh2; 208 int gc_thresh2;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
6#include <linux/netfilter/nfnetlink.h> 6#include <linux/netfilter/nfnetlink.h>
7#include <linux/netfilter/x_tables.h> 7#include <linux/netfilter/x_tables.h>
8#include <linux/netfilter/nf_tables.h> 8#include <linux/netfilter/nf_tables.h>
9#include <linux/u64_stats_sync.h>
9#include <net/netlink.h> 10#include <net/netlink.h>
10 11
11#define NFT_JUMP_STACK_SIZE 16 12#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
528}; 529};
529 530
530struct nft_stats { 531struct nft_stats {
531 u64 bytes; 532 u64 bytes;
532 u64 pkts; 533 u64 pkts;
534 struct u64_stats_sync syncp;
533}; 535};
534 536
535#define NFT_HOOK_OPS_MAX 2 537#define NFT_HOOK_OPS_MAX 2
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index 079030c853d8..e2070960bac0 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
16struct netns_ieee802154_lowpan { 16struct netns_ieee802154_lowpan {
17 struct netns_sysctl_lowpan sysctl; 17 struct netns_sysctl_lowpan sysctl;
18 struct netns_frags frags; 18 struct netns_frags frags;
19 u16 max_dsize; 19 int max_dsize;
20}; 20};
21 21
22#endif 22#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 unsigned int base_seq;
16 u8 gencursor; 17 u8 gencursor;
17 u8 genctr;
18}; 18};
19 19
20#endif 20#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 173cae485de1..156350745700 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1768static inline void 1768static inline void
1769sk_dst_set(struct sock *sk, struct dst_entry *dst) 1769sk_dst_set(struct sock *sk, struct dst_entry *dst)
1770{ 1770{
1771 spin_lock(&sk->sk_dst_lock); 1771 struct dst_entry *old_dst;
1772 __sk_dst_set(sk, dst); 1772
1773 spin_unlock(&sk->sk_dst_lock); 1773 sk_tx_queue_clear(sk);
1774 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1775 dst_release(old_dst);
1774} 1776}
1775 1777
1776static inline void 1778static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
1782static inline void 1784static inline void
1783sk_dst_reset(struct sock *sk) 1785sk_dst_reset(struct sock *sk)
1784{ 1786{
1785 spin_lock(&sk->sk_dst_lock); 1787 sk_dst_set(sk, NULL);
1786 __sk_dst_reset(sk);
1787 spin_unlock(&sk->sk_dst_lock);
1788} 1788}
1789 1789
1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
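
Note on the sock.h hunk above: sk_dst_set() drops the sk_dst_lock round-trip in favour of an xchg(), publishing the new dst atomically and releasing whatever was there before, which also lets sk_dst_reset() become sk_dst_set(sk, NULL). The rough userspace analogue below uses invented names (struct route, cache, set_route), and free() stands in for dst_release(), which in the kernel drops a reference rather than freeing immediately.

/* Lock-free pointer replacement: swap in the new object, drop the old. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route {
    char dev[16];
};

static _Atomic(struct route *) cache;

static void set_route(struct route *new_rt)
{
    struct route *old_rt;

    /* Swap the published pointer and release the previous entry. */
    old_rt = atomic_exchange(&cache, new_rt);
    free(old_rt);       /* free(NULL) is a no-op, like dst_release(NULL) */
}

int main(void)
{
    struct route *rt = malloc(sizeof(*rt));

    if (!rt)
        return 1;
    strcpy(rt->dev, "eth0");
    set_route(rt);      /* replaces (and frees) any previous route */
    printf("current dev: %s\n", atomic_load(&cache)->dev);
    set_route(NULL);    /* reset path: frees the cached entry */
    return 0;
}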
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
101 * - add FATTR_CTIME 101 * - add FATTR_CTIME
102 * - add ctime and ctimensec to fuse_setattr_in 102 * - add ctime and ctimensec to fuse_setattr_in
103 * - add FUSE_RENAME2 request 103 * - add FUSE_RENAME2 request
104 * - add FUSE_NO_OPEN_SUPPORT flag
104 */ 105 */
105 106
106#ifndef _LINUX_FUSE_H 107#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
229 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus 230 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
230 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 231 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
231 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 232 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
233 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
232 */ 234 */
233#define FUSE_ASYNC_READ (1 << 0) 235#define FUSE_ASYNC_READ (1 << 0)
234#define FUSE_POSIX_LOCKS (1 << 1) 236#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
247#define FUSE_READDIRPLUS_AUTO (1 << 14) 249#define FUSE_READDIRPLUS_AUTO (1 << 14)
248#define FUSE_ASYNC_DIO (1 << 15) 250#define FUSE_ASYNC_DIO (1 << 15)
249#define FUSE_WRITEBACK_CACHE (1 << 16) 251#define FUSE_WRITEBACK_CACHE (1 << 16)
252#define FUSE_NO_OPEN_SUPPORT (1 << 17)
250 253
251/** 254/**
252 * CUSE INIT request/reply flags 255 * CUSE INIT request/reply flags
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
220 220
221endif 221endif
222 222
223config ARCH_SUPPORTS_ATOMIC_RMW
224 bool
225
223config MUTEX_SPIN_ON_OWNER 226config MUTEX_SPIN_ON_OWNER
224 def_bool y 227 def_bool y
225 depends on SMP && !DEBUG_MUTEXES 228 depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
229
230config RWSEM_SPIN_ON_OWNER
231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
226 233
227config ARCH_USE_QUEUE_RWLOCK 234config ARCH_USE_QUEUE_RWLOCK
228 bool 235 bool
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7868fc3c0bc5..70776aec2562 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1648,10 +1648,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1648 int flags, const char *unused_dev_name, 1648 int flags, const char *unused_dev_name,
1649 void *data) 1649 void *data)
1650{ 1650{
1651 struct super_block *pinned_sb = NULL;
1652 struct cgroup_subsys *ss;
1651 struct cgroup_root *root; 1653 struct cgroup_root *root;
1652 struct cgroup_sb_opts opts; 1654 struct cgroup_sb_opts opts;
1653 struct dentry *dentry; 1655 struct dentry *dentry;
1654 int ret; 1656 int ret;
1657 int i;
1655 bool new_sb; 1658 bool new_sb;
1656 1659
1657 /* 1660 /*
@@ -1677,6 +1680,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1677 goto out_unlock; 1680 goto out_unlock;
1678 } 1681 }
1679 1682
1683 /*
1684 * Destruction of cgroup root is asynchronous, so subsystems may
1685 * still be dying after the previous unmount. Let's drain the
1686 * dying subsystems. We just need to ensure that the ones
1687 * unmounted previously finish dying and don't care about new ones
1688 * starting. Testing ref liveliness is good enough.
1689 */
1690 for_each_subsys(ss, i) {
1691 if (!(opts.subsys_mask & (1 << i)) ||
1692 ss->root == &cgrp_dfl_root)
1693 continue;
1694
1695 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1696 mutex_unlock(&cgroup_mutex);
1697 msleep(10);
1698 ret = restart_syscall();
1699 goto out_free;
1700 }
1701 cgroup_put(&ss->root->cgrp);
1702 }
1703
1680 for_each_root(root) { 1704 for_each_root(root) {
1681 bool name_match = false; 1705 bool name_match = false;
1682 1706
@@ -1717,15 +1741,23 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1717 } 1741 }
1718 1742
1719 /* 1743 /*
1720 * A root's lifetime is governed by its root cgroup. 1744 * We want to reuse @root whose lifetime is governed by its
1721 * tryget_live failure indicate that the root is being 1745 * ->cgrp. Let's check whether @root is alive and keep it
1722 * destroyed. Wait for destruction to complete so that the 1746 * that way. As cgroup_kill_sb() can happen anytime, we
1723 * subsystems are free. We can use wait_queue for the wait 1747 * want to block it by pinning the sb so that @root doesn't
1724 * but this path is super cold. Let's just sleep for a bit 1748 * get killed before mount is complete.
1725 * and retry. 1749 *
1750 * With the sb pinned, tryget_live can reliably indicate
1751 * whether @root can be reused. If it's being killed,
1752 * drain it. We can use wait_queue for the wait but this
1753 * path is super cold. Let's just sleep a bit and retry.
1726 */ 1754 */
1727 if (!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { 1755 pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1756 if (IS_ERR(pinned_sb) ||
1757 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1728 mutex_unlock(&cgroup_mutex); 1758 mutex_unlock(&cgroup_mutex);
1759 if (!IS_ERR_OR_NULL(pinned_sb))
1760 deactivate_super(pinned_sb);
1729 msleep(10); 1761 msleep(10);
1730 ret = restart_syscall(); 1762 ret = restart_syscall();
1731 goto out_free; 1763 goto out_free;
@@ -1770,6 +1802,16 @@ out_free:
1770 CGROUP_SUPER_MAGIC, &new_sb); 1802 CGROUP_SUPER_MAGIC, &new_sb);
1771 if (IS_ERR(dentry) || !new_sb) 1803 if (IS_ERR(dentry) || !new_sb)
1772 cgroup_put(&root->cgrp); 1804 cgroup_put(&root->cgrp);
1805
1806 /*
1807 * If @pinned_sb, we're reusing an existing root and holding an
1808 * extra ref on its sb. Mount is complete. Put the extra ref.
1809 */
1810 if (pinned_sb) {
1811 WARN_ON(new_sb);
1812 deactivate_super(pinned_sb);
1813 }
1814
1773 return dentry; 1815 return dentry;
1774} 1816}
1775 1817
@@ -3328,7 +3370,7 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
3328 3370
3329 rcu_read_lock(); 3371 rcu_read_lock();
3330 css_for_each_child(child, css) { 3372 css_for_each_child(child, css) {
3331 if (css->flags & CSS_ONLINE) { 3373 if (child->flags & CSS_ONLINE) {
3332 ret = true; 3374 ret = true;
3333 break; 3375 break;
3334 } 3376 }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f6b33c696224..116a4164720a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1181,7 +1181,13 @@ done:
1181 1181
1182int current_cpuset_is_being_rebound(void) 1182int current_cpuset_is_being_rebound(void)
1183{ 1183{
1184 return task_cs(current) == cpuset_being_rebound; 1184 int ret;
1185
1186 rcu_read_lock();
1187 ret = task_cs(current) == cpuset_being_rebound;
1188 rcu_read_unlock();
1189
1190 return ret;
1185} 1191}
1186 1192
1187static int update_relax_domain_level(struct cpuset *cs, s64 val) 1193static int update_relax_domain_level(struct cpuset *cs, s64 val)
@@ -1617,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1617 * resources, wait for the previously scheduled operations before 1623 * resources, wait for the previously scheduled operations before
1618 * proceeding, so that we don't end up keep removing tasks added 1624 * proceeding, so that we don't end up keep removing tasks added
1619 * after execution capability is restored. 1625 * after execution capability is restored.
1626 *
1627 * cpuset_hotplug_work calls back into cgroup core via
1628 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
1629 * operation like this one can lead to a deadlock through kernfs
1630 * active_ref protection. Let's break the protection. Losing the
1631 * protection is okay as we check whether @cs is online after
1632 * grabbing cpuset_mutex anyway. This only happens on the legacy
1633 * hierarchies.
1620 */ 1634 */
1635 css_get(&cs->css);
1636 kernfs_break_active_protection(of->kn);
1621 flush_work(&cpuset_hotplug_work); 1637 flush_work(&cpuset_hotplug_work);
1622 1638
1623 mutex_lock(&cpuset_mutex); 1639 mutex_lock(&cpuset_mutex);
@@ -1645,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1645 free_trial_cpuset(trialcs); 1661 free_trial_cpuset(trialcs);
1646out_unlock: 1662out_unlock:
1647 mutex_unlock(&cpuset_mutex); 1663 mutex_unlock(&cpuset_mutex);
1664 kernfs_unbreak_active_protection(of->kn);
1665 css_put(&cs->css);
1648 return retval ?: nbytes; 1666 return retval ?: nbytes;
1649} 1667}
1650 1668
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a33d9a2bcbd7..6b17ac1b0c2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2320 next_parent = rcu_dereference(next_ctx->parent_ctx); 2320 next_parent = rcu_dereference(next_ctx->parent_ctx);
2321 2321
2322 /* If neither context have a parent context; they cannot be clones. */ 2322 /* If neither context have a parent context; they cannot be clones. */
2323 if (!parent && !next_parent) 2323 if (!parent || !next_parent)
2324 goto unlock; 2324 goto unlock;
2325 2325
2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
7458 struct perf_event_context *child_ctx, 7458 struct perf_event_context *child_ctx,
7459 struct task_struct *child) 7459 struct task_struct *child)
7460{ 7460{
7461 perf_remove_from_context(child_event, true); 7461 /*
7462 * Do not destroy the 'original' grouping; because of the context
7463 * switch optimization the original events could've ended up in a
7464 * random child task.
7465 *
7466 * If we were to destroy the original group, all group related
7467 * operations would cease to function properly after this random
7468 * child dies.
7469 *
7470 * Do destroy all inherited groups, we don't care about those
7471 * and being thorough is better.
7472 */
7473 perf_remove_from_context(child_event, !!child_event->parent);
7462 7474
7463 /* 7475 /*
7464 * It can happen that the parent exits first, and has events 7476 * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7474static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 7486static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7475{ 7487{
7476 struct perf_event *child_event, *next; 7488 struct perf_event *child_event, *next;
7477 struct perf_event_context *child_ctx; 7489 struct perf_event_context *child_ctx, *parent_ctx;
7478 unsigned long flags; 7490 unsigned long flags;
7479 7491
7480 if (likely(!child->perf_event_ctxp[ctxn])) { 7492 if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7499 raw_spin_lock(&child_ctx->lock); 7511 raw_spin_lock(&child_ctx->lock);
7500 task_ctx_sched_out(child_ctx); 7512 task_ctx_sched_out(child_ctx);
7501 child->perf_event_ctxp[ctxn] = NULL; 7513 child->perf_event_ctxp[ctxn] = NULL;
7514
7515 /*
7516 * In order to avoid freeing: child_ctx->parent_ctx->task
7517 * under perf_event_context::lock, grab another reference.
7518 */
7519 parent_ctx = child_ctx->parent_ctx;
7520 if (parent_ctx)
7521 get_ctx(parent_ctx);
7522
7502 /* 7523 /*
7503 * If this context is a clone; unclone it so it can't get 7524 * If this context is a clone; unclone it so it can't get
7504 * swapped to another process while we're removing all 7525 * swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7509 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7530 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7510 7531
7511 /* 7532 /*
7533 * Now that we no longer hold perf_event_context::lock, drop
7534 * our extra child_ctx->parent_ctx reference.
7535 */
7536 if (parent_ctx)
7537 put_ctx(parent_ctx);
7538
7539 /*
7512 * Report the task dead after unscheduling the events so that we 7540 * Report the task dead after unscheduling the events so that we
7513 * won't get any samples after PERF_RECORD_EXIT. We can however still 7541 * won't get any samples after PERF_RECORD_EXIT. We can however still
7514 * get a few PERF_RECORD_READ events. 7542 * get a few PERF_RECORD_READ events.
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289df5a7..734e9a7d280b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
2037{ 2037{
2038 unsigned long *iter; 2038 unsigned long *iter;
2039 struct kprobe_blacklist_entry *ent; 2039 struct kprobe_blacklist_entry *ent;
2040 unsigned long offset = 0, size = 0; 2040 unsigned long entry, offset = 0, size = 0;
2041 2041
2042 for (iter = start; iter < end; iter++) { 2042 for (iter = start; iter < end; iter++) {
2043 if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { 2043 entry = arch_deref_entry_point((void *)*iter);
2044 pr_err("Failed to find blacklist %p\n", (void *)*iter); 2044
2045 if (!kernel_text_address(entry) ||
2046 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2047 pr_err("Failed to find blacklist at %p\n",
2048 (void *)entry);
2045 continue; 2049 continue;
2046 } 2050 }
2047 2051
2048 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2052 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2049 if (!ent) 2053 if (!ent)
2050 return -ENOMEM; 2054 return -ENOMEM;
2051 ent->start_addr = *iter; 2055 ent->start_addr = entry;
2052 ent->end_addr = *iter + size; 2056 ent->end_addr = entry + size;
2053 INIT_LIST_HEAD(&ent->list); 2057 INIT_LIST_HEAD(&ent->list);
2054 list_add_tail(&ent->list, &kprobe_blacklist); 2058 list_add_tail(&ent->list, &kprobe_blacklist);
2055 } 2059 }
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
14 * called from interrupt context and we have preemption disabled while 14 * called from interrupt context and we have preemption disabled while
15 * spinning. 15 * spinning.
16 */ 16 */
17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); 17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
18
19/*
20 * We use the value 0 to represent "no CPU", thus the encoded value
21 * will be the CPU number incremented by 1.
22 */
23static inline int encode_cpu(int cpu_nr)
24{
25 return cpu_nr + 1;
26}
27
28static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
29{
30 int cpu_nr = encoded_cpu_val - 1;
31
32 return per_cpu_ptr(&osq_node, cpu_nr);
33}
18 34
19/* 35/*
20 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. 36 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
21 * Can return NULL in case we were the last queued and we updated @lock instead. 37 * Can return NULL in case we were the last queued and we updated @lock instead.
22 */ 38 */
23static inline struct optimistic_spin_queue * 39static inline struct optimistic_spin_node *
24osq_wait_next(struct optimistic_spin_queue **lock, 40osq_wait_next(struct optimistic_spin_queue *lock,
25 struct optimistic_spin_queue *node, 41 struct optimistic_spin_node *node,
26 struct optimistic_spin_queue *prev) 42 struct optimistic_spin_node *prev)
27{ 43{
28 struct optimistic_spin_queue *next = NULL; 44 struct optimistic_spin_node *next = NULL;
45 int curr = encode_cpu(smp_processor_id());
46 int old;
47
48 /*
49 * If there is a prev node in queue, then the 'old' value will be
50 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
51 * we're currently last in queue, then the queue will then become empty.
52 */
53 old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
29 54
30 for (;;) { 55 for (;;) {
31 if (*lock == node && cmpxchg(lock, node, prev) == node) { 56 if (atomic_read(&lock->tail) == curr &&
57 atomic_cmpxchg(&lock->tail, curr, old) == curr) {
32 /* 58 /*
33 * We were the last queued, we moved @lock back. @prev 59 * We were the last queued, we moved @lock back. @prev
34 * will now observe @lock and will complete its 60 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
59 return next; 85 return next;
60} 86}
61 87
62bool osq_lock(struct optimistic_spin_queue **lock) 88bool osq_lock(struct optimistic_spin_queue *lock)
63{ 89{
64 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 90 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
65 struct optimistic_spin_queue *prev, *next; 91 struct optimistic_spin_node *prev, *next;
92 int curr = encode_cpu(smp_processor_id());
93 int old;
66 94
67 node->locked = 0; 95 node->locked = 0;
68 node->next = NULL; 96 node->next = NULL;
97 node->cpu = curr;
69 98
70 node->prev = prev = xchg(lock, node); 99 old = atomic_xchg(&lock->tail, curr);
71 if (likely(prev == NULL)) 100 if (old == OSQ_UNLOCKED_VAL)
72 return true; 101 return true;
73 102
103 prev = decode_cpu(old);
104 node->prev = prev;
74 ACCESS_ONCE(prev->next) = node; 105 ACCESS_ONCE(prev->next) = node;
75 106
76 /* 107 /*
@@ -149,20 +180,21 @@ unqueue:
149 return false; 180 return false;
150} 181}
151 182
152void osq_unlock(struct optimistic_spin_queue **lock) 183void osq_unlock(struct optimistic_spin_queue *lock)
153{ 184{
154 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 185 struct optimistic_spin_node *node, *next;
155 struct optimistic_spin_queue *next; 186 int curr = encode_cpu(smp_processor_id());
156 187
157 /* 188 /*
158 * Fast path for the uncontended case. 189 * Fast path for the uncontended case.
159 */ 190 */
160 if (likely(cmpxchg(lock, node, NULL) == node)) 191 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
161 return; 192 return;
162 193
163 /* 194 /*
164 * Second most likely case. 195 * Second most likely case.
165 */ 196 */
197 node = this_cpu_ptr(&osq_node);
166 next = xchg(&node->next, NULL); 198 next = xchg(&node->next, NULL);
167 if (next) { 199 if (next) {
168 ACCESS_ONCE(next->locked) = 1; 200 ACCESS_ONCE(next->locked) = 1;
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
118 * mutex_lock()/rwsem_down_{read,write}() etc. 118 * mutex_lock()/rwsem_down_{read,write}() etc.
119 */ 119 */
120 120
121struct optimistic_spin_queue { 121struct optimistic_spin_node {
122 struct optimistic_spin_queue *next, *prev; 122 struct optimistic_spin_node *next, *prev;
123 int locked; /* 1 if lock acquired */ 123 int locked; /* 1 if lock acquired */
124 int cpu; /* encoded CPU # value */
124}; 125};
125 126
126extern bool osq_lock(struct optimistic_spin_queue **lock); 127extern bool osq_lock(struct optimistic_spin_queue *lock);
127extern void osq_unlock(struct optimistic_spin_queue **lock); 128extern void osq_unlock(struct optimistic_spin_queue *lock);
128 129
129#endif /* __LINUX_MCS_SPINLOCK_H */ 130#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
60 INIT_LIST_HEAD(&lock->wait_list); 60 INIT_LIST_HEAD(&lock->wait_list);
61 mutex_clear_owner(lock); 61 mutex_clear_owner(lock);
62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
63 lock->osq = NULL; 63 osq_lock_init(&lock->osq);
64#endif 64#endif
65 65
66 debug_mutex_init(lock, name, key); 66 debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
26 unsigned long flags; 26 unsigned long flags;
27 27
28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
29 ret = (sem->activity != 0); 29 ret = (sem->count != 0);
30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
31 } 31 }
32 return ret; 32 return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
46 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 46 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
47 lockdep_init_map(&sem->dep_map, name, key, 0); 47 lockdep_init_map(&sem->dep_map, name, key, 0);
48#endif 48#endif
49 sem->activity = 0; 49 sem->count = 0;
50 raw_spin_lock_init(&sem->wait_lock); 50 raw_spin_lock_init(&sem->wait_lock);
51 INIT_LIST_HEAD(&sem->wait_list); 51 INIT_LIST_HEAD(&sem->wait_list);
52} 52}
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
95 waiter = list_entry(next, struct rwsem_waiter, list); 95 waiter = list_entry(next, struct rwsem_waiter, list);
96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); 96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
97 97
98 sem->activity += woken; 98 sem->count += woken;
99 99
100 out: 100 out:
101 return sem; 101 return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
126 126
127 raw_spin_lock_irqsave(&sem->wait_lock, flags); 127 raw_spin_lock_irqsave(&sem->wait_lock, flags);
128 128
129 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 129 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
130 /* granted */ 130 /* granted */
131 sem->activity++; 131 sem->count++;
132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
133 goto out; 133 goto out;
134 } 134 }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
170 170
171 raw_spin_lock_irqsave(&sem->wait_lock, flags); 171 raw_spin_lock_irqsave(&sem->wait_lock, flags);
172 172
173 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 173 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
174 /* granted */ 174 /* granted */
175 sem->activity++; 175 sem->count++;
176 ret = 1; 176 ret = 1;
177 } 177 }
178 178
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
206 * itself into sleep and waiting for system woke it or someone 206 * itself into sleep and waiting for system woke it or someone
207 * else in the head of the wait list up. 207 * else in the head of the wait list up.
208 */ 208 */
209 if (sem->activity == 0) 209 if (sem->count == 0)
210 break; 210 break;
211 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 211 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
214 raw_spin_lock_irqsave(&sem->wait_lock, flags); 214 raw_spin_lock_irqsave(&sem->wait_lock, flags);
215 } 215 }
216 /* got the lock */ 216 /* got the lock */
217 sem->activity = -1; 217 sem->count = -1;
218 list_del(&waiter.list); 218 list_del(&waiter.list);
219 219
220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
235 235
236 raw_spin_lock_irqsave(&sem->wait_lock, flags); 236 raw_spin_lock_irqsave(&sem->wait_lock, flags);
237 237
238 if (sem->activity == 0) { 238 if (sem->count == 0) {
239 /* got the lock */ 239 /* got the lock */
240 sem->activity = -1; 240 sem->count = -1;
241 ret = 1; 241 ret = 1;
242 } 242 }
243 243
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
255 255
256 raw_spin_lock_irqsave(&sem->wait_lock, flags); 256 raw_spin_lock_irqsave(&sem->wait_lock, flags);
257 257
258 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 258 if (--sem->count == 0 && !list_empty(&sem->wait_list))
259 sem = __rwsem_wake_one_writer(sem); 259 sem = __rwsem_wake_one_writer(sem);
260 260
261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
270 270
271 raw_spin_lock_irqsave(&sem->wait_lock, flags); 271 raw_spin_lock_irqsave(&sem->wait_lock, flags);
272 272
273 sem->activity = 0; 273 sem->count = 0;
274 if (!list_empty(&sem->wait_list)) 274 if (!list_empty(&sem->wait_list))
275 sem = __rwsem_do_wake(sem, 1); 275 sem = __rwsem_do_wake(sem, 1);
276 276
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
287 287
288 raw_spin_lock_irqsave(&sem->wait_lock, flags); 288 raw_spin_lock_irqsave(&sem->wait_lock, flags);
289 289
290 sem->activity = 1; 290 sem->count = 1;
291 if (!list_empty(&sem->wait_list)) 291 if (!list_empty(&sem->wait_list))
292 sem = __rwsem_do_wake(sem, 0); 292 sem = __rwsem_do_wake(sem, 0);
293 293
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
82 sem->count = RWSEM_UNLOCKED_VALUE; 82 sem->count = RWSEM_UNLOCKED_VALUE;
83 raw_spin_lock_init(&sem->wait_lock); 83 raw_spin_lock_init(&sem->wait_lock);
84 INIT_LIST_HEAD(&sem->wait_list); 84 INIT_LIST_HEAD(&sem->wait_list);
85#ifdef CONFIG_SMP 85#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
86 sem->owner = NULL; 86 sem->owner = NULL;
87 sem->osq = NULL; 87 osq_lock_init(&sem->osq);
88#endif 88#endif
89} 89}
90 90
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
262 return false; 262 return false;
263} 263}
264 264
265#ifdef CONFIG_SMP 265#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
266/* 266/*
267 * Try to acquire write lock before the writer has been put on wait queue. 267 * Try to acquire write lock before the writer has been put on wait queue.
268 */ 268 */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
286{ 286{
287 struct task_struct *owner; 287 struct task_struct *owner;
288 bool on_cpu = true; 288 bool on_cpu = false;
289 289
290 if (need_resched()) 290 if (need_resched())
291 return 0; 291 return false;
292 292
293 rcu_read_lock(); 293 rcu_read_lock();
294 owner = ACCESS_ONCE(sem->owner); 294 owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
297 rcu_read_unlock(); 297 rcu_read_unlock();
298 298
299 /* 299 /*
300 * If sem->owner is not set, the rwsem owner may have 300 * If sem->owner is not set, yet we have just recently entered the
301 * just acquired it and not set the owner yet or the rwsem 301 * slowpath, then there is a possibility reader(s) may have the lock.
302 * has been released. 302 * To be safe, avoid spinning in these situations.
303 */ 303 */
304 return on_cpu; 304 return on_cpu;
305} 305}
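
[Note: the rwsem-xadd.c hunks above gate optimistic spinning on CONFIG_RWSEM_SPIN_ON_OWNER and flip rwsem_can_spin_on_owner() to default to "do not spin" when no owner is recorded, since an unset owner may mean readers hold the lock. Below is a minimal userspace sketch of that gate, assuming a toy semaphore with only an atomic owner pointer; the names are illustrative, not the kernel's.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_rwsem {
    _Atomic(void *) owner;            /* writer holding the lock, or NULL */
};

/* Spin only when a writer owner is on record; a NULL owner may mean
 * readers hold the lock, so the conservative answer is "no". */
static bool can_spin_on_owner(struct toy_rwsem *sem)
{
    return atomic_load(&sem->owner) != NULL;   /* owner->on_cpu check omitted */
}

int main(void)
{
    struct toy_rwsem sem = { .owner = NULL };

    printf("no owner recorded -> spin? %d\n", can_spin_on_owner(&sem));
    atomic_store(&sem.owner, (void *)&sem);    /* pretend a writer took it */
    printf("writer owner set  -> spin? %d\n", can_spin_on_owner(&sem));
    return 0;
}
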
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14 14
15#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) 15#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
16static inline void rwsem_set_owner(struct rw_semaphore *sem) 16static inline void rwsem_set_owner(struct rw_semaphore *sem)
17{ 17{
18 sem->owner = current; 18 sem->owner = current;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83e2369..4ee194eb524b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@ void thaw_processes(void)
186 186
187 printk("Restarting tasks ... "); 187 printk("Restarting tasks ... ");
188 188
189 __usermodehelper_set_disable_depth(UMH_FREEZING);
189 thaw_workqueues(); 190 thaw_workqueues();
190 191
191 read_lock(&tasklist_lock); 192 read_lock(&tasklist_lock);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822f732a..ed35a4790afe 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
306 error = suspend_ops->begin(state); 306 error = suspend_ops->begin(state);
307 if (error) 307 if (error)
308 goto Close; 308 goto Close;
309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
310 error = freeze_ops->begin(); 310 error = freeze_ops->begin();
311 if (error) 311 if (error)
312 goto Close; 312 goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
335 Close: 335 Close:
336 if (need_suspend_ops(state) && suspend_ops->end) 336 if (need_suspend_ops(state) && suspend_ops->end)
337 suspend_ops->end(); 337 suspend_ops->end();
338 else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 338 else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
339 freeze_ops->end(); 339 freeze_ops->end();
340 340
341 return error; 341 return error;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..625d0b0cd75a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
206 rdp->passed_quiesce = 1; 206 rdp->passed_quiesce = 1;
207} 207}
208 208
209static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
210
211static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
212 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
213 .dynticks = ATOMIC_INIT(1),
214#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
215 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
216 .dynticks_idle = ATOMIC_INIT(1),
217#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
218};
219
220/*
221 * Let the RCU core know that this CPU has gone through the scheduler,
222 * which is a quiescent state. This is called when the need for a
223 * quiescent state is urgent, so we burn an atomic operation and full
224 * memory barriers to let the RCU core know about it, regardless of what
225 * this CPU might (or might not) do in the near future.
226 *
227 * We inform the RCU core by emulating a zero-duration dyntick-idle
228 * period, which we in turn do by incrementing the ->dynticks counter
229 * by two.
230 */
231static void rcu_momentary_dyntick_idle(void)
232{
233 unsigned long flags;
234 struct rcu_data *rdp;
235 struct rcu_dynticks *rdtp;
236 int resched_mask;
237 struct rcu_state *rsp;
238
239 local_irq_save(flags);
240
241 /*
242 * Yes, we can lose flag-setting operations. This is OK, because
243 * the flag will be set again after some delay.
244 */
245 resched_mask = raw_cpu_read(rcu_sched_qs_mask);
246 raw_cpu_write(rcu_sched_qs_mask, 0);
247
248 /* Find the flavor that needs a quiescent state. */
249 for_each_rcu_flavor(rsp) {
250 rdp = raw_cpu_ptr(rsp->rda);
251 if (!(resched_mask & rsp->flavor_mask))
252 continue;
253 smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
254 if (ACCESS_ONCE(rdp->mynode->completed) !=
255 ACCESS_ONCE(rdp->cond_resched_completed))
256 continue;
257
258 /*
259 * Pretend to be momentarily idle for the quiescent state.
260 * This allows the grace-period kthread to record the
261 * quiescent state, with no need for this CPU to do anything
262 * further.
263 */
264 rdtp = this_cpu_ptr(&rcu_dynticks);
265 smp_mb__before_atomic(); /* Earlier stuff before QS. */
266 atomic_add(2, &rdtp->dynticks); /* QS. */
267 smp_mb__after_atomic(); /* Later stuff after QS. */
268 break;
269 }
270 local_irq_restore(flags);
271}
272
209/* 273/*
210 * Note a context switch. This is a quiescent state for RCU-sched, 274 * Note a context switch. This is a quiescent state for RCU-sched,
211 * and requires special handling for preemptible RCU. 275 * and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
216 trace_rcu_utilization(TPS("Start context switch")); 280 trace_rcu_utilization(TPS("Start context switch"));
217 rcu_sched_qs(cpu); 281 rcu_sched_qs(cpu);
218 rcu_preempt_note_context_switch(cpu); 282 rcu_preempt_note_context_switch(cpu);
283 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
284 rcu_momentary_dyntick_idle();
219 trace_rcu_utilization(TPS("End context switch")); 285 trace_rcu_utilization(TPS("End context switch"));
220} 286}
221EXPORT_SYMBOL_GPL(rcu_note_context_switch); 287EXPORT_SYMBOL_GPL(rcu_note_context_switch);
222 288
223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
225 .dynticks = ATOMIC_INIT(1),
226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
228 .dynticks_idle = ATOMIC_INIT(1),
229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
230};
231
232static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 289static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
233static long qhimark = 10000; /* If this many pending, ignore blimit. */ 290static long qhimark = 10000; /* If this many pending, ignore blimit. */
234static long qlowmark = 100; /* Once only this many pending, use blimit. */ 291static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
243module_param(jiffies_till_first_fqs, ulong, 0644); 300module_param(jiffies_till_first_fqs, ulong, 0644);
244module_param(jiffies_till_next_fqs, ulong, 0644); 301module_param(jiffies_till_next_fqs, ulong, 0644);
245 302
303/*
304 * How long the grace period must be before we start recruiting
305 * quiescent-state help from rcu_note_context_switch().
306 */
307static ulong jiffies_till_sched_qs = HZ / 20;
308module_param(jiffies_till_sched_qs, ulong, 0644);
309
246static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 310static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
247 struct rcu_data *rdp); 311 struct rcu_data *rdp);
248static void force_qs_rnp(struct rcu_state *rsp, 312static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
853 bool *isidle, unsigned long *maxj) 917 bool *isidle, unsigned long *maxj)
854{ 918{
855 unsigned int curr; 919 unsigned int curr;
920 int *rcrmp;
856 unsigned int snap; 921 unsigned int snap;
857 922
858 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); 923 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
893 } 958 }
894 959
895 /* 960 /*
896 * There is a possibility that a CPU in adaptive-ticks state 961 * A CPU running for an extended time within the kernel can
897 * might run in the kernel with the scheduling-clock tick disabled 962 * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
898 * for an extended time period. Invoke rcu_kick_nohz_cpu() to 963 * even context-switching back and forth between a pair of
899 * force the CPU to restart the scheduling-clock tick in this 964 * in-kernel CPU-bound tasks cannot advance grace periods.
900 * CPU is in this state. 965 * So if the grace period is old enough, make the CPU pay attention.
901 */ 966 * Note that the unsynchronized assignments to the per-CPU
902 rcu_kick_nohz_cpu(rdp->cpu); 967 * rcu_sched_qs_mask variable are safe. Yes, setting of
903 968 * bits can be lost, but they will be set again on the next
904 /* 969 * force-quiescent-state pass. So lost bit sets do not result
905 * Alternatively, the CPU might be running in the kernel 970 * in incorrect behavior, merely in a grace period lasting
906 * for an extended period of time without a quiescent state. 971 * a few jiffies longer than it might otherwise. Because
907 * Attempt to force the CPU through the scheduler to gain the 972 * there are at most four threads involved, and because the
908 * needed quiescent state, but only if the grace period has gone 973 * updates are only once every few jiffies, the probability of
909 * on for an uncommonly long time. If there are many stuck CPUs, 974 * lossage (and thus of slight grace-period extension) is
910 * we will beat on the first one until it gets unstuck, then move 975 * quite low.
911 * to the next. Only do this for the primary flavor of RCU. 976 *
977 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
978 * is set too high, we override with half of the RCU CPU stall
979 * warning delay.
912 */ 980 */
913 if (rdp->rsp == rcu_state_p && 981 rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
982 if (ULONG_CMP_GE(jiffies,
983 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
914 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { 984 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
915 rdp->rsp->jiffies_resched += 5; 985 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
916 resched_cpu(rdp->cpu); 986 ACCESS_ONCE(rdp->cond_resched_completed) =
987 ACCESS_ONCE(rdp->mynode->completed);
988 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
989 ACCESS_ONCE(*rcrmp) =
990 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
991 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
992 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
993 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
994 /* Time to beat on that CPU again! */
995 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
996 rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
997 }
917 } 998 }
918 999
919 return 0; 1000 return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3491 "rcu_node_fqs_1", 3572 "rcu_node_fqs_1",
3492 "rcu_node_fqs_2", 3573 "rcu_node_fqs_2",
3493 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ 3574 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
3575 static u8 fl_mask = 0x1;
3494 int cpustride = 1; 3576 int cpustride = 1;
3495 int i; 3577 int i;
3496 int j; 3578 int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3509 for (i = 1; i < rcu_num_lvls; i++) 3591 for (i = 1; i < rcu_num_lvls; i++)
3510 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; 3592 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3511 rcu_init_levelspread(rsp); 3593 rcu_init_levelspread(rsp);
3594 rsp->flavor_mask = fl_mask;
3595 fl_mask <<= 1;
3512 3596
3513 /* Initialize the elements themselves, starting from the leaves. */ 3597 /* Initialize the elements themselves, starting from the leaves. */
3514 3598
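
[Note: the rcu/tree.c hunks above add rcu_momentary_dyntick_idle(), which reports a quiescent state by bumping the per-CPU ->dynticks counter by two, so the grace-period machinery sees the counter move without the CPU ever appearing idle. The standalone sketch below models just that counter trick, assuming a single counter whose low bit means "idle"; it is an illustration, not the RCU implementation.]

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int dynticks = 1;     /* odd value: CPU is not idle */

static unsigned int gp_snapshot(void)
{
    return atomic_load(&dynticks);            /* grace-period side snapshots */
}

static void momentary_quiescent_state(void)
{
    atomic_fetch_add(&dynticks, 2);           /* parity unchanged, value moved */
}

int main(void)
{
    unsigned int snap = gp_snapshot();

    momentary_quiescent_state();
    /* Any movement since the snapshot is taken as evidence that this
     * CPU passed through a quiescent state. */
    printf("quiescent state observed: %s\n",
           gp_snapshot() != snap ? "yes" : "no");
    return 0;
}
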
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..0f69a79c5b7d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@ struct rcu_data {
307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
309 unsigned long offline_fqs; /* Kicked due to being offline. */ 309 unsigned long offline_fqs; /* Kicked due to being offline. */
310 unsigned long cond_resched_completed;
311 /* Grace period that needs help */
312 /* from cond_resched(). */
310 313
311 /* 5) __rcu_pending() statistics. */ 314 /* 5) __rcu_pending() statistics. */
312 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 315 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
392 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ 395 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
393 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ 396 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
394 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 397 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
398 u8 flavor_mask; /* bit in flavor mask. */
395 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ 399 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
396 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 400 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
397 void (*func)(struct rcu_head *head)); 401 void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
563static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 567static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
564static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 568static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
565static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); 569static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
566static void rcu_kick_nohz_cpu(int cpu); 570static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
567static bool init_nocb_callback_list(struct rcu_data *rdp); 571static bool init_nocb_callback_list(struct rcu_data *rdp);
568static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); 572static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
569static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); 573static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..02ac0fb186b8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2404 * if an adaptive-ticks CPU is failing to respond to the current grace 2404 * if an adaptive-ticks CPU is failing to respond to the current grace
2405 * period and has not be idle from an RCU perspective, kick it. 2405 * period and has not be idle from an RCU perspective, kick it.
2406 */ 2406 */
2407static void rcu_kick_nohz_cpu(int cpu) 2407static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2408{ 2408{
2409#ifdef CONFIG_NO_HZ_FULL 2409#ifdef CONFIG_NO_HZ_FULL
2410 if (tick_nohz_full_cpu(cpu)) 2410 if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..bc7883570530 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
200EXPORT_SYMBOL_GPL(wait_rcu_gp); 200EXPORT_SYMBOL_GPL(wait_rcu_gp);
201 201
202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
203static inline void debug_init_rcu_head(struct rcu_head *head) 203void init_rcu_head(struct rcu_head *head)
204{ 204{
205 debug_object_init(head, &rcuhead_debug_descr); 205 debug_object_init(head, &rcuhead_debug_descr);
206} 206}
207 207
208static inline void debug_rcu_head_free(struct rcu_head *head) 208void destroy_rcu_head(struct rcu_head *head)
209{ 209{
210 debug_object_free(head, &rcuhead_debug_descr); 210 debug_object_free(head, &rcuhead_debug_descr);
211} 211}
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
350early_initcall(check_cpu_stall_init); 350early_initcall(check_cpu_stall_init);
351 351
352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
353
354/*
355 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
356 */
357
358DEFINE_PER_CPU(int, rcu_cond_resched_count);
359
360/*
361 * Report a set of RCU quiescent states, for use by cond_resched()
362 * and friends. Out of line due to being called infrequently.
363 */
364void rcu_resched(void)
365{
366 preempt_disable();
367 __this_cpu_write(rcu_cond_resched_count, 0);
368 rcu_note_context_switch(smp_processor_id());
369 preempt_enable();
370}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
4147 4147
4148int __sched _cond_resched(void) 4148int __sched _cond_resched(void)
4149{ 4149{
4150 rcu_cond_resched();
4151 if (should_resched()) { 4150 if (should_resched()) {
4152 __cond_resched(); 4151 __cond_resched();
4153 return 1; 4152 return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
4166 */ 4165 */
4167int __cond_resched_lock(spinlock_t *lock) 4166int __cond_resched_lock(spinlock_t *lock)
4168{ 4167{
4169 bool need_rcu_resched = rcu_should_resched();
4170 int resched = should_resched(); 4168 int resched = should_resched();
4171 int ret = 0; 4169 int ret = 0;
4172 4170
4173 lockdep_assert_held(lock); 4171 lockdep_assert_held(lock);
4174 4172
4175 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4173 if (spin_needbreak(lock) || resched) {
4176 spin_unlock(lock); 4174 spin_unlock(lock);
4177 if (resched) 4175 if (resched)
4178 __cond_resched(); 4176 __cond_resched();
4179 else if (unlikely(need_rcu_resched))
4180 rcu_resched();
4181 else 4177 else
4182 cpu_relax(); 4178 cpu_relax();
4183 ret = 1; 4179 ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
4191{ 4187{
4192 BUG_ON(!in_softirq()); 4188 BUG_ON(!in_softirq());
4193 4189
4194 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4195 if (should_resched()) { 4190 if (should_resched()) {
4196 local_bh_enable(); 4191 local_bh_enable();
4197 __cond_resched(); 4192 __cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f9773bb60..627b3c34b821 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
608 608
609 avg_atom = p->se.sum_exec_runtime; 609 avg_atom = p->se.sum_exec_runtime;
610 if (nr_switches) 610 if (nr_switches)
611 do_div(avg_atom, nr_switches); 611 avg_atom = div64_ul(avg_atom, nr_switches);
612 else 612 else
613 avg_atom = -1LL; 613 avg_atom = -1LL;
614 614
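
[Note: the sched/debug.c one-liner above replaces do_div() with div64_ul() because nr_switches is a 64-bit quantity and do_div() takes only a 32-bit divisor, silently truncating larger values. The standalone helpers below imitate the two calling conventions to show where the truncation bites; they are approximations of the kernel helpers, not the real ones.]

#include <stdint.h>
#include <stdio.h>

/* do_div()-like: divides *dividend in place by a 32-bit divisor and
 * returns the remainder -- the shape of the old call site. */
static uint32_t do_div_u32(uint64_t *dividend, uint32_t divisor)
{
    uint32_t rem = (uint32_t)(*dividend % divisor);
    *dividend /= divisor;
    return rem;
}

/* div64_ul()-like: full 64-bit divisor, returns the quotient -- the
 * shape of the replacement. */
static uint64_t div64_u64_ul(uint64_t dividend, uint64_t divisor)
{
    return dividend / divisor;
}

int main(void)
{
    uint64_t sum_exec = 123456789012345ULL;
    uint64_t switches = 0x100000007ULL;        /* does not fit in 32 bits */
    uint64_t a = sum_exec;
    uint64_t b;

    do_div_u32(&a, (uint32_t)switches);        /* divisor silently truncated */
    b = div64_u64_ul(sum_exec, switches);

    printf("truncated-divisor result %llu vs full-width result %llu\n",
           (unsigned long long)a, (unsigned long long)b);
    return 0;
}
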
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65a430d..fe75444ae7ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
585 struct itimerspec *new_setting, 585 struct itimerspec *new_setting,
586 struct itimerspec *old_setting) 586 struct itimerspec *old_setting)
587{ 587{
588 ktime_t exp;
589
588 if (!rtcdev) 590 if (!rtcdev)
589 return -ENOTSUPP; 591 return -ENOTSUPP;
590 592
593 if (flags & ~TIMER_ABSTIME)
594 return -EINVAL;
595
591 if (old_setting) 596 if (old_setting)
592 alarm_timer_get(timr, old_setting); 597 alarm_timer_get(timr, old_setting);
593 598
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
597 602
598 /* start the timer */ 603 /* start the timer */
599 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); 604 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
600 alarm_start(&timr->it.alarm.alarmtimer, 605 exp = timespec_to_ktime(new_setting->it_value);
601 timespec_to_ktime(new_setting->it_value)); 606 /* Convert (if necessary) to absolute time */
607 if (flags != TIMER_ABSTIME) {
608 ktime_t now;
609
610 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
611 exp = ktime_add(now, exp);
612 }
613
614 alarm_start(&timr->it.alarm.alarmtimer, exp);
602 return 0; 615 return 0;
603} 616}
604 617
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
730 if (!alarmtimer_get_rtcdev()) 743 if (!alarmtimer_get_rtcdev())
731 return -ENOTSUPP; 744 return -ENOTSUPP;
732 745
746 if (flags & ~TIMER_ABSTIME)
747 return -EINVAL;
748
733 if (!capable(CAP_WAKE_ALARM)) 749 if (!capable(CAP_WAKE_ALARM))
734 return -EPERM; 750 return -EPERM;
735 751
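
[Note: the alarmtimer hunks above do two things: reject flag bits other than TIMER_ABSTIME with -EINVAL, and convert a relative expiry to an absolute one by adding "now" from the timer's own time base before starting the alarm. The POSIX-flavoured sketch below mirrors that shape against CLOCK_REALTIME; it is a stand-in, not the alarmtimer API.]

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int set_expiry(int flags, struct timespec *exp)
{
    if (flags & ~TIMER_ABSTIME)
        return -EINVAL;                    /* unknown flag bits: refuse */

    if (!(flags & TIMER_ABSTIME)) {        /* relative: fold in "now" */
        struct timespec now;

        clock_gettime(CLOCK_REALTIME, &now);
        exp->tv_sec  += now.tv_sec;
        exp->tv_nsec += now.tv_nsec;
        if (exp->tv_nsec >= 1000000000L) {
            exp->tv_sec++;
            exp->tv_nsec -= 1000000000L;
        }
    }
    return 0;                              /* exp is now absolute */
}

int main(void)
{
    struct timespec exp = { .tv_sec = 5, .tv_nsec = 0 };   /* "in 5 seconds" */
    int rc = set_expiry(0, &exp);

    printf("rc=%d, absolute expiry at %lld s\n", rc, (long long)exp.tv_sec);
    return 0;
}
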
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3ed675..ac9d1dad630b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
265 func = ftrace_ops_list_func; 265 func = ftrace_ops_list_func;
266 } 266 }
267 267
268 update_function_graph_func();
269
268 /* If there's no change, then do nothing more here */ 270 /* If there's no change, then do nothing more here */
269 if (ftrace_trace_function == func) 271 if (ftrace_trace_function == func)
270 return; 272 return;
271 273
272 update_function_graph_func();
273
274 /* 274 /*
275 * If we are using the list function, it doesn't care 275 * If we are using the list function, it doesn't care
276 * about the function_trace_ops. 276 * about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..ff7027199a9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
616 struct ring_buffer_per_cpu *cpu_buffer; 616 struct ring_buffer_per_cpu *cpu_buffer;
617 struct rb_irq_work *work; 617 struct rb_irq_work *work;
618 618
619 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
620 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
621 return POLLIN | POLLRDNORM;
622
623 if (cpu == RING_BUFFER_ALL_CPUS) 619 if (cpu == RING_BUFFER_ALL_CPUS)
624 work = &buffer->irq_work; 620 work = &buffer->irq_work;
625 else { 621 else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f243444a3772..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
466 struct print_entry *entry; 466 struct print_entry *entry;
467 unsigned long irq_flags; 467 unsigned long irq_flags;
468 int alloc; 468 int alloc;
469 int pc;
470
471 if (!(trace_flags & TRACE_ITER_PRINTK))
472 return 0;
473
474 pc = preempt_count();
469 475
470 if (unlikely(tracing_selftest_running || tracing_disabled)) 476 if (unlikely(tracing_selftest_running || tracing_disabled))
471 return 0; 477 return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
475 local_save_flags(irq_flags); 481 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer; 482 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count()); 484 irq_flags, pc);
479 if (!event) 485 if (!event)
480 return 0; 486 return 0;
481 487
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
492 entry->buf[size] = '\0'; 498 entry->buf[size] = '\0';
493 499
494 __buffer_unlock_commit(buffer, event); 500 __buffer_unlock_commit(buffer, event);
501 ftrace_trace_stack(buffer, irq_flags, 4, pc);
495 502
496 return size; 503 return size;
497} 504}
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
509 struct bputs_entry *entry; 516 struct bputs_entry *entry;
510 unsigned long irq_flags; 517 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry); 518 int size = sizeof(struct bputs_entry);
519 int pc;
520
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
524 pc = preempt_count();
512 525
513 if (unlikely(tracing_selftest_running || tracing_disabled)) 526 if (unlikely(tracing_selftest_running || tracing_disabled))
514 return 0; 527 return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
516 local_save_flags(irq_flags); 529 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer; 530 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 531 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count()); 532 irq_flags, pc);
520 if (!event) 533 if (!event)
521 return 0; 534 return 0;
522 535
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
525 entry->str = str; 538 entry->str = str;
526 539
527 __buffer_unlock_commit(buffer, event); 540 __buffer_unlock_commit(buffer, event);
541 ftrace_trace_stack(buffer, irq_flags, 4, pc);
528 542
529 return 1; 543 return 1;
530} 544}
@@ -809,7 +823,7 @@ static struct {
809 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
810 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
811 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
812 { trace_clock_jiffies, "uptime", 1 }, 826 { trace_clock_jiffies, "uptime", 0 },
813 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
814 ARCH_TRACE_CLOCKS 828 ARCH_TRACE_CLOCKS
815}; 829};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
59 59
60/* 60/*
61 * trace_jiffy_clock(): Simply use jiffies as a clock counter. 61 * trace_jiffy_clock(): Simply use jiffies as a clock counter.
62 * Note that this use of jiffies_64 is not completely safe on
63 * 32-bit systems. But the window is tiny, and the effect if
64 * we are affected is that we will have an obviously bogus
65 * timestamp on a trace event - i.e. not life threatening.
62 */ 66 */
63u64 notrace trace_clock_jiffies(void) 67u64 notrace trace_clock_jiffies(void)
64{ 68{
65 u64 jiffy = jiffies - INITIAL_JIFFIES; 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
66
67 /* Return nsecs */
68 return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
69} 70}
70 71
71/* 72/*
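
[Note: the trace_clock_jiffies() change above switches the "uptime" trace clock from a hand-rolled jiffies-to-nanoseconds conversion to jiffies_64_to_clock_t(), which is also why its "in nanoseconds" flag is cleared in the trace.c clock table. The small program below contrasts the two unit mappings for an assumed HZ of 100 and USER_HZ of 100; the constants are illustrative only.]

#include <stdint.h>
#include <stdio.h>

#define HZ      100     /* assumed kernel tick rate for the example */
#define USER_HZ 100     /* assumed clock_t rate exposed to userspace */

int main(void)
{
    uint64_t jiffies = 250;                               /* 2.5 s of ticks */
    uint64_t old_ns  = jiffies * (1000000000ULL / HZ);    /* old: nanoseconds */
    uint64_t new_ct  = jiffies * USER_HZ / HZ;            /* new: clock_t ticks */

    printf("%llu jiffies -> %llu ns (old) vs %llu clock_t units (new)\n",
           (unsigned long long)jiffies,
           (unsigned long long)old_ns,
           (unsigned long long)new_ct);
    return 0;
}
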
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3bca8c..2de53628689f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
470 470
471 list_del(&file->list); 471 list_del(&file->list);
472 remove_subsystem(file->system); 472 remove_subsystem(file->system);
473 free_event_filter(file->filter);
473 kmem_cache_free(file_cachep, file); 474 kmem_cache_free(file_cachep, file);
474} 475}
475 476
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6203d2900877..35974ac69600 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3284,6 +3284,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
3284 } 3284 }
3285 } 3285 }
3286 3286
3287 dev_set_uevent_suppress(&wq_dev->dev, false);
3287 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 3288 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3288 return 0; 3289 return 0;
3289} 3290}
@@ -4879,7 +4880,7 @@ static void __init wq_numa_init(void)
4879 BUG_ON(!tbl); 4880 BUG_ON(!tbl);
4880 4881
4881 for_each_node(node) 4882 for_each_node(node)
4882 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 4883 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
4883 node_online(node) ? node : NUMA_NO_NODE)); 4884 node_online(node) ? node : NUMA_NO_NODE));
4884 4885
4885 for_each_possible_cpu(cpu) { 4886 for_each_possible_cpu(cpu) {
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658eb..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
191 191
192 i %= num_online_cpus(); 192 i %= num_online_cpus();
193 193
194 if (!cpumask_of_node(numa_node)) { 194 if (numa_node == -1 || !cpumask_of_node(numa_node)) {
195 /* Use all online cpu's for non numa aware system */ 195 /* Use all online cpu's for non numa aware system */
196 cpumask_copy(mask, cpu_online_mask); 196 cpumask_copy(mask, cpu_online_mask);
197 } else { 197 } else {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..9221c02ed9e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2604 } else { 2604 } else {
2605 if (cow) 2605 if (cow)
2606 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2607 entry = huge_ptep_get(src_pte);
2607 ptepage = pte_page(entry); 2608 ptepage = pte_page(entry);
2608 get_page(ptepage); 2609 get_page(ptepage);
2609 page_dup_rmap(ptepage); 2610 page_dup_rmap(ptepage);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..7211a73ba14d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435 if (av == NULL) /* Not actually mapped anymore */ 435 if (av == NULL) /* Not actually mapped anymore */
436 return; 436 return;
437 437
438 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff = page_to_pgoff(page);
439 read_lock(&tasklist_lock); 439 read_lock(&tasklist_lock);
440 for_each_process (tsk) { 440 for_each_process (tsk) {
441 struct anon_vma_chain *vmac; 441 struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469 mutex_lock(&mapping->i_mmap_mutex); 469 mutex_lock(&mapping->i_mmap_mutex);
470 read_lock(&tasklist_lock); 470 read_lock(&tasklist_lock);
471 for_each_process(tsk) { 471 for_each_process(tsk) {
472 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 472 pgoff_t pgoff = page_to_pgoff(page);
473 struct task_struct *t = task_early_kill(tsk, force_early); 473 struct task_struct *t = task_early_kill(tsk, force_early);
474 474
475 if (!t) 475 if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..7e8d8205b610 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2882 * if page by the offset is not ready to be mapped (cold cache or 2882 * if page by the offset is not ready to be mapped (cold cache or
2883 * something). 2883 * something).
2884 */ 2884 */
2885 if (vma->vm_ops->map_pages && fault_around_pages() > 1) { 2885 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2886 fault_around_pages() > 1) {
2886 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2887 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2887 do_fault_around(vma, address, pte, pgoff, flags); 2888 do_fault_around(vma, address, pte, pgoff, flags);
2888 if (!pte_same(*pte, orig_pte)) 2889 if (!pte_same(*pte, orig_pte))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eb58de19f815..8f5330d74f47 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2139,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
2139 } else 2139 } else
2140 *new = *old; 2140 *new = *old;
2141 2141
2142 rcu_read_lock();
2143 if (current_cpuset_is_being_rebound()) { 2142 if (current_cpuset_is_being_rebound()) {
2144 nodemask_t mems = cpuset_mems_allowed(current); 2143 nodemask_t mems = cpuset_mems_allowed(current);
2145 if (new->flags & MPOL_F_REBINDING) 2144 if (new->flags & MPOL_F_REBINDING)
@@ -2147,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
2147 else 2146 else
2148 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 2147 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2149 } 2148 }
2150 rcu_read_unlock();
2151 atomic_set(&new->refcnt, 1); 2149 atomic_set(&new->refcnt, 1);
2152 return new; 2150 return new;
2153} 2151}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa91845..be6dbf995c0c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@ out:
988 * it. Otherwise, putback_lru_page() will drop the reference grabbed 988 * it. Otherwise, putback_lru_page() will drop the reference grabbed
989 * during isolation. 989 * during isolation.
990 */ 990 */
991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
992 ClearPageSwapBacked(newpage);
992 put_new_page(newpage, private); 993 put_new_page(newpage, private);
993 else 994 } else
994 putback_lru_page(newpage); 995 putback_lru_page(newpage);
995 996
996 if (result) { 997 if (result) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b898fd..8bcfe3ae20cb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6062,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6062} 6062}
6063 6063
6064/** 6064/**
6065 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 6065 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6066 * @page: The page within the block of interest 6066 * @page: The page within the block of interest
6067 * @start_bitidx: The first bit of interest to retrieve 6067 * @pfn: The target page frame number
6068 * @end_bitidx: The last bit of interest 6068 * @end_bitidx: The last bit of interest to retrieve
6069 * returns pageblock_bits flags 6069 * @mask: mask of bits that the caller is interested in
6070 *
6071 * Return: pageblock_bits flags
6070 */ 6072 */
6071unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, 6073unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6072 unsigned long end_bitidx, 6074 unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6091/** 6093/**
6092 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 6094 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6093 * @page: The page within the block of interest 6095 * @page: The page within the block of interest
6094 * @start_bitidx: The first bit of interest
6095 * @end_bitidx: The last bit of interest
6096 * @flags: The flags to set 6096 * @flags: The flags to set
6097 * @pfn: The target page frame number
6098 * @end_bitidx: The last bit of interest
6099 * @mask: mask of bits that the caller is interested in
6097 */ 6100 */
6098void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 6101void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6099 unsigned long pfn, 6102 unsigned long pfn,
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
517static inline unsigned long 517static inline unsigned long
518__vma_address(struct page *page, struct vm_area_struct *vma) 518__vma_address(struct page *page, struct vm_area_struct *vma)
519{ 519{
520 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 520 pgoff_t pgoff = page_to_pgoff(page);
521
522 if (unlikely(is_vm_hugetlb_page(vma)))
523 pgoff = page->index << huge_page_order(page_hstate(page));
524
525 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 521 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526} 522}
527 523
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1639static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) 1635static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1640{ 1636{
1641 struct anon_vma *anon_vma; 1637 struct anon_vma *anon_vma;
1642 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1638 pgoff_t pgoff = page_to_pgoff(page);
1643 struct anon_vma_chain *avc; 1639 struct anon_vma_chain *avc;
1644 int ret = SWAP_AGAIN; 1640 int ret = SWAP_AGAIN;
1645 1641
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1680static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) 1676static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1681{ 1677{
1682 struct address_space *mapping = page->mapping; 1678 struct address_space *mapping = page->mapping;
1683 pgoff_t pgoff = page->index << compound_order(page); 1679 pgoff_t pgoff = page_to_pgoff(page);
1684 struct vm_area_struct *vma; 1680 struct vm_area_struct *vma;
1685 int ret = SWAP_AGAIN; 1681 int ret = SWAP_AGAIN;
1686 1682
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
85 * a time): we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */ 88 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
89 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
90 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
91 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
468 return; 468 return;
469 469
470 index = start; 470 index = start;
471 for ( ; ; ) { 471 while (index < end) {
472 cond_resched(); 472 cond_resched();
473 473
474 pvec.nr = find_get_entries(mapping, index, 474 pvec.nr = find_get_entries(mapping, index,
475 min(end - index, (pgoff_t)PAGEVEC_SIZE), 475 min(end - index, (pgoff_t)PAGEVEC_SIZE),
476 pvec.pages, indices); 476 pvec.pages, indices);
477 if (!pvec.nr) { 477 if (!pvec.nr) {
478 if (index == start || unfalloc) 478 /* If all gone or hole-punch or unfalloc, we're done */
479 if (index == start || end != -1)
479 break; 480 break;
481 /* But if truncating, restart to make sure all gone */
480 index = start; 482 index = start;
481 continue; 483 continue;
482 } 484 }
483 if ((index == start || unfalloc) && indices[0] >= end) {
484 pagevec_remove_exceptionals(&pvec);
485 pagevec_release(&pvec);
486 break;
487 }
488 mem_cgroup_uncharge_start(); 485 mem_cgroup_uncharge_start();
489 for (i = 0; i < pagevec_count(&pvec); i++) { 486 for (i = 0; i < pagevec_count(&pvec); i++) {
490 struct page *page = pvec.pages[i]; 487 struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
496 if (radix_tree_exceptional_entry(page)) { 493 if (radix_tree_exceptional_entry(page)) {
497 if (unfalloc) 494 if (unfalloc)
498 continue; 495 continue;
499 nr_swaps_freed += !shmem_free_swap(mapping, 496 if (shmem_free_swap(mapping, index, page)) {
500 index, page); 497 /* Swap was replaced by page: retry */
498 index--;
499 break;
500 }
501 nr_swaps_freed++;
501 continue; 502 continue;
502 } 503 }
503 504
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
506 if (page->mapping == mapping) { 507 if (page->mapping == mapping) {
507 VM_BUG_ON_PAGE(PageWriteback(page), page); 508 VM_BUG_ON_PAGE(PageWriteback(page), page);
508 truncate_inode_page(mapping, page); 509 truncate_inode_page(mapping, page);
510 } else {
511 /* Page was replaced by swap: retry */
512 unlock_page(page);
513 index--;
514 break;
509 } 515 }
510 } 516 }
511 unlock_page(page); 517 unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
760 spin_lock(&inode->i_lock); 766 spin_lock(&inode->i_lock);
761 shmem_falloc = inode->i_private; 767 shmem_falloc = inode->i_private;
762 if (shmem_falloc && 768 if (shmem_falloc &&
763 !shmem_falloc->mode && 769 !shmem_falloc->waitq &&
764 index >= shmem_falloc->start && 770 index >= shmem_falloc->start &&
765 index < shmem_falloc->next) 771 index < shmem_falloc->next)
766 shmem_falloc->nr_unswapped++; 772 shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1248 * Trinity finds that probing a hole which tmpfs is punching can 1254 * Trinity finds that probing a hole which tmpfs is punching can
1249 * prevent the hole-punch from ever completing: which in turn 1255 * prevent the hole-punch from ever completing: which in turn
1250 * locks writers out with its hold on i_mutex. So refrain from 1256 * locks writers out with its hold on i_mutex. So refrain from
1251 * faulting pages into the hole while it's being punched, and 1257 * faulting pages into the hole while it's being punched. Although
1252 * wait on i_mutex to be released if vmf->flags permits. 1258 * shmem_undo_range() does remove the additions, it may be unable to
1259 * keep up, as each new page needs its own unmap_mapping_range() call,
1260 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1261 *
1262 * It does not matter if we sometimes reach this check just before the
1263 * hole-punch begins, so that one fault then races with the punch:
1264 * we just need to make racing faults a rare case.
1265 *
1266 * The implementation below would be much simpler if we just used a
1267 * standard mutex or completion: but we cannot take i_mutex in fault,
1268 * and bloating every shmem inode for this unlikely case would be sad.
1253 */ 1269 */
1254 if (unlikely(inode->i_private)) { 1270 if (unlikely(inode->i_private)) {
1255 struct shmem_falloc *shmem_falloc; 1271 struct shmem_falloc *shmem_falloc;
1256 1272
1257 spin_lock(&inode->i_lock); 1273 spin_lock(&inode->i_lock);
1258 shmem_falloc = inode->i_private; 1274 shmem_falloc = inode->i_private;
1259 if (!shmem_falloc || 1275 if (shmem_falloc &&
1260 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE || 1276 shmem_falloc->waitq &&
1261 vmf->pgoff < shmem_falloc->start || 1277 vmf->pgoff >= shmem_falloc->start &&
1262 vmf->pgoff >= shmem_falloc->next) 1278 vmf->pgoff < shmem_falloc->next) {
1263 shmem_falloc = NULL; 1279 wait_queue_head_t *shmem_falloc_waitq;
1264 spin_unlock(&inode->i_lock); 1280 DEFINE_WAIT(shmem_fault_wait);
1265 /* 1281
1266 * i_lock has protected us from taking shmem_falloc seriously 1282 ret = VM_FAULT_NOPAGE;
1267 * once return from shmem_fallocate() went back up that stack.
1268 * i_lock does not serialize with i_mutex at all, but it does
1269 * not matter if sometimes we wait unnecessarily, or sometimes
1270 * miss out on waiting: we just need to make those cases rare.
1271 */
1272 if (shmem_falloc) {
1273 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1283 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1274 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 1284 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1285 /* It's polite to up mmap_sem if we can */
1275 up_read(&vma->vm_mm->mmap_sem); 1286 up_read(&vma->vm_mm->mmap_sem);
1276 mutex_lock(&inode->i_mutex); 1287 ret = VM_FAULT_RETRY;
1277 mutex_unlock(&inode->i_mutex);
1278 return VM_FAULT_RETRY;
1279 } 1288 }
1280 /* cond_resched? Leave that to GUP or return to user */ 1289
1281 return VM_FAULT_NOPAGE; 1290 shmem_falloc_waitq = shmem_falloc->waitq;
1291 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1292 TASK_UNINTERRUPTIBLE);
1293 spin_unlock(&inode->i_lock);
1294 schedule();
1295
1296 /*
1297 * shmem_falloc_waitq points into the shmem_fallocate()
1298 * stack of the hole-punching task: shmem_falloc_waitq
1299 * is usually invalid by the time we reach here, but
1300 * finish_wait() does not dereference it in that case;
1301 * though i_lock needed lest racing with wake_up_all().
1302 */
1303 spin_lock(&inode->i_lock);
1304 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1305 spin_unlock(&inode->i_lock);
1306 return ret;
1282 } 1307 }
1308 spin_unlock(&inode->i_lock);
1283 } 1309 }
1284 1310
1285 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1311 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1774 1800
1775 mutex_lock(&inode->i_mutex); 1801 mutex_lock(&inode->i_mutex);
1776 1802
1777 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778
1779 if (mode & FALLOC_FL_PUNCH_HOLE) { 1803 if (mode & FALLOC_FL_PUNCH_HOLE) {
1780 struct address_space *mapping = file->f_mapping; 1804 struct address_space *mapping = file->f_mapping;
1781 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1805 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1782 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1806 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1807 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1783 1808
1809 shmem_falloc.waitq = &shmem_falloc_waitq;
1784 shmem_falloc.start = unmap_start >> PAGE_SHIFT; 1810 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1785 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 1811 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1786 spin_lock(&inode->i_lock); 1812 spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1792 1 + unmap_end - unmap_start, 0); 1818 1 + unmap_end - unmap_start, 0);
1793 shmem_truncate_range(inode, offset, offset + len - 1); 1819 shmem_truncate_range(inode, offset, offset + len - 1);
1794 /* No need to unmap again: hole-punching leaves COWed pages */ 1820 /* No need to unmap again: hole-punching leaves COWed pages */
1821
1822 spin_lock(&inode->i_lock);
1823 inode->i_private = NULL;
1824 wake_up_all(&shmem_falloc_waitq);
1825 spin_unlock(&inode->i_lock);
1795 error = 0; 1826 error = 0;
1796 goto undone; 1827 goto out;
1797 } 1828 }
1798 1829
1799 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1830 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1809 goto out; 1840 goto out;
1810 } 1841 }
1811 1842
1843 shmem_falloc.waitq = NULL;
1812 shmem_falloc.start = start; 1844 shmem_falloc.start = start;
1813 shmem_falloc.next = start; 1845 shmem_falloc.next = start;
1814 shmem_falloc.nr_falloced = 0; 1846 shmem_falloc.nr_falloced = 0;
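
[Note: the shmem.c hunks above replace the old i_mutex dance with a wait queue that lives on the hole-punching task's stack: shmem_fallocate() publishes it through inode->i_private, faulting tasks that hit the hole sleep on it, and the puncher wakes them under i_lock before its stack frame disappears. The pthread sketch below models that handshake with a mutex, a condition variable on the "punching" thread's stack, and a shared pointer standing in for i_private; it is an analogy, not the kernel code.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *punch_waitq;               /* NULL: no punch running */

static void *faulting_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (punch_waitq)                           /* hole being punched: wait */
        pthread_cond_wait(punch_waitq, &lock);
    pthread_mutex_unlock(&lock);
    puts("fault proceeds after the punch completed");
    return NULL;
}

int main(void)
{
    pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;   /* lives on this stack */
    pthread_t t;

    pthread_mutex_lock(&lock);
    punch_waitq = &waitq;                         /* announce the punch */
    pthread_mutex_unlock(&lock);

    pthread_create(&t, NULL, faulting_thread, NULL);

    /* ... the truncation work itself would happen here ... */

    pthread_mutex_lock(&lock);
    punch_waitq = NULL;                           /* punch finished */
    pthread_cond_broadcast(&waitq);               /* wake any sleeping faulters */
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);                        /* waiters gone before waitq dies */
    return 0;
}
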
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
55 continue; 55 continue;
56 } 56 }
57 57
58#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) 58#if !defined(CONFIG_SLUB)
59 if (!strcmp(s->name, name)) { 59 if (!strcmp(s->name, name)) {
60 pr_err("%s (%s): Cache name already exists.\n", 60 pr_err("%s (%s): Cache name already exists.\n",
61 __func__, name); 61 __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
355 for ( ; ; ) { 355 for ( ; ; ) {
356 cond_resched(); 356 cond_resched();
357 if (!pagevec_lookup_entries(&pvec, mapping, index, 357 if (!pagevec_lookup_entries(&pvec, mapping, index,
358 min(end - index, (pgoff_t)PAGEVEC_SIZE), 358 min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
359 indices)) { 359 /* If all gone from start onwards, we're done */
360 if (index == start) 360 if (index == start)
361 break; 361 break;
362 /* Otherwise restart to make sure all gone */
362 index = start; 363 index = start;
363 continue; 364 continue;
364 } 365 }
365 if (index == start && indices[0] >= end) { 366 if (index == start && indices[0] >= end) {
367 /* All gone out of hole to be punched, we're done */
366 pagevec_remove_exceptionals(&pvec); 368 pagevec_remove_exceptionals(&pvec);
367 pagevec_release(&pvec); 369 pagevec_release(&pvec);
368 break; 370 break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
373 375
374 /* We rely upon deletion not changing page->index */ 376 /* We rely upon deletion not changing page->index */
375 index = indices[i]; 377 index = indices[i];
376 if (index >= end) 378 if (index >= end) {
379 /* Restart punch to make sure all gone */
380 index = start - 1;
377 break; 381 break;
382 }
378 383
379 if (radix_tree_exceptional_entry(page)) { 384 if (radix_tree_exceptional_entry(page)) {
380 clear_exceptional_entry(mapping, index, page); 385 clear_exceptional_entry(mapping, index, page);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c00398..dd11f612e03e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
628 int i; 628 int i;
629 629
630 free_percpu(vlan->vlan_pcpu_stats);
631 vlan->vlan_pcpu_stats = NULL;
632 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 630 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
633 while ((pm = vlan->egress_priority_map[i]) != NULL) { 631 while ((pm = vlan->egress_priority_map[i]) != NULL) {
634 vlan->egress_priority_map[i] = pm->next; 632 vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
785 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, 783 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
786}; 784};
787 785
786static void vlan_dev_free(struct net_device *dev)
787{
788 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
789
790 free_percpu(vlan->vlan_pcpu_stats);
791 vlan->vlan_pcpu_stats = NULL;
792 free_netdev(dev);
793}
794
788void vlan_setup(struct net_device *dev) 795void vlan_setup(struct net_device *dev)
789{ 796{
790 ether_setup(dev); 797 ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
794 dev->tx_queue_len = 0; 801 dev->tx_queue_len = 0;
795 802
796 dev->netdev_ops = &vlan_netdev_ops; 803 dev->netdev_ops = &vlan_netdev_ops;
797 dev->destructor = free_netdev; 804 dev->destructor = vlan_dev_free;
798 dev->ethtool_ops = &vlan_ethtool_ops; 805 dev->ethtool_ops = &vlan_ethtool_ops;
799 806
800 memset(dev->broadcast, 0, ETH_ALEN); 807 memset(dev->broadcast, 0, ETH_ALEN);
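
[Note: the vlan_dev change above moves the freeing of the per-CPU stats out of the uninit path and into a dedicated destructor (vlan_dev_free) that runs only when the netdev itself is released, so late readers of the stats never touch freed memory. Below is a plain-C sketch of that ownership split, with illustrative names rather than the netdev API.]

#include <stdio.h>
#include <stdlib.h>

struct toy_stats { unsigned long rx_packets, tx_packets; };

struct toy_device {
    struct toy_stats *pcpu_stats;
    void (*destructor)(struct toy_device *dev);
};

static void toy_device_uninit(struct toy_device *dev)
{
    /* Tear down runtime state here, but deliberately keep pcpu_stats:
     * readers may still hold a reference to the device. */
    (void)dev;
}

static void toy_device_free(struct toy_device *dev)
{
    free(dev->pcpu_stats);          /* safe: the last reference is gone */
    dev->pcpu_stats = NULL;
    free(dev);
}

int main(void)
{
    struct toy_device *dev = calloc(1, sizeof(*dev));

    dev->pcpu_stats = calloc(1, sizeof(*dev->pcpu_stats));
    dev->destructor = toy_device_free;

    toy_device_uninit(dev);         /* stats still valid after this */
    dev->destructor(dev);           /* final release frees everything */
    return 0;
}
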
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082e02b3..bfcf6be1d665 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1489 goto drop; 1489 goto drop;
1490 1490
1491 /* Queue packet (standard) */ 1491 /* Queue packet (standard) */
1492 skb->sk = sock;
1493
1494 if (sock_queue_rcv_skb(sock, skb) < 0) 1492 if (sock_queue_rcv_skb(sock, skb) < 0)
1495 goto drop; 1493 goto drop;
1496 1494
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1644 if (!skb) 1642 if (!skb)
1645 goto out; 1643 goto out;
1646 1644
1647 skb->sk = sk;
1648 skb_reserve(skb, ddp_dl->header_length); 1645 skb_reserve(skb, ddp_dl->header_length);
1649 skb_reserve(skb, dev->hard_header_len); 1646 skb_reserve(skb, dev->hard_header_len);
1650 skb->dev = dev; 1647 skb->dev = dev;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest; 801 bla_dst_own = &bat_priv->bla.claim_dest;
802 802
803 /* check if it is a claim packet in general */
804 if (memcmp(bla_dst->magic, bla_dst_own->magic,
805 sizeof(bla_dst->magic)) != 0)
806 return 0;
807
808 /* if announcement packet, use the source, 803 /* if announcement packet, use the source,
809 * otherwise assume it is in the hw_src 804 * otherwise assume it is in the hw_src
810 */ 805 */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
866 struct batadv_hard_iface *primary_if, 861 struct batadv_hard_iface *primary_if,
867 struct sk_buff *skb) 862 struct sk_buff *skb)
868{ 863{
869 struct batadv_bla_claim_dst *bla_dst; 864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
870 uint8_t *hw_src, *hw_dst; 865 uint8_t *hw_src, *hw_dst;
871 struct vlan_ethhdr *vhdr; 866 struct vlan_hdr *vhdr, vhdr_buf;
872 struct ethhdr *ethhdr; 867 struct ethhdr *ethhdr;
873 struct arphdr *arphdr; 868 struct arphdr *arphdr;
874 unsigned short vid; 869 unsigned short vid;
870 int vlan_depth = 0;
875 __be16 proto; 871 __be16 proto;
876 int headlen; 872 int headlen;
877 int ret; 873 int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
882 proto = ethhdr->h_proto; 878 proto = ethhdr->h_proto;
883 headlen = ETH_HLEN; 879 headlen = ETH_HLEN;
884 if (vid & BATADV_VLAN_HAS_TAG) { 880 if (vid & BATADV_VLAN_HAS_TAG) {
885 vhdr = vlan_eth_hdr(skb); 881 /* Traverse the VLAN/Ethertypes.
886 proto = vhdr->h_vlan_encapsulated_proto; 882 *
887 headlen += VLAN_HLEN; 883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
888 } 899 }
889 900
890 if (proto != htons(ETH_P_ARP)) 901 if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
914 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
915 hw_dst = hw_src + ETH_ALEN + 4; 926 hw_dst = hw_src + ETH_ALEN + 4;
916 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
917 941
918 /* check if it is a claim frame. */ 942 /* check if it is a claim frame. */
919 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
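
[Note: the bridge_loop_avoidance.c hunks above replace the single-tag assumption with a loop that steps over stacked 802.1Q headers, recording the depth so that claim frames buried in QinQ encapsulation can be dropped. The standalone parser below walks the same way over a raw frame buffer; the frame layout and offsets are illustrative.]

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN    14
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100
#define ETH_P_ARP   0x0806

int main(void)
{
    /* dst(6) + src(6) + 0x8100 | tag | 0x8100 | tag | 0x0806 | payload */
    uint8_t frame[64] = {0};
    uint16_t q = htons(ETH_P_8021Q), arp = htons(ETH_P_ARP);
    uint16_t proto;
    int headlen = ETH_HLEN, vlan_depth = 0;

    memcpy(frame + 12, &q, 2);            /* outer ethertype: VLAN */
    memcpy(frame + 16, &q, 2);            /* inner ethertype: VLAN again */
    memcpy(frame + 20, &arp, 2);          /* finally: ARP */

    memcpy(&proto, frame + 12, 2);
    while (ntohs(proto) == ETH_P_8021Q) {
        /* the next layer's ethertype sits 2 bytes into each VLAN header */
        memcpy(&proto, frame + headlen + 2, 2);
        headlen += VLAN_HLEN;
        vlan_depth++;
    }
    printf("payload proto 0x%04x at offset %d, vlan depth %d\n",
           ntohs(proto), headlen, vlan_depth);   /* depth > 1 means QinQ */
    return 0;
}
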
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65dc20bf..cbd677f48c00 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
448 * possibly free it 448 * possibly free it
449 * @softif_vlan: the vlan object to release 449 * @softif_vlan: the vlan object to release
450 */ 450 */
451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) 451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
452{ 452{
453 if (atomic_dec_and_test(&softif_vlan->refcount)) 453 if (atomic_dec_and_test(&vlan->refcount)) {
454 kfree_rcu(softif_vlan, rcu); 454 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
455 hlist_del_rcu(&vlan->list);
456 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
457
458 kfree_rcu(vlan, rcu);
459 }
455} 460}
456 461
457/** 462/**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
505 if (!vlan) 510 if (!vlan)
506 return -ENOMEM; 511 return -ENOMEM;
507 512
513 vlan->bat_priv = bat_priv;
508 vlan->vid = vid; 514 vlan->vid = vid;
509 atomic_set(&vlan->refcount, 1); 515 atomic_set(&vlan->refcount, 1);
510 516
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
516 return err; 522 return err;
517 } 523 }
518 524
525 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
526 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
527 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
528
519 /* add a new TT local entry. This one will be marked with the NOPURGE 529 /* add a new TT local entry. This one will be marked with the NOPURGE
520 * flag 530 * flag
521 */ 531 */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
523 bat_priv->soft_iface->dev_addr, vid, 533 bat_priv->soft_iface->dev_addr, vid,
524 BATADV_NULL_IFINDEX, BATADV_NO_MARK); 534 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
525 535
526 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
527 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
528 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
529
530 return 0; 536 return 0;
531} 537}
532 538
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
538static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, 544static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
539 struct batadv_softif_vlan *vlan) 545 struct batadv_softif_vlan *vlan)
540{ 546{
541 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
542 hlist_del_rcu(&vlan->list);
543 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
544
545 batadv_sysfs_del_vlan(bat_priv, vlan);
546
547 /* explicitly remove the associated TT local entry because it is marked 547 /* explicitly remove the associated TT local entry because it is marked
548 * with the NOPURGE flag 548 * with the NOPURGE flag
549 */ 549 */
550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, 550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
551 vlan->vid, "vlan interface destroyed", false); 551 vlan->vid, "vlan interface destroyed", false);
552 552
553 batadv_sysfs_del_vlan(bat_priv, vlan);
553 batadv_softif_vlan_free_ref(vlan); 554 batadv_softif_vlan_free_ref(vlan);
554} 555}
555 556
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
567 unsigned short vid) 568 unsigned short vid)
568{ 569{
569 struct batadv_priv *bat_priv = netdev_priv(dev); 570 struct batadv_priv *bat_priv = netdev_priv(dev);
571 struct batadv_softif_vlan *vlan;
572 int ret;
570 573
571 /* only 802.1Q vlans are supported. 574 /* only 802.1Q vlans are supported.
572 * batman-adv does not know how to handle other types 575 * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
576 579
577 vid |= BATADV_VLAN_HAS_TAG; 580 vid |= BATADV_VLAN_HAS_TAG;
578 581
579 return batadv_softif_create_vlan(bat_priv, vid); 582 /* if a new vlan is getting created and it already exists, it means that
583 * it was not deleted yet. batadv_softif_vlan_get() increases the
584 * refcount in order to revive the object.
585 *
586 * if it does not exist then create it.
587 */
588 vlan = batadv_softif_vlan_get(bat_priv, vid);
589 if (!vlan)
590 return batadv_softif_create_vlan(bat_priv, vid);
591
592 /* recreate the sysfs object if it was already destroyed (and it should
 593 * be, since we received a kill_vid() for this vlan)
594 */
595 if (!vlan->kobj) {
596 ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
597 if (ret) {
598 batadv_softif_vlan_free_ref(vlan);
599 return ret;
600 }
601 }
602
603 /* add a new TT local entry. This one will be marked with the NOPURGE
604 * flag. This must be added again, even if the vlan object already
605 * exists, because the entry was deleted by kill_vid()
606 */
607 batadv_tt_local_add(bat_priv->soft_iface,
608 bat_priv->soft_iface->dev_addr, vid,
609 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
610
611 return 0;
580} 612}
581 613
582/** 614/**
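batadv_interface_add_vid() above is a get-or-create with reference revival: if kill_vid() has only torn down the sysfs entry and the TT entry, the lookup takes a new reference on the still-existing object instead of allocating a new one. A stripped-down sketch of that pattern with C11 atomics; vlan_get()/vlan_put()/vlan_add() are hypothetical stand-ins for the batadv helpers, and the list lock is omitted:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct vlan {
	unsigned short vid;
	atomic_int refcount;
	struct vlan *next;          /* singly linked list, lock omitted */
};

static struct vlan *vlan_list;

/* Look up a vid and take a reference; mirrors the role of batadv_softif_vlan_get(). */
static struct vlan *vlan_get(unsigned short vid)
{
	for (struct vlan *v = vlan_list; v; v = v->next) {
		if (v->vid == vid) {
			atomic_fetch_add(&v->refcount, 1);
			return v;
		}
	}
	return NULL;
}

/* Drop a reference; the last put also unlinks and frees the object. */
static void vlan_put(struct vlan *v)
{
	if (atomic_fetch_sub(&v->refcount, 1) == 1) {
		struct vlan **pp = &vlan_list;

		while (*pp && *pp != v)
			pp = &(*pp)->next;
		if (*pp)
			*pp = v->next;
		free(v);
	}
}

/* add_vid: revive an existing object or create a new one with refcount 1. */
static struct vlan *vlan_add(unsigned short vid)
{
	struct vlan *v = vlan_get(vid);

	if (v)
		return v;               /* revived: kill_vid had not freed it yet */

	v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;
	v->vid = vid;
	atomic_init(&v->refcount, 1);
	v->next = vlan_list;
	vlan_list = v;
	return v;
}

int main(void)
{
	struct vlan *a = vlan_add(100);   /* created, refcount 1 */
	struct vlan *b = vlan_add(100);   /* revived,  refcount 2 */

	printf("same object: %s\n", a == b ? "yes" : "no");
	vlan_put(b);
	vlan_put(a);                      /* last put unlinks and frees */
	return 0;
}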
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
511 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 511 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
512 struct batadv_tt_local_entry *tt_local; 512 struct batadv_tt_local_entry *tt_local;
513 struct batadv_tt_global_entry *tt_global = NULL; 513 struct batadv_tt_global_entry *tt_global = NULL;
514 struct batadv_softif_vlan *vlan;
514 struct net_device *in_dev = NULL; 515 struct net_device *in_dev = NULL;
515 struct hlist_head *head; 516 struct hlist_head *head;
516 struct batadv_tt_orig_list_entry *orig_entry; 517 struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
572 if (!tt_local) 573 if (!tt_local)
573 goto out; 574 goto out;
574 575
576 /* increase the refcounter of the related vlan */
577 vlan = batadv_softif_vlan_get(bat_priv, vid);
578
575 batadv_dbg(BATADV_DBG_TT, bat_priv, 579 batadv_dbg(BATADV_DBG_TT, bat_priv,
576 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 580 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
577 addr, BATADV_PRINT_VID(vid), 581 addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
604 if (unlikely(hash_added != 0)) { 608 if (unlikely(hash_added != 0)) {
605 /* remove the reference for the hash */ 609 /* remove the reference for the hash */
606 batadv_tt_local_entry_free_ref(tt_local); 610 batadv_tt_local_entry_free_ref(tt_local);
611 batadv_softif_vlan_free_ref(vlan);
607 goto out; 612 goto out;
608 } 613 }
609 614
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1009{ 1014{
1010 struct batadv_tt_local_entry *tt_local_entry; 1015 struct batadv_tt_local_entry *tt_local_entry;
1011 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1016 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1017 struct batadv_softif_vlan *vlan;
1012 1018
1013 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1019 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1014 if (!tt_local_entry) 1020 if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1039 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1045 hlist_del_rcu(&tt_local_entry->common.hash_entry);
1040 batadv_tt_local_entry_free_ref(tt_local_entry); 1046 batadv_tt_local_entry_free_ref(tt_local_entry);
1041 1047
1048 /* decrease the reference held for this vlan */
1049 vlan = batadv_softif_vlan_get(bat_priv, vid);
1050 batadv_softif_vlan_free_ref(vlan);
1051 batadv_softif_vlan_free_ref(vlan);
1052
1042out: 1053out:
1043 if (tt_local_entry) 1054 if (tt_local_entry)
1044 batadv_tt_local_entry_free_ref(tt_local_entry); 1055 batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1111 spinlock_t *list_lock; /* protects write access to the hash lists */ 1122 spinlock_t *list_lock; /* protects write access to the hash lists */
1112 struct batadv_tt_common_entry *tt_common_entry; 1123 struct batadv_tt_common_entry *tt_common_entry;
1113 struct batadv_tt_local_entry *tt_local; 1124 struct batadv_tt_local_entry *tt_local;
1125 struct batadv_softif_vlan *vlan;
1114 struct hlist_node *node_tmp; 1126 struct hlist_node *node_tmp;
1115 struct hlist_head *head; 1127 struct hlist_head *head;
1116 uint32_t i; 1128 uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1131 tt_local = container_of(tt_common_entry, 1143 tt_local = container_of(tt_common_entry,
1132 struct batadv_tt_local_entry, 1144 struct batadv_tt_local_entry,
1133 common); 1145 common);
1146
1147 /* decrease the reference held for this vlan */
1148 vlan = batadv_softif_vlan_get(bat_priv,
1149 tt_common_entry->vid);
1150 batadv_softif_vlan_free_ref(vlan);
1151 batadv_softif_vlan_free_ref(vlan);
1152
1134 batadv_tt_local_entry_free_ref(tt_local); 1153 batadv_tt_local_entry_free_ref(tt_local);
1135 } 1154 }
1136 spin_unlock_bh(list_lock); 1155 spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3139 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 3158 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
3140 struct batadv_tt_common_entry *tt_common; 3159 struct batadv_tt_common_entry *tt_common;
3141 struct batadv_tt_local_entry *tt_local; 3160 struct batadv_tt_local_entry *tt_local;
3161 struct batadv_softif_vlan *vlan;
3142 struct hlist_node *node_tmp; 3162 struct hlist_node *node_tmp;
3143 struct hlist_head *head; 3163 struct hlist_head *head;
3144 spinlock_t *list_lock; /* protects write access to the hash lists */ 3164 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3167 tt_local = container_of(tt_common, 3187 tt_local = container_of(tt_common,
3168 struct batadv_tt_local_entry, 3188 struct batadv_tt_local_entry,
3169 common); 3189 common);
3190
3191 /* decrease the reference held for this vlan */
3192 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3193 batadv_softif_vlan_free_ref(vlan);
3194 batadv_softif_vlan_free_ref(vlan);
3195
3170 batadv_tt_local_entry_free_ref(tt_local); 3196 batadv_tt_local_entry_free_ref(tt_local);
3171 } 3197 }
3172 spin_unlock_bh(list_lock); 3198 spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
687 687
688/** 688/**
689 * struct batadv_softif_vlan - per VLAN attributes set 689 * struct batadv_softif_vlan - per VLAN attributes set
690 * @bat_priv: pointer to the mesh object
690 * @vid: VLAN identifier 691 * @vid: VLAN identifier
691 * @kobj: kobject for sysfs vlan subdirectory 692 * @kobj: kobject for sysfs vlan subdirectory
692 * @ap_isolation: AP isolation state 693 * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
696 * @rcu: struct used for freeing in a RCU-safe manner 697 * @rcu: struct used for freeing in a RCU-safe manner
697 */ 698 */
698struct batadv_softif_vlan { 699struct batadv_softif_vlan {
700 struct batadv_priv *bat_priv;
699 unsigned short vid; 701 unsigned short vid;
700 struct kobject *kobj; 702 struct kobject *kobj;
701 atomic_t ap_isolation; /* boolean */ 703 atomic_t ap_isolation; /* boolean */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca01d1861854..a7a27bc2c0b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
289{ 289{
290 struct hci_conn *conn = container_of(work, struct hci_conn, 290 struct hci_conn *conn = container_of(work, struct hci_conn,
291 disc_work.work); 291 disc_work.work);
292 int refcnt = atomic_read(&conn->refcnt);
292 293
293 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); 294 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 295
295 if (atomic_read(&conn->refcnt)) 296 WARN_ON(refcnt < 0);
297
 298 /* FIXME: It was observed that in a pairing-failed scenario, refcnt
299 * drops below 0. Probably this is because l2cap_conn_del calls
300 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
301 * dropped. After that loop hci_chan_del is called which also drops
 302 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
303 * otherwise drop it.
304 */
305 if (refcnt > 0)
296 return; 306 return;
297 307
298 switch (conn->state) { 308 switch (conn->state) {
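The timeout handler now samples the refcount once into a local variable, warns if it ever went negative, and only proceeds with teardown when no reference is left. A compact userspace sketch of that read-once-then-decide shape (plain C11 atomics, not the hci API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decide whether a timed-out connection may be torn down.  The counter is
 * sampled once so the warning and the decision see the same value. */
static bool conn_may_teardown(atomic_int *refcnt)
{
	int snapshot = atomic_load(refcnt);

	if (snapshot < 0)                       /* analogous to WARN_ON(refcnt < 0) */
		fprintf(stderr, "refcount went negative: %d\n", snapshot);
	return snapshot <= 0;                   /* still referenced -> keep it alive */
}

int main(void)
{
	atomic_int refcnt = 2;

	printf("teardown: %d\n", conn_may_teardown(&refcnt));   /* 0: still in use */
	atomic_store(&refcnt, 0);
	printf("teardown: %d\n", conn_may_teardown(&refcnt));   /* 1: safe to drop */
	return 0;
}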
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f2829a7932e2..e33a982161c1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, 385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
386}; 386};
387 387
388static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
389{
390 /* If either side has unknown io_caps, use JUST WORKS */
391 if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
392 remote_io > SMP_IO_KEYBOARD_DISPLAY)
393 return JUST_WORKS;
394
395 return gen_method[remote_io][local_io];
396}
397
388static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, 398static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
389 u8 local_io, u8 remote_io) 399 u8 local_io, u8 remote_io)
390{ 400{
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 411 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
402 412
403 /* If neither side wants MITM, use JUST WORKS */ 413 /* If neither side wants MITM, use JUST WORKS */
404 /* If either side has unknown io_caps, use JUST WORKS */
405 /* Otherwise, look up method from the table */ 414 /* Otherwise, look up method from the table */
406 if (!(auth & SMP_AUTH_MITM) || 415 if (!(auth & SMP_AUTH_MITM))
407 local_io > SMP_IO_KEYBOARD_DISPLAY ||
408 remote_io > SMP_IO_KEYBOARD_DISPLAY)
409 method = JUST_WORKS; 416 method = JUST_WORKS;
410 else 417 else
411 method = gen_method[remote_io][local_io]; 418 method = get_auth_method(smp, local_io, remote_io);
412 419
413 /* If not bonding, don't ask user to confirm a Zero TK */ 420 /* If not bonding, don't ask user to confirm a Zero TK */
414 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 421 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
669{ 676{
670 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 677 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
671 struct smp_chan *smp; 678 struct smp_chan *smp;
672 u8 key_size, auth; 679 u8 key_size, auth, sec_level;
673 int ret; 680 int ret;
674 681
675 BT_DBG("conn %p", conn); 682 BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
695 /* We didn't start the pairing, so match remote */ 702 /* We didn't start the pairing, so match remote */
696 auth = req->auth_req; 703 auth = req->auth_req;
697 704
698 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 705 sec_level = authreq_to_seclevel(auth);
706 if (sec_level > conn->hcon->pending_sec_level)
707 conn->hcon->pending_sec_level = sec_level;
708
 709 /* If we need MITM, check that it can be achieved */
710 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
711 u8 method;
712
713 method = get_auth_method(smp, conn->hcon->io_capability,
714 req->io_capability);
715 if (method == JUST_WORKS || method == JUST_CFM)
716 return SMP_AUTH_REQUIREMENTS;
717 }
699 718
700 build_pairing_cmd(conn, req, &rsp, auth); 719 build_pairing_cmd(conn, req, &rsp, auth);
701 720
@@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
743 if (check_enc_key_size(conn, key_size)) 762 if (check_enc_key_size(conn, key_size))
744 return SMP_ENC_KEY_SIZE; 763 return SMP_ENC_KEY_SIZE;
745 764
 765 /* If we need MITM, check that it can be achieved */
766 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
767 u8 method;
768
769 method = get_auth_method(smp, req->io_capability,
770 rsp->io_capability);
771 if (method == JUST_WORKS || method == JUST_CFM)
772 return SMP_AUTH_REQUIREMENTS;
773 }
774
746 get_random_bytes(smp->prnd, sizeof(smp->prnd)); 775 get_random_bytes(smp->prnd, sizeof(smp->prnd));
747 776
748 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 777 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
838 struct smp_cmd_pairing cp; 867 struct smp_cmd_pairing cp;
839 struct hci_conn *hcon = conn->hcon; 868 struct hci_conn *hcon = conn->hcon;
840 struct smp_chan *smp; 869 struct smp_chan *smp;
870 u8 sec_level;
841 871
842 BT_DBG("conn %p", conn); 872 BT_DBG("conn %p", conn);
843 873
@@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
847 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 877 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
848 return SMP_CMD_NOTSUPP; 878 return SMP_CMD_NOTSUPP;
849 879
850 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 880 sec_level = authreq_to_seclevel(rp->auth_req);
881 if (sec_level > hcon->pending_sec_level)
882 hcon->pending_sec_level = sec_level;
851 883
852 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 884 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
853 return 0; 885 return 0;
@@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
901 if (smp_sufficient_security(hcon, sec_level)) 933 if (smp_sufficient_security(hcon, sec_level))
902 return 1; 934 return 1;
903 935
936 if (sec_level > hcon->pending_sec_level)
937 hcon->pending_sec_level = sec_level;
938
904 if (hcon->link_mode & HCI_LM_MASTER) 939 if (hcon->link_mode & HCI_LM_MASTER)
905 if (smp_ltk_encrypt(conn, sec_level)) 940 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
906 goto done; 941 return 0;
907 942
908 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 943 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
909 return 0; 944 return 0;
@@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
918 * requires it. 953 * requires it.
919 */ 954 */
920 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || 955 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
921 sec_level > BT_SECURITY_MEDIUM) 956 hcon->pending_sec_level > BT_SECURITY_MEDIUM)
922 authreq |= SMP_AUTH_MITM; 957 authreq |= SMP_AUTH_MITM;
923 958
924 if (hcon->link_mode & HCI_LM_MASTER) { 959 if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
937 972
938 set_bit(SMP_FLAG_INITIATOR, &smp->flags); 973 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
939 974
940done:
941 hcon->pending_sec_level = sec_level;
942
943 return 0; 975 return 0;
944} 976}
945 977
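get_auth_method() above centralises the bounds check that used to be open-coded in tk_request(): any IO capability outside the 5x5 table degrades to Just Works, and both pairing handlers then reject Just Works/Just Confirm whenever MITM-level security is pending. A self-contained sketch of the bounds-checked lookup; the matrix entries below are placeholders, not the real SMP pairing table:

#include <stdio.h>

enum method { JUST_WORKS, JUST_CFM, REQ_PASSKEY, CFM_PASSKEY, OVERLAP };

#define IO_CAP_MAX 4   /* plays the role of SMP_IO_KEYBOARD_DISPLAY */

/* Placeholder pairing matrix, indexed [remote_io][local_io].  The entries do
 * not reproduce the real table in net/bluetooth/smp.c; only the lookup shape
 * and the bounds check are the point. */
static const enum method gen_method[IO_CAP_MAX + 1][IO_CAP_MAX + 1] = {
	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
	{ JUST_CFM,    JUST_CFM,    REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
	{ JUST_WORKS,  JUST_WORKS,  JUST_WORKS,  JUST_WORKS, JUST_WORKS  },
	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP     },
};

/* Unknown capabilities never index the table: they degrade to Just Works. */
static enum method get_auth_method(unsigned int local_io, unsigned int remote_io)
{
	if (local_io > IO_CAP_MAX || remote_io > IO_CAP_MAX)
		return JUST_WORKS;
	return gen_method[remote_io][local_io];
}

int main(void)
{
	printf("%d\n", get_auth_method(2, 0));    /* in range: table entry */
	printf("%d\n", get_auth_method(7, 1));    /* out of range: JUST_WORKS */
	return 0;
}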
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf63184..bc8aeefddf3f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85{ 85{
86 int tot_len; 86 int tot_len;
87 87
88 if (kern_msg->msg_namelen) { 88 if (kern_msg->msg_name && kern_msg->msg_namelen) {
89 if (mode == VERIFY_READ) { 89 if (mode == VERIFY_READ) {
90 int err = move_addr_to_kernel(kern_msg->msg_name, 90 int err = move_addr_to_kernel(kern_msg->msg_name,
91 kern_msg->msg_namelen, 91 kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93 if (err < 0) 93 if (err < 0)
94 return err; 94 return err;
95 } 95 }
96 if (kern_msg->msg_name) 96 kern_msg->msg_name = kern_address;
97 kern_msg->msg_name = kern_address; 97 } else {
98 } else
99 kern_msg->msg_name = NULL; 98 kern_msg->msg_name = NULL;
99 kern_msg->msg_namelen = 0;
100 }
100 101
101 tot_len = iov_from_user_compat_to_kern(kern_iov, 102 tot_len = iov_from_user_compat_to_kern(kern_iov,
102 (struct compat_iovec __user *)kern_msg->msg_iov, 103 (struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
148static struct list_head offload_base __read_mostly; 148static struct list_head offload_base __read_mostly;
149 149
150static int netif_rx_internal(struct sk_buff *skb); 150static int netif_rx_internal(struct sk_buff *skb);
151static int call_netdevice_notifiers_info(unsigned long val,
152 struct net_device *dev,
153 struct netdev_notifier_info *info);
151 154
152/* 155/*
153 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 156 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
1207void netdev_state_change(struct net_device *dev) 1210void netdev_state_change(struct net_device *dev)
1208{ 1211{
1209 if (dev->flags & IFF_UP) { 1212 if (dev->flags & IFF_UP) {
1210 call_netdevice_notifiers(NETDEV_CHANGE, dev); 1213 struct netdev_notifier_change_info change_info;
1214
1215 change_info.flags_changed = 0;
1216 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1217 &change_info.info);
1211 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1218 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1212 } 1219 }
1213} 1220}
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4089 skb->vlan_tci = 0; 4096 skb->vlan_tci = 0;
4090 skb->dev = napi->dev; 4097 skb->dev = napi->dev;
4091 skb->skb_iif = 0; 4098 skb->skb_iif = 0;
4099 skb->encapsulation = 0;
4100 skb_shinfo(skb)->gso_type = 0;
4092 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4101 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4093 4102
4094 napi->skb = skb; 4103 napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
4227#endif 4236#endif
4228 napi->weight = weight_p; 4237 napi->weight = weight_p;
4229 local_irq_disable(); 4238 local_irq_disable();
4230 while (work < quota) { 4239 while (1) {
4231 struct sk_buff *skb; 4240 struct sk_buff *skb;
4232 unsigned int qlen;
4233 4241
4234 while ((skb = __skb_dequeue(&sd->process_queue))) { 4242 while ((skb = __skb_dequeue(&sd->process_queue))) {
4235 local_irq_enable(); 4243 local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
4243 } 4251 }
4244 4252
4245 rps_lock(sd); 4253 rps_lock(sd);
4246 qlen = skb_queue_len(&sd->input_pkt_queue); 4254 if (skb_queue_empty(&sd->input_pkt_queue)) {
4247 if (qlen)
4248 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4249 &sd->process_queue);
4250
4251 if (qlen < quota - work) {
4252 /* 4255 /*
4253 * Inline a custom version of __napi_complete(). 4256 * Inline a custom version of __napi_complete().
4254 * only current cpu owns and manipulates this napi, 4257 * only current cpu owns and manipulates this napi,
4255 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4258 * and NAPI_STATE_SCHED is the only possible flag set
4256 * we can use a plain write instead of clear_bit(), 4259 * on backlog.
4260 * We can use a plain write instead of clear_bit(),
 4257 * and we dont need an smp_mb() memory barrier. 4261 * and we don't need an smp_mb() memory barrier.
4258 */ 4262 */
4259 list_del(&napi->poll_list); 4263 list_del(&napi->poll_list);
4260 napi->state = 0; 4264 napi->state = 0;
4265 rps_unlock(sd);
4261 4266
4262 quota = work + qlen; 4267 break;
4263 } 4268 }
4269
4270 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4271 &sd->process_queue);
4264 rps_unlock(sd); 4272 rps_unlock(sd);
4265 } 4273 }
4266 local_irq_enable(); 4274 local_irq_enable();
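The reworked process_backlog() drains the per-CPU process queue first and only consults the input queue afterwards: if that queue is empty the NAPI state is cleared and the loop ends, otherwise the whole input queue is spliced onto the process queue in one step. A single-threaded toy sketch of that control flow with plain linked-list queues (no RPS locking, no quota or IRQ handling):

#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };
struct queue { struct pkt *head, *tail; };

static void enqueue(struct queue *q, int id)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->id = id;
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

/* Move every packet from @src to the tail of @dst, like skb_queue_splice_tail_init(). */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
	if (!src->head)
		return;
	if (dst->tail)
		dst->tail->next = src->head;
	else
		dst->head = src->head;
	dst->tail = src->tail;
	src->head = src->tail = NULL;
}

static void process_backlog(struct queue *input, struct queue *process)
{
	for (;;) {
		struct pkt *p;

		while ((p = process->head)) {
			process->head = p->next;
			if (!process->head)
				process->tail = NULL;
			printf("delivered packet %d\n", p->id);
			free(p);
		}
		if (!input->head)       /* nothing pending: "__napi_complete()" and stop */
			break;
		splice_tail_init(input, process);
	}
}

int main(void)
{
	struct queue input = { 0 }, process = { 0 };

	for (int i = 1; i <= 5; i++)
		enqueue(&input, i);
	process_backlog(&input, &process);
	return 0;
}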
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6beb49c..e1ec45ab1e63 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
39{ 39{
40 int size, ct, err; 40 int size, ct, err;
41 41
42 if (m->msg_namelen) { 42 if (m->msg_name && m->msg_namelen) {
43 if (mode == VERIFY_READ) { 43 if (mode == VERIFY_READ) {
44 void __user *namep; 44 void __user *namep;
45 namep = (void __user __force *) m->msg_name; 45 namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
48 if (err < 0) 48 if (err < 0)
49 return err; 49 return err;
50 } 50 }
51 if (m->msg_name) 51 m->msg_name = address;
52 m->msg_name = address;
53 } else { 52 } else {
54 m->msg_name = NULL; 53 m->msg_name = NULL;
54 m->msg_namelen = 0;
55 } 55 }
56 56
57 size = m->msg_iovlen * sizeof(struct iovec); 57 size = m->msg_iovlen * sizeof(struct iovec);
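Both verify_compat_iovec() and verify_iovec() now treat msg_name and msg_namelen as one unit: the address is copied only when both are set, and otherwise both are cleared so later code can never see a non-zero length paired with a NULL pointer. A tiny sketch of that normalisation, with a made-up struct standing in for msghdr:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct msg {
	void  *name;      /* optional peer address */
	size_t namelen;
};

/* Keep name/namelen consistent: either both meaningful or both empty. */
static void normalize_name(struct msg *m, void *kernel_buf, size_t buflen)
{
	if (m->name && m->namelen) {
		size_t n = m->namelen < buflen ? m->namelen : buflen;

		memcpy(kernel_buf, m->name, n);   /* stands in for move_addr_to_kernel() */
		m->name = kernel_buf;
		m->namelen = n;
	} else {
		m->name = NULL;
		m->namelen = 0;                   /* the important part of the fix */
	}
}

int main(void)
{
	char kbuf[64];
	struct msg m = { .name = NULL, .namelen = 16 };   /* inconsistent input */

	normalize_name(&m, kbuf, sizeof(kbuf));
	printf("name=%p namelen=%zu\n", m.name, m.namelen);  /* (nil) 0 */
	return 0;
}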
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..ef31fef25e5a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2249 ndm->ndm_pad1 = 0; 2249 ndm->ndm_pad1 = 0;
2250 ndm->ndm_pad2 = 0; 2250 ndm->ndm_pad2 = 0;
2251 ndm->ndm_flags = pn->flags | NTF_PROXY; 2251 ndm->ndm_flags = pn->flags | NTF_PROXY;
2252 ndm->ndm_type = NDA_DST; 2252 ndm->ndm_type = RTN_UNICAST;
2253 ndm->ndm_ifindex = pn->dev->ifindex; 2253 ndm->ndm_ifindex = pn->dev->ifindex;
2254 ndm->ndm_state = NUD_NONE; 2254 ndm->ndm_state = NUD_NONE;
2255 2255
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3061 } else { 3061 } else {
3062 struct neigh_table *tbl = p->tbl;
3062 dev_name_source = "default"; 3063 dev_name_source = "default";
3063 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); 3064 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3064 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; 3065 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3065 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; 3066 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3066 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; 3067 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3067 } 3068 }
3068 3069
3069 if (handler) { 3070 if (handler) {
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f5433..dd8696a3dbec 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
150 goto put; 150 goto put;
151 151
152 memcpy(*_result, upayload->data, len); 152 memcpy(*_result, upayload->data, len);
153 *_result[len] = '\0'; 153 (*_result)[len] = '\0';
154 154
155 if (_expiry) 155 if (_expiry)
156 *_expiry = rkey->expiry; 156 *_expiry = rkey->expiry;
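The dns_query() change is a pure operator-precedence fix: *_result[len] parses as *(_result[len]), which indexes the pointer-to-pointer itself, while NUL-terminating the returned buffer needs (*_result)[len]. A short demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	char *result = buf;
	char **_result = &result;   /* same shape as the dns_query() out-parameter */
	size_t len = 3;

	memcpy(*_result, "abcdef", 6);
	(*_result)[len] = '\0';     /* correct: terminate the returned string */
	/* *_result[len] would have meant *(_result[len]): read _result[3],
	 * i.e. memory far past the single pointer we actually have.       */
	printf("%s\n", result);     /* prints "abc" */
	return 0;
}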
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1429 int proto = iph->protocol; 1429 int proto = iph->protocol;
1430 int err = -ENOSYS; 1430 int err = -ENOSYS;
1431 1431
1432 if (skb->encapsulation)
1433 skb_set_inner_network_header(skb, nhoff);
1434
1432 csum_replace2(&iph->check, iph->tot_len, newlen); 1435 csum_replace2(&iph->check, iph->tot_len, newlen);
1433 iph->tot_len = newlen; 1436 iph->tot_len = newlen;
1434 1437
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
68 68
69 skb_push(skb, hdr_len); 69 skb_push(skb, hdr_len);
70 70
71 skb_reset_transport_header(skb);
71 greh = (struct gre_base_hdr *)skb->data; 72 greh = (struct gre_base_hdr *)skb->data;
72 greh->flags = tnl_flags_to_gre_flags(tpi->flags); 73 greh->flags = tnl_flags_to_gre_flags(tpi->flags);
73 greh->protocol = tpi->proto; 74 greh->protocol = tpi->proto;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
263 int err = -ENOENT; 263 int err = -ENOENT;
264 __be16 type; 264 __be16 type;
265 265
266 skb->encapsulation = 1;
267 skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
268
266 type = greh->protocol; 269 type = greh->protocol;
267 if (greh->flags & GRE_KEY) 270 if (greh->flags & GRE_KEY)
268 grehlen += GRE_HEADER_SECTION; 271 grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
739 /* fall through */ 739 /* fall through */
740 case 0: 740 case 0:
741 info = ntohs(icmph->un.frag.mtu); 741 info = ntohs(icmph->un.frag.mtu);
742 if (!info)
743 goto out;
744 } 742 }
745 break; 743 break;
746 case ICMP_SR_FAILED: 744 case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1944 1944
1945 rtnl_lock(); 1945 rtnl_lock();
1946 in_dev = ip_mc_find_dev(net, imr); 1946 in_dev = ip_mc_find_dev(net, imr);
1947 if (!in_dev) {
1948 ret = -ENODEV;
1949 goto out;
1950 }
1947 ifindex = imr->imr_ifindex; 1951 ifindex = imr->imr_ifindex;
1948 for (imlp = &inet->mc_list; 1952 for (imlp = &inet->mc_list;
1949 (iml = rtnl_dereference(*imlp)) != NULL; 1953 (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1961 1965
1962 *imlp = iml->next_rcu; 1966 *imlp = iml->next_rcu;
1963 1967
1964 if (in_dev) 1968 ip_mc_dec_group(in_dev, group);
1965 ip_mc_dec_group(in_dev, group);
1966 rtnl_unlock(); 1969 rtnl_unlock();
1967 /* decrease mem now to avoid the memleak warning */ 1970 /* decrease mem now to avoid the memleak warning */
1968 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 1971 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1969 kfree_rcu(iml, rcu); 1972 kfree_rcu(iml, rcu);
1970 return 0; 1973 return 0;
1971 } 1974 }
1972 if (!in_dev) 1975out:
1973 ret = -ENODEV;
1974 rtnl_unlock(); 1976 rtnl_unlock();
1975 return ret; 1977 return ret;
1976} 1978}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
288 optptr++; 288 optptr++;
289 continue; 289 continue;
290 } 290 }
291 if (unlikely(l < 2)) {
292 pp_ptr = optptr;
293 goto error;
294 }
291 optlen = optptr[1]; 295 optlen = optptr[1];
292 if (optlen < 2 || optlen > l) { 296 if (optlen < 2 || optlen > l) {
293 pp_ptr = optptr; 297 pp_ptr = optptr;
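The added check guards the read of optptr[1], the option length byte: with fewer than two bytes left, the parser would read past the end of the header before the existing optlen sanity check could reject it. The same rule applies to any TLV-style parser; a minimal sketch, assuming the usual IP-options layout where single-byte END/NOP options carry no length and every other length covers the whole option:

#include <stdint.h>
#include <stdio.h>

/* Return 0 on success, -1 on a malformed option list. */
static int parse_options(const uint8_t *opt, int remaining)
{
	while (remaining > 0) {
		uint8_t type = opt[0];

		if (type == 0)                       /* end-of-options */
			return 0;
		if (type == 1) {                     /* one-byte no-op */
			opt++;
			remaining--;
			continue;
		}
		if (remaining < 2)                   /* the added check: no length byte */
			return -1;
		uint8_t len = opt[1];
		if (len < 2 || len > remaining)      /* pre-existing sanity check */
			return -1;
		printf("option type %u, length %u\n", type, len);
		opt += len;
		remaining -= len;
	}
	return 0;
}

int main(void)
{
	const uint8_t good[] = { 1, 1, 7, 3, 4, 0 };   /* nop, nop, opt(7, len 3), end */
	const uint8_t bad[]  = { 1, 7 };               /* nop, then type 7 with no length */

	printf("good: %d\n", parse_options(good, sizeof(good)));
	printf("bad:  %d\n", parse_options(bad, sizeof(bad)));
	return 0;
}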
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 54b6731dab55..6f9de61dce5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
169 169
170 hlist_for_each_entry_rcu(t, head, hash_node) { 170 hlist_for_each_entry_rcu(t, head, hash_node) {
171 if (remote != t->parms.iph.daddr || 171 if (remote != t->parms.iph.daddr ||
172 t->parms.iph.saddr != 0 ||
172 !(t->dev->flags & IFF_UP)) 173 !(t->dev->flags & IFF_UP))
173 continue; 174 continue;
174 175
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
185 head = &itn->tunnels[hash]; 186 head = &itn->tunnels[hash];
186 187
187 hlist_for_each_entry_rcu(t, head, hash_node) { 188 hlist_for_each_entry_rcu(t, head, hash_node) {
188 if ((local != t->parms.iph.saddr && 189 if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
189 (local != t->parms.iph.daddr || 190 (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
190 !ipv4_is_multicast(local))) || 191 continue;
191 !(t->dev->flags & IFF_UP)) 192
193 if (!(t->dev->flags & IFF_UP))
192 continue; 194 continue;
193 195
194 if (!ip_tunnel_key_match(&t->parms, flags, key)) 196 if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
205 207
206 hlist_for_each_entry_rcu(t, head, hash_node) { 208 hlist_for_each_entry_rcu(t, head, hash_node) {
207 if (t->parms.i_key != key || 209 if (t->parms.i_key != key ||
210 t->parms.iph.saddr != 0 ||
211 t->parms.iph.daddr != 0 ||
208 !(t->dev->flags & IFF_UP)) 212 !(t->dev->flags & IFF_UP))
209 continue; 213 continue;
210 214
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..190199851c9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
457 return neigh_create(&arp_tbl, pkey, dev); 457 return neigh_create(&arp_tbl, pkey, dev);
458} 458}
459 459
460atomic_t *ip_idents __read_mostly; 460#define IP_IDENTS_SZ 2048u
461EXPORT_SYMBOL(ip_idents); 461struct ip_ident_bucket {
462 atomic_t id;
463 u32 stamp32;
464};
465
466static struct ip_ident_bucket *ip_idents __read_mostly;
467
468/* In order to protect privacy, we add a perturbation to identifiers
 469 * if one generator is seldom used. This makes it hard for an attacker
470 * to infer how many packets were sent between two points in time.
471 */
472u32 ip_idents_reserve(u32 hash, int segs)
473{
474 struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
475 u32 old = ACCESS_ONCE(bucket->stamp32);
476 u32 now = (u32)jiffies;
477 u32 delta = 0;
478
479 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
480 delta = prandom_u32_max(now - old);
481
482 return atomic_add_return(segs + delta, &bucket->id) - segs;
483}
484EXPORT_SYMBOL(ip_idents_reserve);
462 485
463void __ip_select_ident(struct iphdr *iph, int segs) 486void __ip_select_ident(struct iphdr *iph, int segs)
464{ 487{
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
467 490
468 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); 491 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
469 492
470 hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd); 493 hash = jhash_3words((__force u32)iph->daddr,
494 (__force u32)iph->saddr,
495 iph->protocol,
496 ip_idents_hashrnd);
471 id = ip_idents_reserve(hash, segs); 497 id = ip_idents_reserve(hash, segs);
472 iph->id = htons(id); 498 iph->id = htons(id);
473} 499}
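ip_idents_reserve() keeps an atomic counter and a last-use timestamp per hash bucket; when a bucket has been idle, a random increment bounded by the idle time is added, so sampling the IP ID twice no longer reveals how many packets were sent in between. A userspace sketch of the same idea with C11 atomics and a plain PRNG; the bucket count, clock source and random function are all stand-ins for the kernel's jiffies/prandom machinery:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define IDENTS_SZ 2048u

struct ident_bucket {
	atomic_uint id;
	atomic_uint stamp;       /* coarse timestamp of last use (seconds here) */
};

static struct ident_bucket idents[IDENTS_SZ];

/* Return the first id of a batch of @segs identifiers for this flow hash. */
static uint32_t idents_reserve(uint32_t hash, unsigned int segs)
{
	struct ident_bucket *b = &idents[hash % IDENTS_SZ];
	unsigned int old = atomic_load(&b->stamp);
	unsigned int now = (unsigned int)time(NULL);
	unsigned int delta = 0;

	/* Idle bucket: mix in a random perturbation bounded by the idle time;
	 * the CAS lets only one caller per tick apply it. */
	if (old != now && atomic_compare_exchange_strong(&b->stamp, &old, now))
		delta = (unsigned int)rand() % (now - old);

	/* Like atomic_add_return(segs + delta, &id) - segs in the kernel code:
	 * the perturbation lands before the ids handed out for this batch. */
	return atomic_fetch_add(&b->id, segs + delta) + delta;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	printf("id = %u\n", idents_reserve(0x1234abcdu, 1));
	printf("id = %u\n", idents_reserve(0x1234abcdu, 1));
	return 0;
}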
@@ -1010,7 +1036,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1010 const struct iphdr *iph = (const struct iphdr *) skb->data; 1036 const struct iphdr *iph = (const struct iphdr *) skb->data;
1011 struct flowi4 fl4; 1037 struct flowi4 fl4;
1012 struct rtable *rt; 1038 struct rtable *rt;
1013 struct dst_entry *dst; 1039 struct dst_entry *odst = NULL;
1014 bool new = false; 1040 bool new = false;
1015 1041
1016 bh_lock_sock(sk); 1042 bh_lock_sock(sk);
@@ -1018,16 +1044,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1018 if (!ip_sk_accept_pmtu(sk)) 1044 if (!ip_sk_accept_pmtu(sk))
1019 goto out; 1045 goto out;
1020 1046
1021 rt = (struct rtable *) __sk_dst_get(sk); 1047 odst = sk_dst_get(sk);
1022 1048
1023 if (sock_owned_by_user(sk) || !rt) { 1049 if (sock_owned_by_user(sk) || !odst) {
1024 __ipv4_sk_update_pmtu(skb, sk, mtu); 1050 __ipv4_sk_update_pmtu(skb, sk, mtu);
1025 goto out; 1051 goto out;
1026 } 1052 }
1027 1053
1028 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); 1054 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1029 1055
1030 if (!__sk_dst_check(sk, 0)) { 1056 rt = (struct rtable *)odst;
1057 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1031 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1058 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1032 if (IS_ERR(rt)) 1059 if (IS_ERR(rt))
1033 goto out; 1060 goto out;
@@ -1037,8 +1064,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1037 1064
1038 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); 1065 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1039 1066
1040 dst = dst_check(&rt->dst, 0); 1067 if (!dst_check(&rt->dst, 0)) {
1041 if (!dst) {
1042 if (new) 1068 if (new)
1043 dst_release(&rt->dst); 1069 dst_release(&rt->dst);
1044 1070
@@ -1050,10 +1076,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1050 } 1076 }
1051 1077
1052 if (new) 1078 if (new)
1053 __sk_dst_set(sk, &rt->dst); 1079 sk_dst_set(sk, &rt->dst);
1054 1080
1055out: 1081out:
1056 bh_unlock_sock(sk); 1082 bh_unlock_sock(sk);
1083 dst_release(odst);
1057} 1084}
1058EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1085EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1059 1086
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1108 if (unlikely(tp->repair)) { 1108 if (unlikely(tp->repair)) {
1109 if (tp->repair_queue == TCP_RECV_QUEUE) { 1109 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110 copied = tcp_send_rcvq(sk, msg, size); 1110 copied = tcp_send_rcvq(sk, msg, size);
1111 goto out; 1111 goto out_nopush;
1112 } 1112 }
1113 1113
1114 err = -EINVAL; 1114 err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
1282out: 1282out:
1283 if (copied) 1283 if (copied)
1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285out_nopush:
1285 release_sock(sk); 1286 release_sock(sk);
1286 return copied + copied_syn; 1287 return copied + copied_syn;
1287 1288
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c23756965a..40639c288dc2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1106 } 1106 }
1107 1107
1108 /* D-SACK for already forgotten data... Do dumb counting. */ 1108 /* D-SACK for already forgotten data... Do dumb counting. */
1109 if (dup_sack && tp->undo_marker && tp->undo_retrans && 1109 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
1110 !after(end_seq_0, prior_snd_una) && 1110 !after(end_seq_0, prior_snd_una) &&
1111 after(end_seq_0, tp->undo_marker)) 1111 after(end_seq_0, tp->undo_marker))
1112 tp->undo_retrans--; 1112 tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
1187 1187
1188 /* Account D-SACK for retransmitted packet. */ 1188 /* Account D-SACK for retransmitted packet. */
1189 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1189 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1190 if (tp->undo_marker && tp->undo_retrans && 1190 if (tp->undo_marker && tp->undo_retrans > 0 &&
1191 after(end_seq, tp->undo_marker)) 1191 after(end_seq, tp->undo_marker))
1192 tp->undo_retrans--; 1192 tp->undo_retrans--;
1193 if (sacked & TCPCB_SACKED_ACKED) 1193 if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
1893 tp->lost_out = 0; 1893 tp->lost_out = 0;
1894 1894
1895 tp->undo_marker = 0; 1895 tp->undo_marker = 0;
1896 tp->undo_retrans = 0; 1896 tp->undo_retrans = -1;
1897} 1897}
1898 1898
1899void tcp_clear_retrans(struct tcp_sock *tp) 1899void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2665 2665
2666 tp->prior_ssthresh = 0; 2666 tp->prior_ssthresh = 0;
2667 tp->undo_marker = tp->snd_una; 2667 tp->undo_marker = tp->snd_una;
2668 tp->undo_retrans = tp->retrans_out; 2668 tp->undo_retrans = tp->retrans_out ? : -1;
2669 2669
2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2671 if (!ece_ack) 2671 if (!ece_ack)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
309 309
310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, 310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
311 iph->daddr, 0); 311 iph->daddr, 0);
312 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 312 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
313 313
314 return tcp_gro_complete(skb); 314 return tcp_gro_complete(skb);
315} 315}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0ea24e..179b51e6bda3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2525 if (!tp->retrans_stamp) 2525 if (!tp->retrans_stamp)
2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2527 2527
2528 tp->undo_retrans += tcp_skb_pcount(skb);
2529
2530 /* snd_nxt is stored to detect loss of retransmitted segment, 2528 /* snd_nxt is stored to detect loss of retransmitted segment,
2531 * see tcp_input.c tcp_sacktag_write_queue(). 2529 * see tcp_input.c tcp_sacktag_write_queue().
2532 */ 2530 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2534 } else if (err != -EBUSY) { 2532 } else if (err != -EBUSY) {
2535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2533 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2536 } 2534 }
2535
2536 if (tp->undo_retrans < 0)
2537 tp->undo_retrans = 0;
2538 tp->undo_retrans += tcp_skb_pcount(skb);
2537 return err; 2539 return err;
2538} 2540}
2539 2541
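tp->undo_retrans now starts at -1, meaning "nothing retransmitted yet", and is reset to zero only immediately before the first retransmission is counted; the D-SACK paths test "> 0", so the sentinel can never be decremented as if it were an outstanding retransmission. A tiny sketch of the sentinel handling:

#include <stdio.h>

static int undo_retrans = -1;   /* -1: no retransmission counted yet */

static void count_retransmit(int pcount)
{
	if (undo_retrans < 0)       /* drop the sentinel only when we really retransmit */
		undo_retrans = 0;
	undo_retrans += pcount;
}

static void dsack_for_retransmit(void)
{
	if (undo_retrans > 0)       /* '> 0', so the -1 sentinel never decrements */
		undo_retrans--;
}

int main(void)
{
	dsack_for_retransmit();              /* ignored: nothing was retransmitted */
	printf("%d\n", undo_retrans);        /* -1 */
	count_retransmit(2);
	dsack_for_retransmit();
	printf("%d\n", undo_retrans);        /* 1 */
	return 0;
}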
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b7e402..7d5a8661df76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1588 goto csum_error; 1588 goto csum_error;
1589 1589
1590 1590
1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
1592 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1593 is_udplite);
1592 goto drop; 1594 goto drop;
1595 }
1593 1596
1594 rc = 0; 1597 rc = 0;
1595 1598
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cb9df0eb4023..45702b8cd141 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -545,6 +545,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); 545 net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
546 546
547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); 547 hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
548 hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
549
548 id = ip_idents_reserve(hash, 1); 550 id = ip_idents_reserve(hash, 1);
549 fhdr->identification = htonl(id); 551 fhdr->identification = htonl(id);
550} 552}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); 1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1302 len -= skb_network_header_len(skb); 1302 len -= skb_network_header_len(skb);
1303 1303
1304 /* Drop queries with not link local source */ 1304 /* RFC3810 6.2
1305 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) 1305 * Upon reception of an MLD message that contains a Query, the node
1306 * checks if the source address of the message is a valid link-local
1307 * address, if the Hop Limit is set to 1, and if the Router Alert
1308 * option is present in the Hop-By-Hop Options header of the IPv6
1309 * packet. If any of these checks fails, the packet is dropped.
1310 */
1311 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1312 ipv6_hdr(skb)->hop_limit != 1 ||
1313 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1314 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1306 return -EINVAL; 1315 return -EINVAL;
1307 1316
1308 idev = __in6_dev_get(skb->dev); 1317 idev = __in6_dev_get(skb->dev);
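RFC 3810 section 6.2 requires that an MLD query be accepted only when the source address is link-local, the hop limit is exactly 1, and the IPv6 router-alert option for MLD is present; the hunk folds all of these tests into one early return. A standalone sketch of the same checks as a predicate, with the fields standing in for the skb/IP6CB bookkeeping (the MLD router-alert value really is 0):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RTR_ALERT_MLD 0   /* value of the router-alert option for MLD */

struct mld_query_meta {
	bool     saddr_link_local;   /* source address is fe80::/10          */
	uint8_t  hop_limit;          /* IPv6 hop limit                       */
	bool     has_router_alert;   /* hop-by-hop router-alert option seen  */
	uint16_t router_alert_value; /* its value                            */
};

/* RFC 3810 6.2: drop the query unless every check passes. */
static bool mld_query_acceptable(const struct mld_query_meta *q)
{
	return q->saddr_link_local &&
	       q->hop_limit == 1 &&
	       q->has_router_alert &&
	       q->router_alert_value == RTR_ALERT_MLD;
}

int main(void)
{
	struct mld_query_meta ok  = { true, 1,  true, RTR_ALERT_MLD };
	struct mld_query_meta bad = { true, 64, true, RTR_ALERT_MLD };  /* wrong hop limit */

	printf("%d %d\n", mld_query_acceptable(&ok), mld_query_acceptable(&bad));
	return 0;
}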
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
73 73
74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, 74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
75 &iph->daddr, 0); 75 &iph->daddr, 0);
76 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 76 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
77 77
78 return tcp_gro_complete(skb); 78 return tcp_gro_complete(skb);
79} 79}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c834799288..7092ff78fd84 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
674 goto csum_error; 674 goto csum_error;
675 } 675 }
676 676
677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
678 UDP6_INC_STATS_BH(sock_net(sk),
679 UDP_MIB_RCVBUFERRORS, is_udplite);
678 goto drop; 680 goto drop;
681 }
679 682
680 skb_dst_drop(skb); 683 skb_dst_drop(skb);
681 684
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
690 bh_unlock_sock(sk); 693 bh_unlock_sock(sk);
691 694
692 return rc; 695 return rc;
696
693csum_error: 697csum_error:
694 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 698 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
695drop: 699drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1365 int err; 1365 int err;
1366 1366
1367 if (level != SOL_PPPOL2TP) 1367 if (level != SOL_PPPOL2TP)
1368 return udp_prot.setsockopt(sk, level, optname, optval, optlen); 1368 return -EINVAL;
1369 1369
1370 if (optlen < sizeof(int)) 1370 if (optlen < sizeof(int))
1371 return -EINVAL; 1371 return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1491 struct pppol2tp_session *ps; 1491 struct pppol2tp_session *ps;
1492 1492
1493 if (level != SOL_PPPOL2TP) 1493 if (level != SOL_PPPOL2TP)
1494 return udp_prot.getsockopt(sk, level, optname, optval, optlen); 1494 return -EINVAL;
1495 1495
1496 if (get_user(len, optlen)) 1496 if (get_user(len, optlen))
1497 return -EFAULT; 1497 return -EFAULT;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d7513a503be1..592f4b152ba8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -472,12 +472,15 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
472{ 472{
473 struct ieee80211_sub_if_data *sdata = sta->sdata; 473 struct ieee80211_sub_if_data *sdata = sta->sdata;
474 struct ieee80211_local *local = sdata->local; 474 struct ieee80211_local *local = sdata->local;
475 struct rate_control_ref *ref = local->rate_ctrl; 475 struct rate_control_ref *ref = NULL;
476 struct timespec uptime; 476 struct timespec uptime;
477 u64 packets = 0; 477 u64 packets = 0;
478 u32 thr = 0; 478 u32 thr = 0;
479 int i, ac; 479 int i, ac;
480 480
481 if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
482 ref = local->rate_ctrl;
483
481 sinfo->generation = sdata->local->sta_generation; 484 sinfo->generation = sdata->local->sta_generation;
482 485
483 sinfo->filled = STATION_INFO_INACTIVE_TIME | 486 sinfo->filled = STATION_INFO_INACTIVE_TIME |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5214686d9fd1..1a252c606ad0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
414 if (ieee80211_has_order(hdr->frame_control)) 414 if (ieee80211_has_order(hdr->frame_control))
415 return TX_CONTINUE; 415 return TX_CONTINUE;
416 416
417 if (ieee80211_is_probe_req(hdr->frame_control))
418 return TX_CONTINUE;
419
417 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) 420 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
418 info->hw_queue = tx->sdata->vif.cab_queue; 421 info->hw_queue = tx->sdata->vif.cab_queue;
419 422
@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
463{ 466{
464 struct sta_info *sta = tx->sta; 467 struct sta_info *sta = tx->sta;
465 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
469 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
466 struct ieee80211_local *local = tx->local; 470 struct ieee80211_local *local = tx->local;
467 471
468 if (unlikely(!sta)) 472 if (unlikely(!sta))
@@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
473 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { 477 !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
474 int ac = skb_get_queue_mapping(tx->skb); 478 int ac = skb_get_queue_mapping(tx->skb);
475 479
480 if (ieee80211_is_mgmt(hdr->frame_control) &&
481 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
482 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
483 return TX_CONTINUE;
484 }
485
476 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", 486 ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
477 sta->sta.addr, sta->sta.aid, ac); 487 sta->sta.addr, sta->sta.aid, ac);
478 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 488 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
531static ieee80211_tx_result debug_noinline 541static ieee80211_tx_result debug_noinline
532ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) 542ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
533{ 543{
534 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
535 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
536
537 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) 544 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
538 return TX_CONTINUE; 545 return TX_CONTINUE;
539 546
540 if (ieee80211_is_mgmt(hdr->frame_control) &&
541 !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
542 if (tx->flags & IEEE80211_TX_UNICAST)
543 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
544 return TX_CONTINUE;
545 }
546
547 if (tx->flags & IEEE80211_TX_UNICAST) 547 if (tx->flags & IEEE80211_TX_UNICAST)
548 return ieee80211_tx_h_unicast_ps_buf(tx); 548 return ieee80211_tx_h_unicast_ps_buf(tx);
549 else 549 else
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1096 int err; 1096 int err;
1097 1097
1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */ 1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len); 1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
1100 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
1100 if (!skb) 1101 if (!skb)
1101 return; 1102 return;
1102 1103
1103 skb_reserve(skb, local->hw.extra_tx_headroom); 1104 skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
1104 1105
1105 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 1106 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
1106 memset(mgmt, 0, 24 + 6); 1107 memset(mgmt, 0, 24 + 6);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a89326a..610e19c0e13f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
797 ip_vs_control_del(cp); 797 ip_vs_control_del(cp);
798 798
799 if (cp->flags & IP_VS_CONN_F_NFCT) { 799 if (cp->flags & IP_VS_CONN_F_NFCT) {
800 ip_vs_conn_drop_conntrack(cp);
801 /* Do not access conntracks during subsys cleanup 800 /* Do not access conntracks during subsys cleanup
802 * because nf_conntrack_find_get can not be used after 801 * because nf_conntrack_find_get can not be used after
803 * conntrack cleanup for the net. 802 * conntrack cleanup for the net.
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
35{ 35{
36 INIT_LIST_HEAD(&afi->tables); 36 INIT_LIST_HEAD(&afi->tables);
37 nfnl_lock(NFNL_SUBSYS_NFTABLES); 37 nfnl_lock(NFNL_SUBSYS_NFTABLES);
38 list_add_tail(&afi->list, &net->nft.af_info); 38 list_add_tail_rcu(&afi->list, &net->nft.af_info);
39 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 39 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
40 return 0; 40 return 0;
41} 41}
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
51void nft_unregister_afinfo(struct nft_af_info *afi) 51void nft_unregister_afinfo(struct nft_af_info *afi)
52{ 52{
53 nfnl_lock(NFNL_SUBSYS_NFTABLES); 53 nfnl_lock(NFNL_SUBSYS_NFTABLES);
54 list_del(&afi->list); 54 list_del_rcu(&afi->list);
55 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 55 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
56} 56}
57EXPORT_SYMBOL_GPL(nft_unregister_afinfo); 57EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
277 struct net *net = sock_net(skb->sk); 277 struct net *net = sock_net(skb->sk);
278 int family = nfmsg->nfgen_family; 278 int family = nfmsg->nfgen_family;
279 279
280 list_for_each_entry(afi, &net->nft.af_info, list) { 280 rcu_read_lock();
281 cb->seq = net->nft.base_seq;
282
283 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
281 if (family != NFPROTO_UNSPEC && family != afi->family) 284 if (family != NFPROTO_UNSPEC && family != afi->family)
282 continue; 285 continue;
283 286
284 list_for_each_entry(table, &afi->tables, list) { 287 list_for_each_entry_rcu(table, &afi->tables, list) {
285 if (idx < s_idx) 288 if (idx < s_idx)
286 goto cont; 289 goto cont;
287 if (idx > s_idx) 290 if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
294 NLM_F_MULTI, 297 NLM_F_MULTI,
295 afi->family, table) < 0) 298 afi->family, table) < 0)
296 goto done; 299 goto done;
300
301 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
297cont: 302cont:
298 idx++; 303 idx++;
299 } 304 }
300 } 305 }
301done: 306done:
307 rcu_read_unlock();
302 cb->args[0] = idx; 308 cb->args[0] = idx;
303 return skb->len; 309 return skb->len;
304} 310}
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
407 if (flags & ~NFT_TABLE_F_DORMANT) 413 if (flags & ~NFT_TABLE_F_DORMANT)
408 return -EINVAL; 414 return -EINVAL;
409 415
416 if (flags == ctx->table->flags)
417 return 0;
418
410 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, 419 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
411 sizeof(struct nft_trans_table)); 420 sizeof(struct nft_trans_table));
412 if (trans == NULL) 421 if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
514 module_put(afi->owner); 523 module_put(afi->owner);
515 return err; 524 return err;
516 } 525 }
517 list_add_tail(&table->list, &afi->tables); 526 list_add_tail_rcu(&table->list, &afi->tables);
518 return 0; 527 return 0;
519} 528}
520 529
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
546 if (err < 0) 555 if (err < 0)
547 return err; 556 return err;
548 557
549 list_del(&table->list); 558 list_del_rcu(&table->list);
550 return 0; 559 return 0;
551} 560}
552 561
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
635{ 644{
636 struct nft_stats *cpu_stats, total; 645 struct nft_stats *cpu_stats, total;
637 struct nlattr *nest; 646 struct nlattr *nest;
647 unsigned int seq;
648 u64 pkts, bytes;
638 int cpu; 649 int cpu;
639 650
640 memset(&total, 0, sizeof(total)); 651 memset(&total, 0, sizeof(total));
641 for_each_possible_cpu(cpu) { 652 for_each_possible_cpu(cpu) {
642 cpu_stats = per_cpu_ptr(stats, cpu); 653 cpu_stats = per_cpu_ptr(stats, cpu);
643 total.pkts += cpu_stats->pkts; 654 do {
644 total.bytes += cpu_stats->bytes; 655 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
656 pkts = cpu_stats->pkts;
657 bytes = cpu_stats->bytes;
658 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
659 total.pkts += pkts;
660 total.bytes += bytes;
645 } 661 }
646 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS); 662 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
647 if (nest == NULL) 663 if (nest == NULL)
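The hunk above switches the per-cpu counter sum to the u64_stats seqcount read pattern so 64-bit counters stay consistent on 32-bit machines. A minimal reader-side sketch of that pattern follows; struct my_stats and my_sum_stats() are illustrative names, not part of the patch.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Sum per-cpu counters; retry a CPU's snapshot if a writer raced with us. */
static void my_sum_stats(struct my_stats __percpu *stats,
			 u64 *tot_pkts, u64 *tot_bytes)
{
	int cpu;

	*tot_pkts = 0;
	*tot_bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct my_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int seq;
		u64 pkts, bytes;

		do {
			seq = u64_stats_fetch_begin_irq(&s->syncp);
			pkts = s->pkts;
			bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, seq));

		*tot_pkts += pkts;
		*tot_bytes += bytes;
	}
}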
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
761 struct net *net = sock_net(skb->sk); 777 struct net *net = sock_net(skb->sk);
762 int family = nfmsg->nfgen_family; 778 int family = nfmsg->nfgen_family;
763 779
764 list_for_each_entry(afi, &net->nft.af_info, list) { 780 rcu_read_lock();
781 cb->seq = net->nft.base_seq;
782
783 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
765 if (family != NFPROTO_UNSPEC && family != afi->family) 784 if (family != NFPROTO_UNSPEC && family != afi->family)
766 continue; 785 continue;
767 786
768 list_for_each_entry(table, &afi->tables, list) { 787 list_for_each_entry_rcu(table, &afi->tables, list) {
769 list_for_each_entry(chain, &table->chains, list) { 788 list_for_each_entry_rcu(chain, &table->chains, list) {
770 if (idx < s_idx) 789 if (idx < s_idx)
771 goto cont; 790 goto cont;
772 if (idx > s_idx) 791 if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
778 NLM_F_MULTI, 797 NLM_F_MULTI,
779 afi->family, table, chain) < 0) 798 afi->family, table, chain) < 0)
780 goto done; 799 goto done;
800
801 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
781cont: 802cont:
782 idx++; 803 idx++;
783 } 804 }
784 } 805 }
785 } 806 }
786done: 807done:
808 rcu_read_unlock();
787 cb->args[0] = idx; 809 cb->args[0] = idx;
788 return skb->len; 810 return skb->len;
789} 811}
790 812
791
792static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb, 813static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
793 const struct nlmsghdr *nlh, 814 const struct nlmsghdr *nlh,
794 const struct nlattr * const nla[]) 815 const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
861 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS]) 882 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
862 return ERR_PTR(-EINVAL); 883 return ERR_PTR(-EINVAL);
863 884
864 newstats = alloc_percpu(struct nft_stats); 885 newstats = netdev_alloc_pcpu_stats(struct nft_stats);
865 if (newstats == NULL) 886 if (newstats == NULL)
866 return ERR_PTR(-ENOMEM); 887 return ERR_PTR(-ENOMEM);
867 888
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1077 } 1098 }
1078 basechain->stats = stats; 1099 basechain->stats = stats;
1079 } else { 1100 } else {
1080 stats = alloc_percpu(struct nft_stats); 1101 stats = netdev_alloc_pcpu_stats(struct nft_stats);
1081 if (IS_ERR(stats)) { 1102 if (IS_ERR(stats)) {
1082 module_put(type->owner); 1103 module_put(type->owner);
1083 kfree(basechain); 1104 kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1130 goto err2; 1151 goto err2;
1131 1152
1132 table->use++; 1153 table->use++;
1133 list_add_tail(&chain->list, &table->chains); 1154 list_add_tail_rcu(&chain->list, &table->chains);
1134 return 0; 1155 return 0;
1135err2: 1156err2:
1136 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1157 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1180 return err; 1201 return err;
1181 1202
1182 table->use--; 1203 table->use--;
1183 list_del(&chain->list); 1204 list_del_rcu(&chain->list);
1184 return 0; 1205 return 0;
1185} 1206}
1186 1207
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
1199{ 1220{
1200 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1221 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1201 if (type->family == NFPROTO_UNSPEC) 1222 if (type->family == NFPROTO_UNSPEC)
1202 list_add_tail(&type->list, &nf_tables_expressions); 1223 list_add_tail_rcu(&type->list, &nf_tables_expressions);
1203 else 1224 else
1204 list_add(&type->list, &nf_tables_expressions); 1225 list_add_rcu(&type->list, &nf_tables_expressions);
1205 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1226 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1206 return 0; 1227 return 0;
1207} 1228}
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
1216void nft_unregister_expr(struct nft_expr_type *type) 1237void nft_unregister_expr(struct nft_expr_type *type)
1217{ 1238{
1218 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1239 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1219 list_del(&type->list); 1240 list_del_rcu(&type->list);
1220 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1241 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1221} 1242}
1222EXPORT_SYMBOL_GPL(nft_unregister_expr); 1243EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1549 unsigned int idx = 0, s_idx = cb->args[0]; 1570 unsigned int idx = 0, s_idx = cb->args[0];
1550 struct net *net = sock_net(skb->sk); 1571 struct net *net = sock_net(skb->sk);
1551 int family = nfmsg->nfgen_family; 1572 int family = nfmsg->nfgen_family;
1552 u8 genctr = ACCESS_ONCE(net->nft.genctr);
1553 u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
1554 1573
1555 list_for_each_entry(afi, &net->nft.af_info, list) { 1574 rcu_read_lock();
1575 cb->seq = net->nft.base_seq;
1576
1577 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
1556 if (family != NFPROTO_UNSPEC && family != afi->family) 1578 if (family != NFPROTO_UNSPEC && family != afi->family)
1557 continue; 1579 continue;
1558 1580
1559 list_for_each_entry(table, &afi->tables, list) { 1581 list_for_each_entry_rcu(table, &afi->tables, list) {
1560 list_for_each_entry(chain, &table->chains, list) { 1582 list_for_each_entry_rcu(chain, &table->chains, list) {
1561 list_for_each_entry(rule, &chain->rules, list) { 1583 list_for_each_entry_rcu(rule, &chain->rules, list) {
1562 if (!nft_rule_is_active(net, rule)) 1584 if (!nft_rule_is_active(net, rule))
1563 goto cont; 1585 goto cont;
1564 if (idx < s_idx) 1586 if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1572 NLM_F_MULTI | NLM_F_APPEND, 1594 NLM_F_MULTI | NLM_F_APPEND,
1573 afi->family, table, chain, rule) < 0) 1595 afi->family, table, chain, rule) < 0)
1574 goto done; 1596 goto done;
1597
1598 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1575cont: 1599cont:
1576 idx++; 1600 idx++;
1577 } 1601 }
@@ -1579,9 +1603,7 @@ cont:
1579 } 1603 }
1580 } 1604 }
1581done: 1605done:
1582 /* Invalidate this dump, a transition to the new generation happened */ 1606 rcu_read_unlock();
1583 if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
1584 return -EBUSY;
1585 1607
1586 cb->args[0] = idx; 1608 cb->args[0] = idx;
1587 return skb->len; 1609 return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
1932int nft_register_set(struct nft_set_ops *ops) 1954int nft_register_set(struct nft_set_ops *ops)
1933{ 1955{
1934 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1956 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1935 list_add_tail(&ops->list, &nf_tables_set_ops); 1957 list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
1936 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1958 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1937 return 0; 1959 return 0;
1938} 1960}
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
1941void nft_unregister_set(struct nft_set_ops *ops) 1963void nft_unregister_set(struct nft_set_ops *ops)
1942{ 1964{
1943 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1965 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1944 list_del(&ops->list); 1966 list_del_rcu(&ops->list);
1945 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1967 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1946} 1968}
1947EXPORT_SYMBOL_GPL(nft_unregister_set); 1969EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2234 if (cb->args[1]) 2256 if (cb->args[1])
2235 return skb->len; 2257 return skb->len;
2236 2258
2237 list_for_each_entry(set, &ctx->table->sets, list) { 2259 rcu_read_lock();
2260 cb->seq = ctx->net->nft.base_seq;
2261
2262 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2238 if (idx < s_idx) 2263 if (idx < s_idx)
2239 goto cont; 2264 goto cont;
2240 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2265 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2242 cb->args[0] = idx; 2267 cb->args[0] = idx;
2243 goto done; 2268 goto done;
2244 } 2269 }
2270 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2245cont: 2271cont:
2246 idx++; 2272 idx++;
2247 } 2273 }
2248 cb->args[1] = 1; 2274 cb->args[1] = 1;
2249done: 2275done:
2276 rcu_read_unlock();
2250 return skb->len; 2277 return skb->len;
2251} 2278}
2252 2279
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2260 if (cb->args[1]) 2287 if (cb->args[1])
2261 return skb->len; 2288 return skb->len;
2262 2289
2263 list_for_each_entry(table, &ctx->afi->tables, list) { 2290 rcu_read_lock();
2291 cb->seq = ctx->net->nft.base_seq;
2292
2293 list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
2264 if (cur_table) { 2294 if (cur_table) {
2265 if (cur_table != table) 2295 if (cur_table != table)
2266 continue; 2296 continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2269 } 2299 }
2270 ctx->table = table; 2300 ctx->table = table;
2271 idx = 0; 2301 idx = 0;
2272 list_for_each_entry(set, &ctx->table->sets, list) { 2302 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2273 if (idx < s_idx) 2303 if (idx < s_idx)
2274 goto cont; 2304 goto cont;
2275 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2305 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2278 cb->args[2] = (unsigned long) table; 2308 cb->args[2] = (unsigned long) table;
2279 goto done; 2309 goto done;
2280 } 2310 }
2311 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2281cont: 2312cont:
2282 idx++; 2313 idx++;
2283 } 2314 }
2284 } 2315 }
2285 cb->args[1] = 1; 2316 cb->args[1] = 1;
2286done: 2317done:
2318 rcu_read_unlock();
2287 return skb->len; 2319 return skb->len;
2288} 2320}
2289 2321
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2300 if (cb->args[1]) 2332 if (cb->args[1])
2301 return skb->len; 2333 return skb->len;
2302 2334
2303 list_for_each_entry(afi, &net->nft.af_info, list) { 2335 rcu_read_lock();
2336 cb->seq = net->nft.base_seq;
2337
2338 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
2304 if (cur_family) { 2339 if (cur_family) {
2305 if (afi->family != cur_family) 2340 if (afi->family != cur_family)
2306 continue; 2341 continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2308 cur_family = 0; 2343 cur_family = 0;
2309 } 2344 }
2310 2345
2311 list_for_each_entry(table, &afi->tables, list) { 2346 list_for_each_entry_rcu(table, &afi->tables, list) {
2312 if (cur_table) { 2347 if (cur_table) {
2313 if (cur_table != table) 2348 if (cur_table != table)
2314 continue; 2349 continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2319 ctx->table = table; 2354 ctx->table = table;
2320 ctx->afi = afi; 2355 ctx->afi = afi;
2321 idx = 0; 2356 idx = 0;
2322 list_for_each_entry(set, &ctx->table->sets, list) { 2357 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2323 if (idx < s_idx) 2358 if (idx < s_idx)
2324 goto cont; 2359 goto cont;
2325 if (nf_tables_fill_set(skb, ctx, set, 2360 if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2330 cb->args[3] = afi->family; 2365 cb->args[3] = afi->family;
2331 goto done; 2366 goto done;
2332 } 2367 }
2368 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2333cont: 2369cont:
2334 idx++; 2370 idx++;
2335 } 2371 }
@@ -2339,6 +2375,7 @@ cont:
2339 } 2375 }
2340 cb->args[1] = 1; 2376 cb->args[1] = 1;
2341done: 2377done:
2378 rcu_read_unlock();
2342 return skb->len; 2379 return skb->len;
2343} 2380}
2344 2381
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2597 if (err < 0) 2634 if (err < 0)
2598 goto err2; 2635 goto err2;
2599 2636
2600 list_add_tail(&set->list, &table->sets); 2637 list_add_tail_rcu(&set->list, &table->sets);
2601 table->use++; 2638 table->use++;
2602 return 0; 2639 return 0;
2603 2640
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
2617 2654
2618static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2655static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2619{ 2656{
2620 list_del(&set->list); 2657 list_del_rcu(&set->list);
2621 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); 2658 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2622 nft_set_destroy(set); 2659 nft_set_destroy(set);
2623} 2660}
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2652 if (err < 0) 2689 if (err < 0)
2653 return err; 2690 return err;
2654 2691
2655 list_del(&set->list); 2692 list_del_rcu(&set->list);
2656 ctx.table->use--; 2693 ctx.table->use--;
2657 return 0; 2694 return 0;
2658} 2695}
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2704 } 2741 }
2705bind: 2742bind:
2706 binding->chain = ctx->chain; 2743 binding->chain = ctx->chain;
2707 list_add_tail(&binding->list, &set->bindings); 2744 list_add_tail_rcu(&binding->list, &set->bindings);
2708 return 0; 2745 return 0;
2709} 2746}
2710 2747
2711void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 2748void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2712 struct nft_set_binding *binding) 2749 struct nft_set_binding *binding)
2713{ 2750{
2714 list_del(&binding->list); 2751 list_del_rcu(&binding->list);
2715 2752
2716 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && 2753 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2717 !(set->flags & NFT_SET_INACTIVE)) 2754 !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
3346 struct nft_set *set; 3383 struct nft_set *set;
3347 3384
3348 /* Bump generation counter, invalidate any dump in progress */ 3385 /* Bump generation counter, invalidate any dump in progress */
3349 net->nft.genctr++; 3386 while (++net->nft.base_seq == 0);
3350 3387
3351 /* A new generation has just started */ 3388 /* A new generation has just started */
3352 net->nft.gencursor = gencursor_next(net); 3389 net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3491 } 3528 }
3492 nft_trans_destroy(trans); 3529 nft_trans_destroy(trans);
3493 } else { 3530 } else {
3494 list_del(&trans->ctx.table->list); 3531 list_del_rcu(&trans->ctx.table->list);
3495 } 3532 }
3496 break; 3533 break;
3497 case NFT_MSG_DELTABLE: 3534 case NFT_MSG_DELTABLE:
3498 list_add_tail(&trans->ctx.table->list, 3535 list_add_tail_rcu(&trans->ctx.table->list,
3499 &trans->ctx.afi->tables); 3536 &trans->ctx.afi->tables);
3500 nft_trans_destroy(trans); 3537 nft_trans_destroy(trans);
3501 break; 3538 break;
3502 case NFT_MSG_NEWCHAIN: 3539 case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
3507 nft_trans_destroy(trans); 3544 nft_trans_destroy(trans);
3508 } else { 3545 } else {
3509 trans->ctx.table->use--; 3546 trans->ctx.table->use--;
3510 list_del(&trans->ctx.chain->list); 3547 list_del_rcu(&trans->ctx.chain->list);
3511 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && 3548 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3512 trans->ctx.chain->flags & NFT_BASE_CHAIN) { 3549 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3513 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, 3550 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
3517 break; 3554 break;
3518 case NFT_MSG_DELCHAIN: 3555 case NFT_MSG_DELCHAIN:
3519 trans->ctx.table->use++; 3556 trans->ctx.table->use++;
3520 list_add_tail(&trans->ctx.chain->list, 3557 list_add_tail_rcu(&trans->ctx.chain->list,
3521 &trans->ctx.table->chains); 3558 &trans->ctx.table->chains);
3522 nft_trans_destroy(trans); 3559 nft_trans_destroy(trans);
3523 break; 3560 break;
3524 case NFT_MSG_NEWRULE: 3561 case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3532 break; 3569 break;
3533 case NFT_MSG_NEWSET: 3570 case NFT_MSG_NEWSET:
3534 trans->ctx.table->use--; 3571 trans->ctx.table->use--;
3535 list_del(&nft_trans_set(trans)->list); 3572 list_del_rcu(&nft_trans_set(trans)->list);
3536 break; 3573 break;
3537 case NFT_MSG_DELSET: 3574 case NFT_MSG_DELSET:
3538 trans->ctx.table->use++; 3575 trans->ctx.table->use++;
3539 list_add_tail(&nft_trans_set(trans)->list, 3576 list_add_tail_rcu(&nft_trans_set(trans)->list,
3540 &trans->ctx.table->sets); 3577 &trans->ctx.table->sets);
3541 nft_trans_destroy(trans); 3578 nft_trans_destroy(trans);
3542 break; 3579 break;
3543 case NFT_MSG_NEWSETELEM: 3580 case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
3951{ 3988{
3952 INIT_LIST_HEAD(&net->nft.af_info); 3989 INIT_LIST_HEAD(&net->nft.af_info);
3953 INIT_LIST_HEAD(&net->nft.commit_list); 3990 INIT_LIST_HEAD(&net->nft.commit_list);
3991 net->nft.base_seq = 1;
3954 return 0; 3992 return 0;
3955} 3993}
3956 3994
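Taken together, the nf_tables_api.c hunks above move the netlink dump paths from mutex-protected list walks to RCU iteration: the lists are modified with the list_*_rcu() variants, each dump records the current generation in cb->seq, and nl_dump_check_consistent() marks messages with NLM_F_DUMP_INTR if a commit bumped base_seq mid-dump. A minimal sketch of that dump pattern, assuming a hypothetical my_obj list and my_fill_one() helper:

#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

struct my_obj {
	struct list_head list;
	/* object payload */
};

/* Hypothetical fill helper; a real dumper would emit one netlink message. */
static int my_fill_one(struct sk_buff *skb, struct my_obj *obj)
{
	return 0;
}

/* Assumes my_list is modified only with list_*_rcu() under a mutex and
 * my_base_seq is incremented on every committed change (cf. base_seq).
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct list_head *my_list, unsigned int my_base_seq)
{
	struct my_obj *obj;
	unsigned int idx = 0, s_idx = cb->args[0];

	rcu_read_lock();
	cb->seq = my_base_seq;

	list_for_each_entry_rcu(obj, my_list, list) {
		if (idx < s_idx)
			goto cont;
		if (my_fill_one(skb, obj) < 0)
			break;
		/* Flags the message NLM_F_DUMP_INTR if the generation moved. */
		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;
	return skb->len;
}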
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
109 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
110 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
112 struct nft_stats __percpu *stats; 112 struct nft_stats *stats;
113 int rulenum; 113 int rulenum;
114 /* 114 /*
115 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206 206
207 rcu_read_lock_bh(); 207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats); 208 stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
209 __this_cpu_inc(stats->pkts); 209 u64_stats_update_begin(&stats->syncp);
210 __this_cpu_add(stats->bytes, pkt->skb->len); 210 stats->pkts++;
211 stats->bytes += pkt->skb->len;
212 u64_stats_update_end(&stats->syncp);
211 rcu_read_unlock_bh(); 213 rcu_read_unlock_bh();
212 214
213 return nft_base_chain(basechain)->policy; 215 return nft_base_chain(basechain)->policy;
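The writer side above pairs u64_stats_update_begin()/end() with the same syncp, with BH disabled so only one writer per CPU is in flight. A short sketch of that update path, reusing the illustrative struct my_stats from the reader sketch earlier in this diff:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Called with BH disabled (e.g. under rcu_read_lock_bh() as in
 * nft_do_chain()); struct my_stats is the illustrative type above.
 */
static void my_count_packet(struct my_stats __percpu *percpu_stats,
			    unsigned int len)
{
	struct my_stats *stats = this_cpu_ptr(percpu_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->pkts++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}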
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
636 while (nlk->cb_running && netlink_dump_space(nlk)) { 636 while (nlk->cb_running && netlink_dump_space(nlk)) {
637 err = netlink_dump(sk); 637 err = netlink_dump(sk);
638 if (err < 0) { 638 if (err < 0) {
639 sk->sk_err = err; 639 sk->sk_err = -err;
640 sk->sk_error_report(sk); 640 sk->sk_error_report(sk);
641 break; 641 break;
642 } 642 }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2484 ret = netlink_dump(sk); 2484 ret = netlink_dump(sk);
2485 if (ret) { 2485 if (ret) {
2486 sk->sk_err = ret; 2486 sk->sk_err = -ret;
2487 sk->sk_error_report(sk); 2487 sk->sk_error_report(sk);
2488 } 2488 }
2489 } 2489 }
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
551 551
552 case OVS_ACTION_ATTR_SAMPLE: 552 case OVS_ACTION_ATTR_SAMPLE:
553 err = sample(dp, skb, a); 553 err = sample(dp, skb, a);
554 if (unlikely(err)) /* skb already freed. */
555 return err;
554 break; 556 break;
555 } 557 }
556 558
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
276 OVS_CB(skb)->flow = flow; 276 OVS_CB(skb)->flow = flow;
277 OVS_CB(skb)->pkt_key = &key; 277 OVS_CB(skb)->pkt_key = &key;
278 278
279 ovs_flow_stats_update(OVS_CB(skb)->flow, skb); 279 ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
280 ovs_execute_actions(dp, skb); 280 ovs_execute_actions(dp, skb);
281 stats_counter = &stats->n_hit; 281 stats_counter = &stats->n_hit;
282 282
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
889 } 889 }
890 /* The unmasked key has to be the same for flow updates. */ 890 /* The unmasked key has to be the same for flow updates. */
891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { 891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST; 892 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
893 goto err_unlock_ovs; 893 if (!flow) {
894 error = -ENOENT;
895 goto err_unlock_ovs;
896 }
894 } 897 }
895 /* Update actions. */ 898 /* Update actions. */
896 old_acts = ovsl_dereference(flow->sf_acts); 899 old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
981 goto err_unlock_ovs; 984 goto err_unlock_ovs;
982 } 985 }
983 /* Check that the flow exists. */ 986 /* Check that the flow exists. */
984 flow = ovs_flow_tbl_lookup(&dp->table, &key); 987 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
985 if (unlikely(!flow)) { 988 if (unlikely(!flow)) {
986 error = -ENOENT; 989 error = -ENOENT;
987 goto err_unlock_ovs; 990 goto err_unlock_ovs;
988 } 991 }
989 /* The unmasked key has to be the same for flow updates. */ 992
990 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
991 error = -EEXIST;
992 goto err_unlock_ovs;
993 }
994 /* Update actions, if present. */ 993 /* Update actions, if present. */
995 if (likely(acts)) { 994 if (likely(acts)) {
996 old_acts = ovsl_dereference(flow->sf_acts); 995 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1063 goto unlock; 1062 goto unlock;
1064 } 1063 }
1065 1064
1066 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1065 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1067 if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { 1066 if (!flow) {
1068 err = -ENOENT; 1067 err = -ENOENT;
1069 goto unlock; 1068 goto unlock;
1070 } 1069 }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1113 goto unlock; 1112 goto unlock;
1114 } 1113 }
1115 1114
1116 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1115 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1117 if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) { 1116 if (unlikely(!flow)) {
1118 err = -ENOENT; 1117 err = -ENOENT;
1119 goto unlock; 1118 goto unlock;
1120 } 1119 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
61 61
62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) 62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
63 63
64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) 64void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
65 struct sk_buff *skb)
65{ 66{
66 struct flow_stats *stats; 67 struct flow_stats *stats;
67 __be16 tcp_flags = flow->key.tp.flags;
68 int node = numa_node_id(); 68 int node = numa_node_id();
69 69
70 stats = rcu_dereference(flow->stats[node]); 70 stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
180 unsigned char ar_tip[4]; /* target IP address */ 180 unsigned char ar_tip[4]; /* target IP address */
181} __packed; 181} __packed;
182 182
183void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *); 183void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
184 struct sk_buff *);
184void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, 185void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
185 unsigned long *used, __be16 *tcp_flags); 186 unsigned long *used, __be16 *tcp_flags);
186void ovs_flow_stats_clear(struct sw_flow *); 187void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); 456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
457} 457}
458 458
459struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
460 struct sw_flow_match *match)
461{
462 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
463 struct sw_flow_mask *mask;
464 struct sw_flow *flow;
465
466 /* Always called under ovs-mutex. */
467 list_for_each_entry(mask, &tbl->mask_list, list) {
468 flow = masked_flow_lookup(ti, match->key, mask);
469 if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
470 return flow;
471 }
472 return NULL;
473}
474
459int ovs_flow_tbl_num_masks(const struct flow_table *table) 475int ovs_flow_tbl_num_masks(const struct flow_table *table)
460{ 476{
461 struct sw_flow_mask *mask; 477 struct sw_flow_mask *mask;
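ovs_flow_tbl_lookup_exact() walks every mask and returns a flow only if its unmasked key matches, so the flow command handlers above can drop their separate ovs_flow_cmp_unmasked_key() checks and report -ENOENT precisely. A hedged sketch of the calling pattern; my_lookup_for_update() is an illustrative wrapper, not a function in the patch:

#include <linux/err.h>
#include "datapath.h"	/* struct datapath, in net/openvswitch */
#include "flow_table.h"

/* Must be called with ovs_mutex held; 'match' was built from the
 * netlink attributes earlier in the handler.
 */
static struct sw_flow *my_lookup_for_update(struct datapath *dp,
					    struct sw_flow_match *match)
{
	struct sw_flow *flow;

	flow = ovs_flow_tbl_lookup_exact(&dp->table, match);
	if (!flow)
		return ERR_PTR(-ENOENT);	/* no flow with this exact key */

	/* The exact lookup already guarantees the unmasked keys match,
	 * so no extra comparison is needed before updating the actions.
	 */
	return flow;
}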
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
76 u32 *n_mask_hit); 76 u32 *n_mask_hit);
77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
78 const struct sw_flow_key *); 78 const struct sw_flow_key *);
79 79struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
80 struct sw_flow_match *match);
80bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 81bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
81 struct sw_flow_match *match); 82 struct sw_flow_match *match);
82 83
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
110 return PACKET_RCVD; 110 return PACKET_RCVD;
111} 111}
112 112
113/* Called with rcu_read_lock and BH disabled. */
114static int gre_err(struct sk_buff *skb, u32 info,
115 const struct tnl_ptk_info *tpi)
116{
117 struct ovs_net *ovs_net;
118 struct vport *vport;
119
120 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
121 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
122
123 if (unlikely(!vport))
124 return PACKET_REJECT;
125 else
126 return PACKET_RCVD;
127}
128
113static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) 129static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
114{ 130{
115 struct net *net = ovs_dp_get_net(vport->dp); 131 struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
186 202
187static struct gre_cisco_protocol gre_protocol = { 203static struct gre_cisco_protocol gre_protocol = {
188 .handler = gre_rcv, 204 .handler = gre_rcv,
205 .err_handler = gre_err,
189 .priority = 1, 206 .priority = 1,
190}; 207};
191 208
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/bitmap.h>
41#include <net/netlink.h> 42#include <net/netlink.h>
42#include <net/act_api.h> 43#include <net/act_api.h>
43#include <net/pkt_cls.h> 44#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
460 return 0; 461 return 0;
461} 462}
462 463
464#define NR_U32_NODE (1<<12)
463static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) 465static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
464{ 466{
465 struct tc_u_knode *n; 467 struct tc_u_knode *n;
466 unsigned int i = 0x7FF; 468 unsigned long i;
469 unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
470 GFP_KERNEL);
471 if (!bitmap)
472 return handle | 0xFFF;
467 473
468 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) 474 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
469 if (i < TC_U32_NODE(n->handle)) 475 set_bit(TC_U32_NODE(n->handle), bitmap);
470 i = TC_U32_NODE(n->handle);
471 i++;
472 476
473 return handle | (i > 0xFFF ? 0xFFF : i); 477 i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
478 if (i >= NR_U32_NODE)
479 i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
480
481 kfree(bitmap);
482 return handle | (i >= NR_U32_NODE ? 0xFFF : i);
474} 483}
475 484
476static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { 485static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
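The gen_new_kid() rewrite above replaces the old "highest id + 1" heuristic with a bitmap of used node ids, which reuses gaps left by deleted filters instead of saturating at 0xFFF. A standalone sketch of the same bitmap search under illustrative names (MY_NR_IDS, my_pick_free_id() and the used[] array are assumptions for the example):

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MY_NR_IDS (1 << 12)	/* 4096 node ids, as NR_U32_NODE above */

static unsigned long my_pick_free_id(const u32 *used, size_t n_used)
{
	unsigned long *bitmap;
	unsigned long id;
	size_t i;

	bitmap = kcalloc(BITS_TO_LONGS(MY_NR_IDS), sizeof(unsigned long),
			 GFP_KERNEL);
	if (!bitmap)
		return MY_NR_IDS - 1;	/* fallback, like the 0xFFF case */

	/* Mark every id that is already taken. */
	for (i = 0; i < n_used; i++)
		set_bit(used[i] & (MY_NR_IDS - 1), bitmap);

	/* Prefer the upper half first, then wrap to 1, as gen_new_kid() does. */
	id = find_next_zero_bit(bitmap, MY_NR_IDS, 0x800);
	if (id >= MY_NR_IDS)
		id = find_next_zero_bit(bitmap, MY_NR_IDS, 1);

	kfree(bitmap);
	return id >= MY_NR_IDS ? MY_NR_IDS - 1 : id;
}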
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a222d3f..06a9ee6b2d3a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
1097 asoc->c = new->c; 1097 asoc->c = new->c;
1098 asoc->peer.rwnd = new->peer.rwnd; 1098 asoc->peer.rwnd = new->peer.rwnd;
1099 asoc->peer.sack_needed = new->peer.sack_needed; 1099 asoc->peer.sack_needed = new->peer.sack_needed;
1100 asoc->peer.auth_capable = new->peer.auth_capable;
1100 asoc->peer.i = new->peer.i; 1101 asoc->peer.i = new->peer.i;
1101 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, 1102 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1102 asoc->peer.i.initial_tsn, GFP_ATOMIC); 1103 asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
366 * specification [SCTP] and any extensions for a list of possible 366 * specification [SCTP] and any extensions for a list of possible
367 * error formats. 367 * error formats.
368 */ 368 */
369struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 369struct sctp_ulpevent *
370 const struct sctp_association *asoc, struct sctp_chunk *chunk, 370sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
371 __u16 flags, gfp_t gfp) 371 struct sctp_chunk *chunk, __u16 flags,
372 gfp_t gfp)
372{ 373{
373 struct sctp_ulpevent *event; 374 struct sctp_ulpevent *event;
374 struct sctp_remote_error *sre; 375 struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
387 /* Copy the skb to a new skb with room for us to prepend 388 /* Copy the skb to a new skb with room for us to prepend
388 * notification with. 389 * notification with.
389 */ 390 */
390 skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), 391 skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
391 0, gfp);
392 392
393 /* Pull off the rest of the cause TLV from the chunk. */ 393 /* Pull off the rest of the cause TLV from the chunk. */
394 skb_pull(chunk->skb, elen); 394 skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
399 event = sctp_skb2event(skb); 399 event = sctp_skb2event(skb);
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
401 401
402 sre = (struct sctp_remote_error *) 402 sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
403 skb_push(skb, sizeof(struct sctp_remote_error));
404 403
405 /* Trim the buffer to the right length. */ 404 /* Trim the buffer to the right length. */
406 skb_trim(skb, sizeof(struct sctp_remote_error) + elen); 405 skb_trim(skb, sizeof(*sre) + elen);
407 406
408 /* Socket Extensions for SCTP 407 /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
409 * 5.3.1.3 SCTP_REMOTE_ERROR 408 memset(sre, 0, sizeof(*sre));
410 *
411 * sre_type:
412 * It should be SCTP_REMOTE_ERROR.
413 */
414 sre->sre_type = SCTP_REMOTE_ERROR; 409 sre->sre_type = SCTP_REMOTE_ERROR;
415
416 /*
417 * Socket Extensions for SCTP
418 * 5.3.1.3 SCTP_REMOTE_ERROR
419 *
420 * sre_flags: 16 bits (unsigned integer)
421 * Currently unused.
422 */
423 sre->sre_flags = 0; 410 sre->sre_flags = 0;
424
425 /* Socket Extensions for SCTP
426 * 5.3.1.3 SCTP_REMOTE_ERROR
427 *
428 * sre_length: sizeof (__u32)
429 *
430 * This field is the total length of the notification data,
431 * including the notification header.
432 */
433 sre->sre_length = skb->len; 411 sre->sre_length = skb->len;
434
435 /* Socket Extensions for SCTP
436 * 5.3.1.3 SCTP_REMOTE_ERROR
437 *
438 * sre_error: 16 bits (unsigned integer)
439 * This value represents one of the Operational Error causes defined in
440 * the SCTP specification, in network byte order.
441 */
442 sre->sre_error = cause; 412 sre->sre_error = cause;
443
444 /* Socket Extensions for SCTP
445 * 5.3.1.3 SCTP_REMOTE_ERROR
446 *
447 * sre_assoc_id: sizeof (sctp_assoc_t)
448 *
449 * The association id field, holds the identifier for the association.
450 * All notifications for a given association have the same association
451 * identifier. For TCP style socket, this field is ignored.
452 */
453 sctp_ulpevent_set_owner(event, asoc); 413 sctp_ulpevent_set_owner(event, asoc);
454 sre->sre_assoc_id = sctp_assoc2id(asoc); 414 sre->sre_assoc_id = sctp_assoc2id(asoc);
455 415
456 return event; 416 return event;
457
458fail: 417fail:
459 return NULL; 418 return NULL;
460} 419}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
899 return notification->sn_header.sn_type; 858 return notification->sn_header.sn_type;
900} 859}
901 860
902/* Copy out the sndrcvinfo into a msghdr. */ 861/* RFC6458, Section 5.3.2. SCTP Header Information Structure
862 * (SCTP_SNDRCV, DEPRECATED)
863 */
903void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 864void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
904 struct msghdr *msghdr) 865 struct msghdr *msghdr)
905{ 866{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
908 if (sctp_ulpevent_is_notification(event)) 869 if (sctp_ulpevent_is_notification(event))
909 return; 870 return;
910 871
911 /* Sockets API Extensions for SCTP 872 memset(&sinfo, 0, sizeof(sinfo));
912 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
913 *
914 * sinfo_stream: 16 bits (unsigned integer)
915 *
916 * For recvmsg() the SCTP stack places the message's stream number in
917 * this value.
918 */
919 sinfo.sinfo_stream = event->stream; 873 sinfo.sinfo_stream = event->stream;
920 /* sinfo_ssn: 16 bits (unsigned integer)
921 *
922 * For recvmsg() this value contains the stream sequence number that
923 * the remote endpoint placed in the DATA chunk. For fragmented
924 * messages this is the same number for all deliveries of the message
925 * (if more than one recvmsg() is needed to read the message).
926 */
927 sinfo.sinfo_ssn = event->ssn; 874 sinfo.sinfo_ssn = event->ssn;
928 /* sinfo_ppid: 32 bits (unsigned integer)
929 *
930 * In recvmsg() this value is
931 * the same information that was passed by the upper layer in the peer
932 * application. Please note that byte order issues are NOT accounted
933 * for and this information is passed opaquely by the SCTP stack from
934 * one end to the other.
935 */
936 sinfo.sinfo_ppid = event->ppid; 875 sinfo.sinfo_ppid = event->ppid;
937 /* sinfo_flags: 16 bits (unsigned integer)
938 *
939 * This field may contain any of the following flags and is composed of
940 * a bitwise OR of these values.
941 *
942 * recvmsg() flags:
943 *
944 * SCTP_UNORDERED - This flag is present when the message was sent
945 * non-ordered.
946 */
947 sinfo.sinfo_flags = event->flags; 876 sinfo.sinfo_flags = event->flags;
948 /* sinfo_tsn: 32 bit (unsigned integer)
949 *
950 * For the receiving side, this field holds a TSN that was
951 * assigned to one of the SCTP Data Chunks.
952 */
953 sinfo.sinfo_tsn = event->tsn; 877 sinfo.sinfo_tsn = event->tsn;
954 /* sinfo_cumtsn: 32 bit (unsigned integer)
955 *
956 * This field will hold the current cumulative TSN as
957 * known by the underlying SCTP layer. Note this field is
958 * ignored when sending and only valid for a receive
959 * operation when sinfo_flags are set to SCTP_UNORDERED.
960 */
961 sinfo.sinfo_cumtsn = event->cumtsn; 878 sinfo.sinfo_cumtsn = event->cumtsn;
962 /* sinfo_assoc_id: sizeof (sctp_assoc_t)
963 *
964 * The association handle field, sinfo_assoc_id, holds the identifier
965 * for the association announced in the COMMUNICATION_UP notification.
966 * All notifications for a given association have the same identifier.
967 * Ignored for one-to-one style sockets.
968 */
969 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); 879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
970 880 /* Context value that is set via SCTP_CONTEXT socket option. */
971 /* context value that is set via SCTP_CONTEXT socket option. */
972 sinfo.sinfo_context = event->asoc->default_rcv_context; 881 sinfo.sinfo_context = event->asoc->default_rcv_context;
973
974 /* These fields are not used while receiving. */ 882 /* These fields are not used while receiving. */
975 sinfo.sinfo_timetolive = 0; 883 sinfo.sinfo_timetolive = 0;
976 884
977 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, 885 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
978 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); 886 sizeof(sinfo), &sinfo);
979} 887}
980 888
981/* Do accounting for bytes received and hold a reference to the association 889/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
559 559
560 buf = node->bclink.deferred_head; 560 buf = node->bclink.deferred_head;
561 node->bclink.deferred_head = buf->next; 561 node->bclink.deferred_head = buf->next;
562 buf->next = NULL;
562 node->bclink.deferred_size--; 563 node->bclink.deferred_size--;
563 goto receive; 564 goto receive;
564 } 565 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
101} 101}
102 102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer 103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let first buffer become head buffer 104 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
105 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0 105 * out: set when successful non-complete reassembly, otherwise NULL
106 * Leaves headbuf pointer at NULL if failure 106 * @*buf: in: the buffer to append. Always defined
107 * out: head buf after successful complete reassembly, otherwise NULL
108 * Returns 1 when reassembly complete, otherwise 0
107 */ 109 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) 110int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{ 111{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
122 goto out_free; 124 goto out_free;
123 head = *headbuf = frag; 125 head = *headbuf = frag;
124 skb_frag_list_init(head); 126 skb_frag_list_init(head);
127 *buf = NULL;
125 return 0; 128 return 0;
126 } 129 }
127 if (!head) 130 if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
150out_free: 153out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n"); 154 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf); 155 kfree_skb(*buf);
156 kfree_skb(*headbuf);
157 *buf = *headbuf = NULL;
153 return 0; 158 return 0;
154} 159}
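The rewritten kerneldoc above pins down the in/out contract of tipc_buf_append(): on completion *buf holds the head buffer and *headbuf is cleared, an incomplete append clears *buf, and a failure clears both. A small caller sketch following that contract (my_reassemble() is illustrative; tipc_buf_append() itself is declared in net/tipc/msg.h):

#include <linux/skbuff.h>
#include "msg.h"	/* tipc_buf_append(), in net/tipc */

/* 'head' persists across fragments of one message. */
static struct sk_buff *my_reassemble(struct sk_buff **head,
				     struct sk_buff *frag)
{
	if (tipc_buf_append(head, &frag))
		return frag;	/* complete: frag now points at the head buffer */

	/* Incomplete (*head kept, frag == NULL) or failure (both NULL);
	 * nothing can be delivered yet either way.
	 */
	return NULL;
}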
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
424 if (end >= start) 424 if (end >= start)
425 return jiffies_to_msecs(end - start); 425 return jiffies_to_msecs(end - start);
426 426
427 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); 427 return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
428} 428}
429 429
430void 430void
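The one-line core.h fix measures elapsed time across a jiffies wraparound against ULONG_MAX, the point at which the counter actually wraps, rather than MAX_JIFFY_OFFSET. A worked sketch of the arithmetic (my_elapsed_jiffies() is illustrative, not the cfg80211 helper):

#include <linux/kernel.h>

/* Elapsed jiffies even if the counter wrapped between start and end. */
static unsigned long my_elapsed_jiffies(unsigned long start, unsigned long end)
{
	if (end >= start)
		return end - start;

	/* Wrapped: distance from start up to ULONG_MAX, plus one tick to
	 * reach zero, plus end. E.g. start = ULONG_MAX - 1, end = 2 -> 4.
	 */
	return end + (ULONG_MAX - start) + 1;
}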
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1497 } 1497 }
1498 CMD(start_p2p_device, START_P2P_DEVICE); 1498 CMD(start_p2p_device, START_P2P_DEVICE);
1499 CMD(set_mcast_rate, SET_MCAST_RATE); 1499 CMD(set_mcast_rate, SET_MCAST_RATE);
1500#ifdef CONFIG_NL80211_TESTMODE
1501 CMD(testmode_cmd, TESTMODE);
1502#endif
1500 if (state->split) { 1503 if (state->split) {
1501 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1504 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1502 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1505 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1503 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1506 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1504 CMD(channel_switch, CHANNEL_SWITCH); 1507 CMD(channel_switch, CHANNEL_SWITCH);
1508 CMD(set_qos_map, SET_QOS_MAP);
1505 } 1509 }
1506 CMD(set_qos_map, SET_QOS_MAP); 1510 /* add into the if now */
1507
1508#ifdef CONFIG_NL80211_TESTMODE
1509 CMD(testmode_cmd, TESTMODE);
1510#endif
1511
1512#undef CMD 1511#undef CMD
1513 1512
1514 if (rdev->ops->connect || rdev->ops->auth) { 1513 if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
935 if (!band_rule_found) 935 if (!band_rule_found)
936 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
937 937
938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
939 939
940 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
941 return rr; 941 return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
1019} 1019}
1020#endif 1020#endif
1021 1021
1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency 1022/*
1023 * chan->center_freq fits there. 1023 * Note that right now we assume the desired channel bandwidth
1024 * If there is no such reg_rule, disable the channel, otherwise set the 1024 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1025 * flags corresponding to the bandwidths allowed in the particular reg_rule 1025 * per channel, the primary and the extension channel).
1026 */ 1026 */
1027static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
1028 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1085 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1086 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1091 bw_flags |= IEEE80211_CHAN_NO_HT40; 1087 bw_flags = IEEE80211_CHAN_NO_HT40;
1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1088 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1089 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1518 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1519 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1524 1520
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1521 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1530 bw_flags |= IEEE80211_CHAN_NO_HT40; 1522 bw_flags = IEEE80211_CHAN_NO_HT40;
1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1523 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1524 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1525 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 560ed77084e9..7cc887f9da11 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2094,7 +2094,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
2094 MAC_ASSIGN(addr, addr); 2094 MAC_ASSIGN(addr, addr);
2095 __entry->key_type = key_type; 2095 __entry->key_type = key_type;
2096 __entry->key_id = key_id; 2096 __entry->key_id = key_id;
2097 memcpy(__entry->tsc, tsc, 6); 2097 if (tsc)
2098 memcpy(__entry->tsc, tsc, 6);
2098 ), 2099 ),
2099 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm", 2100 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
2100 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type, 2101 NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef5108e0d8..0525d78ba328 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2097 goto no_transform; 2097 goto no_transform;
2098 } 2098 }
2099 2099
2100 dst_hold(&xdst->u.dst);
2101 xdst->u.dst.flags |= DST_NOCACHE;
2100 route = xdst->route; 2102 route = xdst->route;
2101 } 2103 }
2102 } 2104 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc3a873..d4db6ebb089d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
177 attrs[XFRMA_ALG_AEAD] || 177 attrs[XFRMA_ALG_AEAD] ||
178 attrs[XFRMA_ALG_CRYPT] || 178 attrs[XFRMA_ALG_CRYPT] ||
179 attrs[XFRMA_ALG_COMP] || 179 attrs[XFRMA_ALG_COMP] ||
180 attrs[XFRMA_TFCPAD] || 180 attrs[XFRMA_TFCPAD])
181 (ntohl(p->id.spi) >= 0x10000))
182
183 goto out; 181 goto out;
184 break; 182 break;
185 183
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
207 attrs[XFRMA_ALG_AUTH] || 205 attrs[XFRMA_ALG_AUTH] ||
208 attrs[XFRMA_ALG_AUTH_TRUNC] || 206 attrs[XFRMA_ALG_AUTH_TRUNC] ||
209 attrs[XFRMA_ALG_CRYPT] || 207 attrs[XFRMA_ALG_CRYPT] ||
210 attrs[XFRMA_TFCPAD]) 208 attrs[XFRMA_TFCPAD] ||
209 (ntohl(p->id.spi) >= 0x10000))
211 goto out; 210 goto out;
212 break; 211 break;
213 212
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index da058da413e7..16a07cfa4d34 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2073,6 +2073,7 @@ sub check_return_section {
2073sub dump_function($$) { 2073sub dump_function($$) {
2074 my $prototype = shift; 2074 my $prototype = shift;
2075 my $file = shift; 2075 my $file = shift;
2076 my $noret = 0;
2076 2077
2077 $prototype =~ s/^static +//; 2078 $prototype =~ s/^static +//;
2078 $prototype =~ s/^extern +//; 2079 $prototype =~ s/^extern +//;
@@ -2086,7 +2087,7 @@ sub dump_function($$) {
2086 $prototype =~ s/__init_or_module +//; 2087 $prototype =~ s/__init_or_module +//;
2087 $prototype =~ s/__must_check +//; 2088 $prototype =~ s/__must_check +//;
2088 $prototype =~ s/__weak +//; 2089 $prototype =~ s/__weak +//;
2089 $prototype =~ s/^#\s*define\s+//; #ak added 2090 my $define = $prototype =~ s/^#\s*define\s+//; #ak added
2090 $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; 2091 $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
2091 2092
2092 # Yes, this truly is vile. We are looking for: 2093 # Yes, this truly is vile. We are looking for:
@@ -2105,7 +2106,15 @@ sub dump_function($$) {
2105 # - atomic_set (macro) 2106 # - atomic_set (macro)
2106 # - pci_match_device, __copy_to_user (long return type) 2107 # - pci_match_device, __copy_to_user (long return type)
2107 2108
2108 if ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2109 if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
2110 # This is an object-like macro, it has no return type and no parameter
2111 # list.
2112 # Function-like macros are not allowed to have spaces between
2113 # declaration_name and opening parenthesis (notice the \s+).
2114 $return_type = $1;
2115 $declaration_name = $2;
2116 $noret = 1;
2117 } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2109 $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2118 $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2110 $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2119 $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2111 $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2120 $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
@@ -2140,7 +2149,7 @@ sub dump_function($$) {
2140 # of warnings goes sufficiently down, the check is only performed in 2149 # of warnings goes sufficiently down, the check is only performed in
2141 # verbose mode. 2150 # verbose mode.
2142 # TODO: always perform the check. 2151 # TODO: always perform the check.
2143 if ($verbose) { 2152 if ($verbose && !$noret) {
2144 check_return_section($file, $declaration_name, $return_type); 2153 check_return_section($file, $declaration_name, $return_type);
2145 } 2154 }
2146 2155
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
379 struct special_params *params = bebob->maudio_special_quirk; 379 struct special_params *params = bebob->maudio_special_quirk;
380 int err, id; 380 int err, id;
381 381
382 mutex_lock(&bebob->mutex);
383
384 id = uval->value.enumerated.item[0]; 382 id = uval->value.enumerated.item[0];
385 if (id >= ARRAY_SIZE(special_clk_labels)) 383 if (id >= ARRAY_SIZE(special_clk_labels))
386 return 0; 384 return -EINVAL;
385
386 mutex_lock(&bebob->mutex);
387 387
388 err = avc_maudio_set_special_clk(bebob, id, 388 err = avc_maudio_set_special_clk(bebob, id,
389 params->dig_in_fmt, 389 params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
391 params->clk_lock); 391 params->clk_lock);
392 mutex_unlock(&bebob->mutex); 392 mutex_unlock(&bebob->mutex);
393 393
394 return err >= 0; 394 if (err >= 0)
395 err = 1;
396
397 return err;
395} 398}
396static struct snd_kcontrol_new special_clk_ctl = { 399static struct snd_kcontrol_new special_clk_ctl = {
397 .name = "Clock Source", 400 .name = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
434 .get = special_sync_ctl_get, 437 .get = special_sync_ctl_get,
435}; 438};
436 439
437/* Digital interface control for special firmware */ 440/* Digital input interface control for special firmware */
438static char *const special_dig_iface_labels[] = { 441static char *const special_dig_in_iface_labels[] = {
439 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical" 442 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
440}; 443};
441static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl, 444static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
443{ 446{
444 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 447 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
445 einf->count = 1; 448 einf->count = 1;
446 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels); 449 einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
447 450
448 if (einf->value.enumerated.item >= einf->value.enumerated.items) 451 if (einf->value.enumerated.item >= einf->value.enumerated.items)
449 einf->value.enumerated.item = einf->value.enumerated.items - 1; 452 einf->value.enumerated.item = einf->value.enumerated.items - 1;
450 453
451 strcpy(einf->value.enumerated.name, 454 strcpy(einf->value.enumerated.name,
452 special_dig_iface_labels[einf->value.enumerated.item]); 455 special_dig_in_iface_labels[einf->value.enumerated.item]);
453 456
454 return 0; 457 return 0;
455} 458}
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
491 unsigned int id, dig_in_fmt, dig_in_iface; 494 unsigned int id, dig_in_fmt, dig_in_iface;
492 int err; 495 int err;
493 496
494 mutex_lock(&bebob->mutex);
495
496 id = uval->value.enumerated.item[0]; 497 id = uval->value.enumerated.item[0];
498 if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
499 return -EINVAL;
497 500
498 /* decode user value */ 501 /* decode user value */
499 dig_in_fmt = (id >> 1) & 0x01; 502 dig_in_fmt = (id >> 1) & 0x01;
500 dig_in_iface = id & 0x01; 503 dig_in_iface = id & 0x01;
501 504
505 mutex_lock(&bebob->mutex);
506
502 err = avc_maudio_set_special_clk(bebob, 507 err = avc_maudio_set_special_clk(bebob,
503 params->clk_src, 508 params->clk_src,
504 dig_in_fmt, 509 dig_in_fmt,
505 params->dig_out_fmt, 510 params->dig_out_fmt,
506 params->clk_lock); 511 params->clk_lock);
507 if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */ 512 if (err < 0)
513 goto end;
514
 515 /* For ADAT, only the optical interface is available. */
516 if (params->dig_in_fmt > 0) {
517 err = 1;
508 goto end; 518 goto end;
519 }
509 520
521 /* For S/PDIF, optical/coaxial interfaces are selectable. */
510 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface); 522 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
511 if (err < 0) 523 if (err < 0)
512 dev_err(&bebob->unit->device, 524 dev_err(&bebob->unit->device,
513 "fail to set digital input interface: %d\n", err); 525 "fail to set digital input interface: %d\n", err);
526 err = 1;
514end: 527end:
515 special_stream_formation_set(bebob); 528 special_stream_formation_set(bebob);
516 mutex_unlock(&bebob->mutex); 529 mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
525 .put = special_dig_in_iface_ctl_set 538 .put = special_dig_in_iface_ctl_set
526}; 539};
527 540
541/* Digital output interface control for special firmware */
542static char *const special_dig_out_iface_labels[] = {
543 "S/PDIF Optical and Coaxial", "ADAT Optical"
544};
528static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl, 545static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
529 struct snd_ctl_elem_info *einf) 546 struct snd_ctl_elem_info *einf)
530{ 547{
531 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 548 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
532 einf->count = 1; 549 einf->count = 1;
533 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1; 550 einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
534 551
535 if (einf->value.enumerated.item >= einf->value.enumerated.items) 552 if (einf->value.enumerated.item >= einf->value.enumerated.items)
536 einf->value.enumerated.item = einf->value.enumerated.items - 1; 553 einf->value.enumerated.item = einf->value.enumerated.items - 1;
537 554
538 strcpy(einf->value.enumerated.name, 555 strcpy(einf->value.enumerated.name,
539 special_dig_iface_labels[einf->value.enumerated.item + 1]); 556 special_dig_out_iface_labels[einf->value.enumerated.item]);
540 557
541 return 0; 558 return 0;
542} 559}
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
558 unsigned int id; 575 unsigned int id;
559 int err; 576 int err;
560 577
561 mutex_lock(&bebob->mutex);
562
563 id = uval->value.enumerated.item[0]; 578 id = uval->value.enumerated.item[0];
579 if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
580 return -EINVAL;
581
582 mutex_lock(&bebob->mutex);
564 583
565 err = avc_maudio_set_special_clk(bebob, 584 err = avc_maudio_set_special_clk(bebob,
566 params->clk_src, 585 params->clk_src,
567 params->dig_in_fmt, 586 params->dig_in_fmt,
568 id, params->clk_lock); 587 id, params->clk_lock);
569 if (err >= 0) 588 if (err >= 0) {
570 special_stream_formation_set(bebob); 589 special_stream_formation_set(bebob);
590 err = 1;
591 }
571 592
572 mutex_unlock(&bebob->mutex); 593 mutex_unlock(&bebob->mutex);
573 return err; 594 return err;
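
Taken together, the bebob hunks above move these controls to the standard ALSA .put convention: validate the user-supplied enumerated item before taking the mutex, propagate a negative errno on failure, and return 1 once a value has actually been applied so the core emits a change notification (the old "return err >= 0;" mapped errors to 0 and hid them). A minimal sketch of that shape, assuming the usual ALSA control headers; struct example_chip, example_labels and example_write_hw() are hypothetical stand-ins, not bebob symbols:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/control.h>

/* Hypothetical device state standing in for struct snd_bebob. */
struct example_chip {
        struct mutex mutex;
        unsigned int cur;
};

static const char *const example_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
};

/* Hypothetical helper that programs the hardware. */
static int example_write_hw(struct example_chip *chip, unsigned int id)
{
        chip->cur = id;
        return 0;
}

static int example_ctl_put(struct snd_kcontrol *kctl,
                           struct snd_ctl_elem_value *uval)
{
        struct example_chip *chip = snd_kcontrol_chip(kctl);
        unsigned int id = uval->value.enumerated.item[0];
        int err;

        /* Reject out-of-range items before locking or touching hardware. */
        if (id >= ARRAY_SIZE(example_labels))
                return -EINVAL;

        mutex_lock(&chip->mutex);
        err = example_write_hw(chip, id);
        mutex_unlock(&chip->mutex);
        if (err < 0)
                return err;             /* negative errno on failure */

        return 1;                       /* >0: value changed, notify listeners */
}
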
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbddbd801..6df04d91c93c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
193 dsp_unlock(azx_dev); 193 dsp_unlock(azx_dev);
194 return azx_dev; 194 return azx_dev;
195 } 195 }
196 if (!res) 196 if (!res ||
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
197 res = azx_dev; 198 res = azx_dev;
198 } 199 }
199 dsp_unlock(azx_dev); 200 dsp_unlock(azx_dev);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b6b4e71a0b0b..83cd19017cf3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -227,7 +227,7 @@ enum {
227/* quirks for Intel PCH */ 227/* quirks for Intel PCH */
228#define AZX_DCAPS_INTEL_PCH_NOPM \ 228#define AZX_DCAPS_INTEL_PCH_NOPM \
229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ 229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
230 AZX_DCAPS_COUNT_LPIB_DELAY) 230 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
231 231
232#define AZX_DCAPS_INTEL_PCH \ 232#define AZX_DCAPS_INTEL_PCH \
233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME) 233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@ static int azx_suspend(struct device *dev)
596 struct azx *chip = card->private_data; 596 struct azx *chip = card->private_data;
597 struct azx_pcm *p; 597 struct azx_pcm *p;
598 598
599 if (chip->disabled) 599 if (chip->disabled || chip->init_failed)
600 return 0; 600 return 0;
601 601
602 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); 602 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@ static int azx_resume(struct device *dev)
628 struct snd_card *card = dev_get_drvdata(dev); 628 struct snd_card *card = dev_get_drvdata(dev);
629 struct azx *chip = card->private_data; 629 struct azx *chip = card->private_data;
630 630
631 if (chip->disabled) 631 if (chip->disabled || chip->init_failed)
632 return 0; 632 return 0;
633 633
634 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 634 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
665 struct snd_card *card = dev_get_drvdata(dev); 665 struct snd_card *card = dev_get_drvdata(dev);
666 struct azx *chip = card->private_data; 666 struct azx *chip = card->private_data;
667 667
668 if (chip->disabled) 668 if (chip->disabled || chip->init_failed)
669 return 0; 669 return 0;
670 670
671 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 671 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@ static int azx_runtime_resume(struct device *dev)
692 struct hda_codec *codec; 692 struct hda_codec *codec;
693 int status; 693 int status;
694 694
695 if (chip->disabled) 695 if (chip->disabled || chip->init_failed)
696 return 0; 696 return 0;
697 697
698 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 698 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
729 struct snd_card *card = dev_get_drvdata(dev); 729 struct snd_card *card = dev_get_drvdata(dev);
730 struct azx *chip = card->private_data; 730 struct azx *chip = card->private_data;
731 731
732 if (chip->disabled) 732 if (chip->disabled || chip->init_failed)
733 return 0; 733 return 0;
734 734
735 if (!power_save_controller || 735 if (!power_save_controller ||
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01fa912..e9d1a5762a55 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */ 186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ 187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ 188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
189#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
189#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ 190#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
190#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 191#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
191#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 192#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
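
The hda_controller.c, hda_intel.c and hda_priv.h hunks above introduce AZX_DCAPS_REVERSE_ASSIGN: with the capability bit set, azx_assign_device() keeps overwriting its fallback candidate, so the last matching free stream wins instead of the first. A standalone toy in plain C showing just that selection policy; pick_stream() and CAP_REVERSE_ASSIGN are illustrative names, not the azx code:

#include <stdio.h>

#define CAP_REVERSE_ASSIGN (1u << 24)

static int pick_stream(const int *busy, int nr, unsigned int caps)
{
        int i, res = -1;

        for (i = 0; i < nr; i++) {
                if (busy[i])
                        continue;
                /* Normally remember only the first free slot; with the
                 * reverse-assign capability keep taking later ones. */
                if (res < 0 || (caps & CAP_REVERSE_ASSIGN))
                        res = i;
        }
        return res;
}

int main(void)
{
        int busy[4] = { 1, 0, 0, 1 };

        printf("forward: %d\n", pick_stream(busy, 4, 0));                  /* 1 */
        printf("reverse: %d\n", pick_stream(busy, 4, CAP_REVERSE_ASSIGN)); /* 2 */
        return 0;
}
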
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9293a8..358414da6418 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@ disable_hda:
236 return rc; 236 return rc;
237} 237}
238 238
239#ifdef CONFIG_PM_SLEEP
239static void hda_tegra_disable_clocks(struct hda_tegra *data) 240static void hda_tegra_disable_clocks(struct hda_tegra *data)
240{ 241{
241 clk_disable_unprepare(data->hda2hdmi_clk); 242 clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
243 clk_disable_unprepare(data->hda_clk); 244 clk_disable_unprepare(data->hda_clk);
244} 245}
245 246
246#ifdef CONFIG_PM_SLEEP
247/* 247/*
248 * power management 248 * power management
249 */ 249 */
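
The hda_tegra.c change only relocates an #ifdef: hda_tegra_disable_clocks() is called from the CONFIG_PM_SLEEP suspend path alone, so defining it unconditionally presumably left !CONFIG_PM_SLEEP builds with a defined-but-unused static function warning. A tiny illustration of the corrected layout, with illustrative names rather than the Tegra driver's; if the #ifdef started below disable_clocks(), builds without -DCONFIG_PM_SLEEP would flag it as unused:

#include <stdio.h>

#ifdef CONFIG_PM_SLEEP
static void disable_clocks(void)
{
        puts("clocks off");
}

static int demo_suspend(void)
{
        disable_clocks();
        return 0;
}
#endif

int main(void)
{
#ifdef CONFIG_PM_SLEEP
        return demo_suspend();
#else
        puts("suspend support compiled out");
        return 0;
#endif
}
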
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4fe876b65fda..ba4ca52072ff 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi }, 3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi }, 3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
3340{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3340{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3341{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3341{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3342{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3342{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3343{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
3394MODULE_ALIAS("snd-hda-codec-id:10de0051"); 3395MODULE_ALIAS("snd-hda-codec-id:10de0051");
3395MODULE_ALIAS("snd-hda-codec-id:10de0060"); 3396MODULE_ALIAS("snd-hda-codec-id:10de0060");
3396MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3397MODULE_ALIAS("snd-hda-codec-id:10de0067");
3398MODULE_ALIAS("snd-hda-codec-id:10de0070");
3397MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3399MODULE_ALIAS("snd-hda-codec-id:10de0071");
3398MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3400MODULE_ALIAS("snd-hda-codec-id:10de8001");
3399MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3401MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0849b7b83f0a..0db94f492e97 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -59,7 +59,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
59{ 59{
60 return devm_snd_dmaengine_pcm_register(&pdev->dev, 60 return devm_snd_dmaengine_pcm_register(&pdev->dev,
61 &imx_dmaengine_pcm_config, 61 &imx_dmaengine_pcm_config,
62 SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
63 SND_DMAENGINE_PCM_FLAG_COMPAT); 62 SND_DMAENGINE_PCM_FLAG_COMPAT);
64} 63}
65EXPORT_SYMBOL_GPL(imx_pcm_dma_init); 64EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
35 35
36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock) 36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
37{ 37{
38 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 38 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
39 return pthread_mutex_lock(&lock->mutex); 39 return pthread_mutex_lock(&lock->mutex);
40} 40}
41 41
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
47 47
48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock) 48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
49{ 49{
50 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 50 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0; 51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
52} 52}
53 53
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
36 36
37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock) 37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
38{ 38{
39 lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 39 lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
40 return pthread_rwlock_rdlock(&lock->rwlock); 40 return pthread_rwlock_rdlock(&lock->rwlock);
41 41
42} 42}
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
49 49
50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock) 50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
51{ 51{
52 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 52 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
53 return pthread_rwlock_wrlock(&lock->rwlock); 53 return pthread_rwlock_wrlock(&lock->rwlock);
54} 54}
55 55
56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock) 56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
57{ 57{
58 lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 58 lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; 59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
60} 60}
61 61
62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) 62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
63{ 63{
64 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 64 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0; 65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
66} 66}
67 67
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
92static void init_preload(void); 92static void init_preload(void);
93static void try_init_preload(void) 93static void try_init_preload(void)
94{ 94{
95 if (!__init_state != done) 95 if (__init_state != done)
96 init_preload(); 96 init_preload();
97} 97}
98 98
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
252 252
253 try_init_preload(); 253 try_init_preload();
254 254
255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, 255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
256 (unsigned long)_RET_IP_); 256 (unsigned long)_RET_IP_);
257 /* 257 /*
258 * Here's the thing with pthread mutexes: unlike the kernel variant, 258 * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
281 281
282 try_init_preload(); 282 try_init_preload();
283 283
284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
285 r = ll_pthread_mutex_trylock(mutex); 285 r = ll_pthread_mutex_trylock(mutex);
286 if (r) 286 if (r)
287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
303 */ 303 */
304 r = ll_pthread_mutex_unlock(mutex); 304 r = ll_pthread_mutex_unlock(mutex);
305 if (r) 305 if (r)
306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
307 307
308 return r; 308 return r;
309} 309}
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
352 352
353 init_preload(); 353 init_preload();
354 354
355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
356 r = ll_pthread_rwlock_rdlock(rwlock); 356 r = ll_pthread_rwlock_rdlock(rwlock);
357 if (r) 357 if (r)
358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
366 366
367 init_preload(); 367 init_preload();
368 368
369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
370 r = ll_pthread_rwlock_tryrdlock(rwlock); 370 r = ll_pthread_rwlock_tryrdlock(rwlock);
371 if (r) 371 if (r)
372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
380 380
381 init_preload(); 381 init_preload();
382 382
383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
384 r = ll_pthread_rwlock_trywrlock(rwlock); 384 r = ll_pthread_rwlock_trywrlock(rwlock);
385 if (r) 385 if (r)
386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
394 394
395 init_preload(); 395 init_preload();
396 396
397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
398 r = ll_pthread_rwlock_wrlock(rwlock); 398 r = ll_pthread_rwlock_wrlock(rwlock);
399 if (r) 399 if (r)
400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
412 r = ll_pthread_rwlock_unlock(rwlock); 412 r = ll_pthread_rwlock_unlock(rwlock);
413 if (r) 413 if (r)
414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
415 415
416 return r; 416 return r;
417} 417}
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock"); 439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
440#endif 440#endif
441 441
442 printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
443
444 lockdep_init(); 442 lockdep_init();
445 443
446 __init_state = done; 444 __init_state = done;
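
Two things happen in the liblockdep hunks above: every lock_acquire() call drops its fifth argument (check) from 2 to 1, matching the kernel side where check is now treated as a boolean (0 = skip validation, non-zero = validate), and try_init_preload() loses an operator-precedence bug. Because ! binds tighter than !=, the old guard (!__init_state != done) was true for every state, so init_preload() ran on every call. A small standalone demo of that second point, with values mirroring the enum in preload.c:

#include <stdio.h>

enum { none, prepare, done };

int main(void)
{
        int state;

        /* The old test collapses state to 0 or 1 before comparing with
         * done (== 2), so it never becomes false. */
        for (state = none; state <= done; state++)
                printf("state=%d  old (!state != done) -> %d  new (state != done) -> %d\n",
                       state, !state != done, state != done);
        return 0;
}
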
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fbbba17..04a229aa5c0f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
17#include "../util.h" 17#include "../util.h"
18#include "../ui.h" 18#include "../ui.h"
19#include "map.h" 19#include "map.h"
20#include "annotate.h"
20 21
21struct hist_browser { 22struct hist_browser {
22 struct ui_browser b; 23 struct ui_browser b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1593 bi->to.sym->name) > 0) 1594 bi->to.sym->name) > 0)
1594 annotate_t = nr_options++; 1595 annotate_t = nr_options++;
1595 } else { 1596 } else {
1596
1597 if (browser->selection != NULL && 1597 if (browser->selection != NULL &&
1598 browser->selection->sym != NULL && 1598 browser->selection->sym != NULL &&
1599 !browser->selection->map->dso->annotate_warned && 1599 !browser->selection->map->dso->annotate_warned) {
1600 asprintf(&options[nr_options], "Annotate %s", 1600 struct annotation *notes;
1601 browser->selection->sym->name) > 0) 1601
1602 annotate = nr_options++; 1602 notes = symbol__annotation(browser->selection->sym);
1603
1604 if (notes->src &&
1605 asprintf(&options[nr_options], "Annotate %s",
1606 browser->selection->sym->name) > 0)
1607 annotate = nr_options++;
1608 }
1603 } 1609 }
1604 1610
1605 if (thread != NULL && 1611 if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
1656 1662
1657 if (choice == annotate || choice == annotate_t || choice == annotate_f) { 1663 if (choice == annotate || choice == annotate_t || choice == annotate_f) {
1658 struct hist_entry *he; 1664 struct hist_entry *he;
1665 struct annotation *notes;
1659 int err; 1666 int err;
1660do_annotate: 1667do_annotate:
1661 if (!objdump_path && perf_session_env__lookup_objdump(env)) 1668 if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
1679 he->ms.map = he->branch_info->to.map; 1686 he->ms.map = he->branch_info->to.map;
1680 } 1687 }
1681 1688
1689 notes = symbol__annotation(he->ms.sym);
1690 if (!notes->src)
1691 continue;
1692
1682 /* 1693 /*
1683 * Don't let this be freed, say, by hists__decay_entry. 1694 * Don't let this be freed, say, by hists__decay_entry.
1684 */ 1695 */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
496 u64 start; 496 u64 start;
497}; 497};
498 498
499static int symbol__in_kernel(void *arg, const char *name,
500 char type __maybe_unused, u64 start)
501{
502 struct process_args *args = arg;
503
504 if (strchr(name, '['))
505 return 0;
506
507 args->start = start;
508 return 1;
509}
510
511static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 499static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
512 size_t bufsz) 500 size_t bufsz)
513{ 501{
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
517 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 505 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
518} 506}
519 507
520/* Figure out the start address of kernel map from /proc/kallsyms */ 508const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
521static u64 machine__get_kernel_start_addr(struct machine *machine) 509
510/* Figure out the start address of kernel map from /proc/kallsyms.
511 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
512 * symbol_name if it's not that important.
513 */
514static u64 machine__get_kernel_start_addr(struct machine *machine,
515 const char **symbol_name)
522{ 516{
523 char filename[PATH_MAX]; 517 char filename[PATH_MAX];
524 struct process_args args; 518 int i;
519 const char *name;
520 u64 addr = 0;
525 521
526 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 522 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
527 523
528 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 524 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
529 return 0; 525 return 0;
530 526
531 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 527 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
532 return 0; 528 addr = kallsyms__get_function_start(filename, name);
529 if (addr)
530 break;
531 }
532
533 if (symbol_name)
534 *symbol_name = name;
533 535
534 return args.start; 536 return addr;
535} 537}
536 538
537int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 539int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
538{ 540{
539 enum map_type type; 541 enum map_type type;
540 u64 start = machine__get_kernel_start_addr(machine); 542 u64 start = machine__get_kernel_start_addr(machine, NULL);
541 543
542 for (type = 0; type < MAP__NR_TYPES; ++type) { 544 for (type = 0; type < MAP__NR_TYPES; ++type) {
543 struct kmap *kmap; 545 struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
852 return 0; 854 return 0;
853} 855}
854 856
855const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
856
857int machine__create_kernel_maps(struct machine *machine) 857int machine__create_kernel_maps(struct machine *machine)
858{ 858{
859 struct dso *kernel = machine__get_kernel(machine); 859 struct dso *kernel = machine__get_kernel(machine);
860 char filename[PATH_MAX];
861 const char *name; 860 const char *name;
862 u64 addr = 0; 861 u64 addr = machine__get_kernel_start_addr(machine, &name);
863 int i;
864
865 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
866
867 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
868 addr = kallsyms__get_function_start(filename, name);
869 if (addr)
870 break;
871 }
872 if (!addr) 862 if (!addr)
873 return -1; 863 return -1;
874 864
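
The machine.c refactor above folds the duplicated kallsyms walk into machine__get_kernel_start_addr(): try the ordered, NULL-terminated ref_reloc_sym_names list ("_text", then "_stext"), take the first address the lookup resolves, and optionally report which name matched. A generic sketch of that first-match fallback pattern; resolve() and first_match() are stand-ins, not kallsyms__get_function_start():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for a kallsyms lookup; pretend only _stext resolves. */
static uint64_t resolve(const char *name)
{
        return strcmp(name, "_stext") == 0 ? 0xffffffff81000000ull : 0;
}

static uint64_t first_match(const char *const *names, const char **which)
{
        const char *name = NULL;
        uint64_t addr = 0;
        int i;

        for (i = 0; (name = names[i]) != NULL; i++) {
                addr = resolve(name);
                if (addr)
                        break;
        }
        if (which)
                *which = name;          /* NULL when nothing matched */
        return addr;
}

int main(void)
{
        const char *const candidates[] = { "_text", "_stext", NULL };
        const char *name;
        uint64_t addr = first_match(candidates, &name);

        printf("%s -> %#llx\n", name ? name : "(none)",
               (unsigned long long)addr);
        return 0;
}
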
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 447321104ec0..e775adcbd29f 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -21,7 +21,7 @@ OBJS = tmon.o tui.o sysfs.o pid.o
21OBJS += 21OBJS +=
22 22
23tmon: $(OBJS) Makefile tmon.h 23tmon: $(OBJS) Makefile tmon.h
24 $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -lpthread 24 $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
25 25
26valgrind: tmon 26valgrind: tmon
27 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null 27 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index b30f531173e4..09b7c3218334 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -142,6 +142,7 @@ static void start_syslog(void)
142static void prepare_logging(void) 142static void prepare_logging(void)
143{ 143{
144 int i; 144 int i;
145 struct stat logstat;
145 146
146 if (!logging) 147 if (!logging)
147 return; 148 return;
@@ -152,6 +153,29 @@ static void prepare_logging(void)
152 return; 153 return;
153 } 154 }
154 155
156 if (lstat(TMON_LOG_FILE, &logstat) < 0) {
157 syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
158 fclose(tmon_log);
159 tmon_log = NULL;
160 return;
161 }
162
163 /* The log file must be a regular file owned by us */
164 if (S_ISLNK(logstat.st_mode)) {
165 syslog(LOG_ERR, "Log file is a symlink. Will not log\n");
166 fclose(tmon_log);
167 tmon_log = NULL;
168 return;
169 }
170
171 if (logstat.st_uid != getuid()) {
172 syslog(LOG_ERR, "We don't own the log file. Not logging\n");
173 fclose(tmon_log);
174 tmon_log = NULL;
175 return;
176 }
177
178
155 fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n"); 179 fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
156 for (i = 0; i < ptdata.nr_tz_sensor; i++) { 180 for (i = 0; i < ptdata.nr_tz_sensor; i++) {
157 char binding_str[33]; /* size of long + 1 */ 181 char binding_str[33]; /* size of long + 1 */
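
The prepare_logging() hunk above, together with the umask change in the hunk that follows, is a common hardening pattern for a fixed, predictable log path: stat the file, refuse to write through a symlink or into a file the process does not own, and stop creating group/other-writable files. A standalone sketch of the same checks; log_path_ok() and the demo path are hypothetical, not tmon functions:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int log_path_ok(const char *path)
{
        struct stat st;

        if (lstat(path, &st) < 0)
                return 0;               /* cannot stat: do not log */
        if (S_ISLNK(st.st_mode))
                return 0;               /* symlink: possible redirection attack */
        if (!S_ISREG(st.st_mode))
                return 0;               /* expect a plain regular file */
        if (st.st_uid != getuid())
                return 0;               /* owned by someone else */
        return 1;
}

int main(void)
{
        const char *path = "/tmp/demo.log";     /* demo path only */

        umask(S_IWGRP | S_IWOTH);               /* no group/other write on new files */
        printf("%s %s safe to append to\n", path,
               log_path_ok(path) ? "looks" : "is not");
        return 0;
}
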
@@ -331,7 +355,7 @@ static void start_daemon_mode()
331 disable_tui(); 355 disable_tui();
332 356
333 /* change the file mode mask */ 357 /* change the file mode mask */
334 umask(0); 358 umask(S_IWGRP | S_IWOTH);
335 359
336 /* new SID for the daemon process */ 360 /* new SID for the daemon process */
337 sid = setsid(); 361 sid = setsid();