author		Takashi Iwai <tiwai@suse.de>	2013-10-19 16:31:14 -0400
committer	Takashi Iwai <tiwai@suse.de>	2013-10-19 16:31:14 -0400
commit		20c87bd40e6c1ff7e31cc5eea4fb37829a57eb58 (patch)
tree		528c2f113e428e060e185ccab7eab8d8e87c7d36
parent		d14df339c72b6efbba4eddd1d1f3f4b173273f74 (diff)
parent		40f8989695660dee984338861ca9900b8e0b5183 (diff)
Merge tag 'asoc-v3.12-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.12

A number of small, fairly unexciting fixes for drivers - nothing stands out.
-rw-r--r--CREDITS3
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/block/cmdline-partition.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (renamed from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt)8
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt17
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt)0
-rw-r--r--Documentation/kernel-parameters.txt10
-rw-r--r--MAINTAINERS40
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arc/include/asm/spinlock.h9
-rw-r--r--arch/arc/include/asm/uaccess.h4
-rw-r--r--arch/arc/kernel/ptrace.c2
-rw-r--r--arch/arc/kernel/signal.c25
-rw-r--r--arch/arc/kernel/time.c7
-rw-r--r--arch/arc/kernel/unaligned.c6
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Makefile9
-rw-r--r--arch/arm/boot/Makefile16
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn102.dts49
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi11
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi6
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi5
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/prima2.dtsi27
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi6
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi9
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi6
-rw-r--r--arch/arm/boot/install.sh14
-rw-r--r--arch/arm/common/edma.c38
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/crypto/aes-armv4.S6
-rw-r--r--arch/arm/include/asm/jump_label.h2
-rw-r--r--arch/arm/include/asm/uaccess.h7
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-header.S8
-rw-r--r--arch/arm/kvm/reset.c6
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_reset.S8
-rw-r--r--arch/arm/mach-at91/at91x40_time.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h4
-rw-r--r--arch/arm/mach-integrator/pci_v3.h7
-rw-r--r--arch/arm/mach-mvebu/coherency.c8
-rw-r--r--arch/arm/mach-mvebu/pmsu.c1
-rw-r--r--arch/arm/mach-mvebu/system-controller.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c9
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c12
-rw-r--r--arch/arm/mach-omap2/mux.h4
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-lager.c27
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c11
-rw-r--r--arch/arm64/Kconfig.debug7
-rw-r--r--arch/arm64/configs/defconfig5
-rw-r--r--arch/arm64/include/asm/uaccess.h10
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/mm/tlb.S2
-rw-r--r--arch/avr32/include/asm/Kbuild16
-rw-r--r--arch/avr32/include/asm/cputime.h6
-rw-r--r--arch/avr32/include/asm/delay.h1
-rw-r--r--arch/avr32/include/asm/device.h7
-rw-r--r--arch/avr32/include/asm/div64.h6
-rw-r--r--arch/avr32/include/asm/emergency-restart.h6
-rw-r--r--arch/avr32/include/asm/futex.h6
-rw-r--r--arch/avr32/include/asm/irq_regs.h1
-rw-r--r--arch/avr32/include/asm/local.h6
-rw-r--r--arch/avr32/include/asm/local64.h1
-rw-r--r--arch/avr32/include/asm/percpu.h6
-rw-r--r--arch/avr32/include/asm/scatterlist.h6
-rw-r--r--arch/avr32/include/asm/sections.h6
-rw-r--r--arch/avr32/include/asm/topology.h6
-rw-r--r--arch/avr32/include/asm/xor.h6
-rw-r--r--arch/avr32/kernel/process.c2
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/mips/alchemy/board-mtx1.c2
-rw-r--r--arch/mips/include/asm/cpu-features.h2
-rw-r--r--arch/mips/include/asm/jump_label.h2
-rw-r--r--arch/mips/kernel/octeon_switch.S2
-rw-r--r--arch/mips/kernel/r2300_switch.S2
-rw-r--r--arch/mips/kernel/r4k_switch.S2
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/dma-default.c12
-rw-r--r--arch/openrisc/include/asm/prom.h44
-rw-r--r--arch/parisc/include/asm/traps.h2
-rw-r--r--arch/parisc/kernel/cache.c1
-rw-r--r--arch/parisc/kernel/smp.c8
-rw-r--r--arch/parisc/kernel/traps.c11
-rw-r--r--arch/parisc/lib/memcpy.c15
-rw-r--r--arch/parisc/mm/fault.c18
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/epapr-wrapper.c9
-rw-r--r--arch/powerpc/boot/epapr.c4
-rw-r--r--arch/powerpc/boot/of.c16
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c101
-rw-r--r--arch/powerpc/kernel/misc_32.S25
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c21
-rw-r--r--arch/powerpc/kernel/sysfs.c18
-rw-r--r--arch/powerpc/kernel/tm.S95
-rw-r--r--arch/powerpc/kernel/vio.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c18
-rw-r--r--arch/powerpc/lib/checksum_64.S58
-rw-r--r--arch/powerpc/lib/sstep.c3
-rw-r--r--arch/powerpc/mm/init_64.c4
-rw-r--r--arch/powerpc/mm/mem.c9
-rw-r--r--arch/powerpc/perf/power8-pmu.c5
-rw-r--r--arch/powerpc/platforms/pseries/smp.c26
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/jump_label.h2
-rw-r--r--arch/s390/include/asm/mutex.h2
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/spinlock.h5
-rw-r--r--arch/s390/kernel/crash_dump.c42
-rw-r--r--arch/s390/kernel/entry.S1
-rw-r--r--arch/s390/kernel/entry64.S1
-rw-r--r--arch/s390/kernel/kprobes.c6
-rw-r--r--arch/score/Kconfig4
-rw-r--r--arch/score/Makefile4
-rw-r--r--arch/score/include/asm/checksum.h93
-rw-r--r--arch/score/include/asm/io.h1
-rw-r--r--arch/score/include/asm/pgalloc.h2
-rw-r--r--arch/score/kernel/entry.S4
-rw-r--r--arch/score/kernel/process.c4
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/include/asm/floppy_64.h2
-rw-r--r--arch/sparc/include/asm/jump_label.h2
-rw-r--r--arch/sparc/kernel/Makefile3
-rw-r--r--arch/sparc/kernel/ds.c5
-rw-r--r--arch/sparc/kernel/ldc.c4
-rw-r--r--arch/tile/include/asm/atomic.h5
-rw-r--r--arch/tile/include/asm/atomic_32.h27
-rw-r--r--arch/tile/include/asm/cmpxchg.h28
-rw-r--r--arch/tile/include/asm/percpu.h34
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/intvec_32.S3
-rw-r--r--arch/tile/kernel/intvec_64.S3
-rw-r--r--arch/tile/kernel/stack.c12
-rw-r--r--arch/tile/lib/atomic_32.c8
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/include/asm/cpufeature.h6
-rw-r--r--arch/x86/include/asm/jump_label.h2
-rw-r--r--arch/x86/include/asm/mutex_64.h4
-rw-r--r--arch/x86/include/asm/xen/page.h31
-rw-r--r--arch/x86/kernel/cpu/perf_event.c21
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c10
-rw-r--r--arch/x86/kernel/microcode_amd.c1
-rw-r--r--arch/x86/kernel/reboot.c26
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c4
-rw-r--r--arch/x86/kvm/vmx.c28
-rw-r--r--arch/x86/pci/mmconfig-shared.c7
-rw-r--r--arch/x86/platform/efi/efi.c11
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/spinlock.c26
-rw-r--r--block/Kconfig9
-rw-r--r--block/Makefile2
-rw-r--r--block/partitions/Kconfig4
-rw-r--r--block/partitions/cmdline.c8
-rw-r--r--drivers/acpi/acpi_ipmi.c24
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/bcma/driver_pci.c49
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c1
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c12
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/char/tpm/xen-tpmfront.c36
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c3
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c10
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c2
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/edma.c3
-rw-r--r--drivers/dma/imx-dma.c31
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c9
-rw-r--r--drivers/gpio/gpio-omap.c158
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c42
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c2
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c26
-rw-r--r--drivers/gpu/drm/radeon/cik.c23
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/r100.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c41
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c10
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c4
-rw-r--r--drivers/hid/hid-roccat-pyra.c4
-rw-r--r--drivers/hid/hid-wiimote-modules.c40
-rw-r--r--drivers/hid/hid-wiimote.h4
-rw-r--r--drivers/hid/hidraw.c21
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hv/connection.c2
-rw-r--r--drivers/hv/hv_kvp.c38
-rw-r--r--drivers/hv/hv_snapshot.c6
-rw-r--r--drivers/hv/hv_util.c71
-rw-r--r--drivers/hwmon/applesmc.c24
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c26
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c16
-rw-r--r--drivers/i2c/busses/i2c-mxs.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-stu300.c11
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c14
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c4
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/arm-smmu.c13
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c39
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/journal.c33
-rw-r--r--drivers/md/bcache/request.c20
-rw-r--r--drivers/md/bcache/sysfs.c9
-rw-r--r--drivers/md/bcache/util.c11
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/dm-io.c7
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-stats.c23
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/misc/mei/amthif.c1
-rw-r--r--drivers/misc/mei/bus.c5
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/hbm.c10
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/mei_dev.h6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c16
-rw-r--r--drivers/mtd/devices/m25p80.c17
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/can/flexcan.c12
-rw-r--r--drivers/net/can/slcan.c139
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c189
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c50
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c76
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c162
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c39
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/usbnet.c27
-rw-r--r--drivers/net/vxlan.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c26
-rw-r--r--drivers/net/wireless/cw1200/fwio.c2
-rw-r--r--drivers/net/wireless/cw1200/hwbus.h1
-rw-r--r--drivers/net/wireless/cw1200/hwio.c15
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/xenbus.c148
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c5
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/ti-abb-regulator.c16
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c8
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/spi/spi-atmel.c3
-rw-r--r--drivers/spi/spi-clps711x.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c10
-rw-r--r--drivers/spi/spi-mpc512x-psc.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c11
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c25
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c11
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_sbc.c28
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/tty/n_tty.c49
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c7
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/f_fs.c62
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c17
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c22
-rw-r--r--drivers/usb/host/ohci-q.c26
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/xhci-hub.c47
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci.c25
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c11
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/vhost/scsi.c7
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c17
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/of_display_timing.c6
-rw-r--r--drivers/video/omap2/displays-new/Kconfig1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c1
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/kempld_wdt.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c4
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--fs/afs/dir.c3
-rw-r--r--fs/aio.c52
-rw-r--r--fs/binfmt_elf.c30
-rw-r--r--fs/bio.c4
-rw-r--r--fs/btrfs/async-thread.c25
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/disk-io.c9
-rw-r--r--fs/btrfs/disk-io.h13
-rw-r--r--fs/btrfs/extent_io.c22
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/root-tree.c8
-rw-r--r--fs/btrfs/transaction.c7
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h5
-rw-r--r--fs/cifs/cifspdu.h21
-rw-r--r--fs/cifs/cifssmb.c1
-rw-r--r--fs/cifs/file.c8
-rw-r--r--fs/cifs/fscache.c7
-rw-r--r--fs/cifs/fscache.h13
-rw-r--r--fs/cifs/inode.c45
-rw-r--r--fs/cifs/readdir.c3
-rw-r--r--fs/cifs/sess.c84
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/xattr.c2
-rw-r--r--fs/fuse/dir.c20
-rw-r--r--fs/fuse/file.c23
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/nfs4file.c3
-rw-r--r--fs/nfs/nfs4filelayoutdev.c20
-rw-r--r--fs/nfs/nfs4proc.c58
-rw-r--r--fs/nilfs2/page.c2
-rw-r--r--fs/nilfs2/segment.c11
-rw-r--r--fs/ocfs2/dcache.c7
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/reiserfs/journal.c67
-rw-r--r--fs/statfs.c2
-rw-r--r--fs/super.c4
-rw-r--r--fs/sysv/super.c1
-rw-r--r--fs/udf/ialloc.c16
-rw-r--r--fs/udf/super.c64
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/xfs/xfs_buf_item.c1
-rw-r--r--fs/xfs/xfs_da_btree.c5
-rw-r--r--fs/xfs/xfs_dir2_block.c6
-rw-r--r--fs/xfs/xfs_dir2_format.h51
-rw-r--r--fs/xfs/xfs_dir2_readdir.c4
-rw-r--r--fs/xfs/xfs_dir2_sf.c6
-rw-r--r--fs/xfs/xfs_dquot.c19
-rw-r--r--fs/xfs/xfs_fs.h2
-rw-r--r--fs/xfs/xfs_icache.c9
-rw-r--r--fs/xfs/xfs_log_recover.c74
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/asm-generic/vtime.h1
-rw-r--r--include/dt-bindings/pinctrl/omap.h4
-rw-r--r--include/linux/balloon_compaction.h25
-rw-r--r--include/linux/bcma/bcma_driver_pci.h1
-rw-r--r--include/linux/compiler-gcc4.h15
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/kernel.h11
-rw-r--r--include/linux/memcontrol.h55
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mutex.h6
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/of_irq.h20
-rw-r--r--include/linux/perf_event.h24
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/timex.h14
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/vgaarb.h7
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/ip_vs.h9
-rw-r--r--include/net/mrp.h1
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/secure_seq.h1
-rw-r--r--include/net/sock.h5
-rw-r--r--include/sound/rcar_snd.h1
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/perf_event.h15
-rw-r--r--init/main.c2
-rw-r--r--ipc/msg.c32
-rw-r--r--ipc/sem.c214
-rw-r--r--ipc/shm.c17
-rw-r--r--ipc/util.c32
-rw-r--r--ipc/util.h10
-rw-r--r--kernel/audit.c5
-rw-r--r--kernel/context_tracking.c12
-rw-r--r--kernel/events/core.c27
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/params.c6
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/power/user.c8
-rw-r--r--kernel/reboot.c9
-rw-r--r--kernel/sched/fair.c9
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/watchdog.c60
-rw-r--r--lib/hexdump.c2
-rw-r--r--lib/kobject.c7
-rw-r--r--lib/lockref.c23
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/hwpoison-inject.c5
-rw-r--r--mm/madvise.c5
-rw-r--r--mm/memcontrol.c560
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mlock.c9
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/vmscan.c87
-rw-r--r--net/802/mrp.c27
-rw-r--r--net/bluetooth/hci_core.c26
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/l2cap_core.c7
-rw-r--r--net/bluetooth/rfcomm/tty.c35
-rw-r--r--net/core/dev.c49
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/secure_seq.c27
-rw-r--r--net/ipv4/af_inet.c4
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/ip_tunnel.c22
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c10
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/tcp_output.c17
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/addrconf.c79
-rw-r--r--net/ipv6/ip6_gre.c4
-rw-r--r--net/ipv6/ip6_output.c53
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/mcast.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c10
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/sit.c86
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/lapb/lapb_timer.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c86
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c72
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c62
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c6
-rw-r--r--net/netfilter/nf_synproxy_core.c12
-rw-r--r--net/sched/sch_fq.c102
-rw-r--r--net/sysctl_net.c4
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--security/apparmor/crypto.c34
-rw-r--r--security/apparmor/include/policy.h4
-rw-r--r--security/apparmor/policy.c3
-rw-r--r--security/selinux/avc.c9
-rw-r--r--security/selinux/hooks.c15
-rw-r--r--security/selinux/include/avc.h18
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/pcm1792a.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/fsl/imx-mc13783.c2
-rw-r--r--sound/soc/fsl/imx-ssi.c23
-rw-r--r--sound/soc/fsl/imx-ssi.h2
-rw-r--r--sound/soc/omap/Kconfig4
-rw-r--r--sound/soc/sh/rcar/rsnd.h4
-rw-r--r--tools/lib/lk/debugfs.c1
-rw-r--r--tools/perf/Makefile1
-rw-r--r--tools/perf/arch/x86/util/tsc.c6
-rw-r--r--tools/perf/builtin-inject.c2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c1
-rw-r--r--tools/perf/builtin-trace.c18
-rw-r--r--tools/perf/config/Makefile5
-rw-r--r--tools/perf/config/feature-tests.mak10
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/dwarf-aux.c44
-rw-r--r--tools/perf/util/dwarf-aux.h9
-rw-r--r--tools/perf/util/header.c53
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/probe-finder.c138
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/session.c13
-rw-r--r--tools/perf/util/session.h4
-rw-r--r--tools/perf/util/symbol-elf.c16
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--virt/kvm/kvm_main.c6
649 files changed, 5738 insertions, 3376 deletions
diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e..0640e1650483 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d18ecd827c40..929d9904f74b 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -6,6 +6,8 @@ capability.txt
 	- Generic Block Device Capability (/sys/block/<device>/capability)
 cfq-iosched.txt
 	- CFQ IO scheduler tunables
+cmdline-partition.txt
+	- how to specify block device partitions on kernel command line
 data-integrity.txt
 	- Block data integrity
 deadline-iosched.txt
diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt
index 2bbf4cc40c3f..525b9f6d7fb4 100644
--- a/Documentation/block/cmdline-partition.txt
+++ b/Documentation/block/cmdline-partition.txt
@@ -1,9 +1,9 @@
-Embedded device command line partition
+Embedded device command line partition parsing
 =====================================================================
 
-Read block device partition table from command line.
-The partition used for fixed block device (eMMC) embedded device.
-It is no MBR, save storage space. Bootloader can be easily accessed
+Support for reading the block device partition table from the command line.
+It is typically used for fixed block (eMMC) embedded devices.
+It has no MBR, so saves storage space. Bootloader can be easily accessed
 by absolute address of data on the block device.
 Users can easily change the partition.
 
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
  Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
  Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
-* Synopsis Designware Mobile Storage Host Controller
+* Synopsys Designware Mobile Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
 differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
+properties used by the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
 * compatible: should be
-  - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
+  - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
 * #address-cells: should be 1.
 * #size-cells: should be 0.
 
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index df204e18e030..6a2a1160a70d 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -9,12 +9,15 @@ compulsory and any optional properties, common to all SD/MMC drivers, as
 described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
 optional bindings can be used.
 
+Required properties:
+- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
+              "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
+              "renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
+              "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
+              "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
+              "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
+              "renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
+              "renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
+
 Optional properties:
 - toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
-
-When used with Renesas SDHI hardware, the following compatibility strings
-configure various model-specific properties:
-
-"renesas,sh7372-sdhi": (default) compatible with SH7372
-"renesas,r8a7740-sdhi": compatible with R8A7740: certain MMC/SD commands have to
- wait for the interface to become idle.
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 2c6be0377f55..d2ea4605d078 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -86,6 +86,7 @@ General Properties:
 
 Clock Properties:
 
+  - fsl,cksel Timer reference clock source.
   - fsl,tclk-period Timer reference clock period in nanoseconds.
   - fsl,tmr-prsc Prescaler, divides the output clock.
   - fsl,tmr-add Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
  clock. You must choose these carefully for the clock to work right.
  Here is how to figure good values:
 
- TimerOsc = system clock MHz
+ TimerOsc = selected reference clock MHz
  tclk_period = desired clock period nanoseconds
  NominalFreq = 1000 / tclk_period MHz
  FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
@@ -114,6 +115,20 @@ Clock Properties:
  Pulse Per Second (PPS) signal, since this will be offered to the PPS
  subsystem to synchronize the Linux clock.
 
+ Reference clock source is determined by the value, which is holded
+ in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the
+ value, which will be directly written in those bits, that is why,
+ according to reference manual, the next clock sources can be used:
+
+ <0> - external high precision timer reference clock (TSEC_TMR_CLK
+       input is used for this purpose);
+ <1> - eTSEC system clock;
+ <2> - eTSEC1 transmit clock;
+ <3> - RTC clock input.
+
+ When this attribute is not used, eTSEC system clock will serve as
+ IEEE 1588 timer reference clock.
+
 Example:
 
 	ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
 		reg = <0x24E00 0xB0>;
 		interrupts = <12 0x8 13 0x8>;
 		interrupt-parent = < &ipic >;
+		fsl,cksel = <1>;
 		fsl,tclk-period = <10>;
 		fsl,tmr-prsc = <100>;
 		fsl,tmr-add = <0x999999A4>;
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index eabcb4b5db6e..e216af356847 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..c5e032c85bf9 100644
--- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1a036cd972fb..fcbb736d55fe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: <io>,<irq>,<mode>
 			See header of drivers/net/hamradio/baycom_ser_hdx.c.
 
+	blkdevparts=	Manual partition parsing of block device(s) for
+			embedded devices based on command line input.
+			See Documentation/block/cmdline-partition.txt
+
 	boot_delay=	Milliseconds to delay each printk during boot.
 			Values larger than 10 seconds (10000) are changed to
 			no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			pages. In the event, a node is too small to have both
 			kernelcore and Movable pages, kernelcore pages will
 			take priority and other nodes will have a larger number
-			of kernelcore pages. The Movable zone is used for the
+			of Movable pages. The Movable zone is used for the
 			allocation of pages that may be reclaimed or moved
 			by the page migration subsystem. This means that
 			HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			the unplug protocol
 			never -- do not unplug even if version check succeeds
 
+	xen_nopvspin	[X86,XEN]
+			Disables the ticketlock slowpath using Xen PV
+			optimizations.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/MAINTAINERS b/MAINTAINERS
index e61c2e83fc2b..8a0cbf3cf2c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -824,15 +824,21 @@ S: Maintained
 F:	arch/arm/mach-gemini/
 
 ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
-M:	Barry Song <baohua.song@csr.com>
+M:	Barry Song <baohua@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
 S:	Maintained
 F:	arch/arm/mach-prima2/
+F:	drivers/clk/clk-prima2.c
+F:	drivers/clocksource/timer-prima2.c
+F:	drivers/clocksource/timer-marco.c
 F:	drivers/dma/sirf-dma.c
 F:	drivers/i2c/busses/i2c-sirf.c
+F:	drivers/input/misc/sirfsoc-onkey.c
+F:	drivers/irqchip/irq-sirfsoc.c
 F:	drivers/mmc/host/sdhci-sirf.c
 F:	drivers/pinctrl/sirf/
+F:	drivers/rtc/rtc-sirfsoc.c
 F:	drivers/spi/spi-sirf.c
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -1812,7 +1818,8 @@ S: Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:	Christian Daudt <csd@broadcom.com>
+M:	Christian Daudt <bcm@fixthebug.org>
+L:	bcm-kernel-feedback-list@broadcom.com
 T:	git git://git.github.com/broadcom/bcm11351
 S:	Maintained
 F:	arch/arm/mach-bcm/
@@ -2639,6 +2646,18 @@ F: include/linux/device-mapper.h
 F:	include/linux/dm-*.h
 F:	include/uapi/linux/dm-*.h
 
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M:	Lidza Louina <lidza.louina@gmail.com>
+L:	driverdev-devel@linuxdriverproject.org
+S:	Maintained
+F:	drivers/staging/dgap/
+
 DIOLAN U2C-12 I2C DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
@@ -4457,6 +4476,13 @@ L: linux-serial@vger.kernel.org
 S:	Maintained
 F:	drivers/tty/serial/ioc3_serial.c
 
+IOMMU DRIVERS
+M:	Joerg Roedel <joro@8bytes.org>
+L:	iommu@lists.linux-foundation.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
+S:	Maintained
+F:	drivers/iommu/
+
 IP MASQUERADING
 M:	Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:	Maintained
@@ -6595,7 +6621,7 @@ S: Obsolete
 F:	drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:	Mikael Pettersson <mikpe@it.uu.se>
+M:	Mikael Pettersson <mikpelinux@gmail.com>
 L:	linux-ide@vger.kernel.org
 S:	Maintained
 F:	drivers/ata/sata_promise.*
@@ -7258,9 +7284,9 @@ F: include/linux/sched.h
 F:	include/uapi/linux/sched.h
 
 SCORE ARCHITECTURE
-M:	Chen Liqin <liqin.chen@sunplusct.com>
+M:	Chen Liqin <liqin.linux@gmail.com>
 M:	Lennox Wu <lennox.wu@gmail.com>
-W:	http://www.sunplusct.com
+W:	http://www.sunplus.com
 S:	Supported
 F:	arch/score/
 
@@ -8724,9 +8750,8 @@ F: Documentation/hid/hiddev.txt
 F:	drivers/hid/usbhid/
 
 USB/IP DRIVERS
-M:	Matt Mooney <mfm@muteddisk.com>
 L:	linux-usb@vger.kernel.org
-S:	Maintained
+S:	Orphan
 F:	drivers/staging/usbip/
 
 USB ISP116X DRIVER
@@ -9366,6 +9391,7 @@ F: arch/arm64/include/asm/xen/
 
 XEN NETWORK BACKEND DRIVER
 M:	Ian Campbell <ian.campbell@citrix.com>
+M:	Wei Liu <wei.liu2@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 S:	Supported
diff --git a/Makefile b/Makefile
index 8d0668f473ba..deec08b7612b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-	bool
-
 config HAVE_RCU_TABLE_FREE
 	bool
 
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	" ex %0, [%1] \n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
 
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
 
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 333238564b67..5d76706139dd 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
 	REG_IGNORE_ONE(pad2);
 	REG_IN_CHUNK(callee, efa, cregs);	/* callee_regs[r25..r13] */
 	REG_IGNORE_ONE(efa);			/* efa update invalid */
-	REG_IN_ONE(stop_pc, &ptregs->ret);	/* stop_pc: PC update */
+	REG_IGNORE_ONE(stop_pc);		/* PC updated via @ret */
 
 	return ret;
 }
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
 {
 	struct rt_sigframe __user *sf;
 	unsigned int magic;
-	int err;
 	struct pt_regs *regs = current_pt_regs();
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
 		goto badframe;
 
-	err = restore_usr_regs(regs, sf);
-	err |= __get_user(magic, &sf->sigret_magic);
-	if (err)
+	if (__get_user(magic, &sf->sigret_magic))
 		goto badframe;
 
 	if (unlikely(is_do_ss_needed(magic)))
 		if (restore_altstack(&sf->uc.uc_stack))
 			goto badframe;
 
+	if (restore_usr_regs(regs, sf))
+		goto badframe;
+
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
 		return 1;
 
 	/*
+	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
+	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+	 * during signal handler execution. This works for SA_SIGINFO as well
+	 * although the semantics are now overloaded (the same reg state can be
+	 * inspected by userland: but are they allowed to fiddle with it ?
+	 */
+	err |= stash_usr_regs(sf, regs, set);
+
+	/*
 	 * SA_SIGINFO requires 3 args to signal handler:
 	 * #1: sig-no (common to any handler)
 	 * #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
 		magic = MAGIC_SIGALTSTK;
 	}
 
-	/*
-	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
-	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
-	 * during signal handler execution. This works for SA_SIGINFO as well
-	 * although the semantics are now overloaded (the same reg state can be
-	 * inspected by userland: but are they allowed to fiddle with it ?
-	 */
-	err |= stash_usr_regs(sf, regs, set);
 	err |= __put_user(magic, &sf->sigret_magic);
 	if (err)
 		return err;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
 	clk->cpumask = cpumask_of(cpu);
-
-	clockevents_register_device(clk);
+	clockevents_config_and_register(clk, arc_get_core_freq(),
+					0, ARC_TIMER_MAX);
 
 	/*
 	 * setup the per-cpu timer IRQ handler - for all cpus
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..1ad6fb6c094d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2217,8 +2217,7 @@ config NEON
 
 config KERNEL_MODE_NEON
 	bool "Support for NEON in kernel mode"
-	default n
-	depends on NEON
+	depends on NEON && AEABI
 	help
 	  Say Y to include support for NEON in kernel mode.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a2..db50b626be98 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS	= zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS	= zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07ed..ec2f8065f955 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
 	@test "$(INITRD)" != "" || \
 	(echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir- := bootp compressed dts
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index e95af3f5433b..802720e3e8fd 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -41,6 +41,8 @@ dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb 41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb 42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb
43 43
44dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
45
44dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb 46dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
45dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \ 47dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \
46 bcm28155-ap.dtb 48 bcm28155-ap.dtb
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 05e4485a8225..8ac2ac1f69cc 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -27,6 +27,25 @@
27 }; 27 };
28 28
29 soc { 29 soc {
30 ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
31 MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
32
33 pcie-controller {
34 status = "okay";
35
36 /* Connected to Marvell SATA controller */
37 pcie@1,0 {
38 /* Port 0, Lane 0 */
39 status = "okay";
40 };
41
42 /* Connected to FL1009 USB 3.0 controller */
43 pcie@2,0 {
44 /* Port 1, Lane 0 */
45 status = "okay";
46 };
47 };
48
30 internal-regs { 49 internal-regs {
31 serial@12000 { 50 serial@12000 {
32 clock-frequency = <200000000>; 51 clock-frequency = <200000000>;
@@ -57,6 +76,11 @@
57 marvell,pins = "mpp56"; 76 marvell,pins = "mpp56";
58 marvell,function = "gpio"; 77 marvell,function = "gpio";
59 }; 78 };
79
80 poweroff: poweroff {
81 marvell,pins = "mpp8";
82 marvell,function = "gpio";
83 };
60 }; 84 };
61 85
62 mdio { 86 mdio {
@@ -89,22 +113,6 @@
89 pwm_polarity = <0>; 113 pwm_polarity = <0>;
90 }; 114 };
91 }; 115 };
92
93 pcie-controller {
94 status = "okay";
95
96 /* Connected to Marvell SATA controller */
97 pcie@1,0 {
98 /* Port 0, Lane 0 */
99 status = "okay";
100 };
101
102 /* Connected to FL1009 USB 3.0 controller */
103 pcie@2,0 {
104 /* Port 1, Lane 0 */
105 status = "okay";
106 };
107 };
108 }; 116 };
109 }; 117 };
110 118
@@ -160,7 +168,7 @@
160 button@1 { 168 button@1 {
161 label = "Power Button"; 169 label = "Power Button";
162 linux,code = <116>; /* KEY_POWER */ 170 linux,code = <116>; /* KEY_POWER */
163 gpios = <&gpio1 30 1>; 171 gpios = <&gpio1 30 0>;
164 }; 172 };
165 173
166 button@2 { 174 button@2 {
@@ -176,4 +184,11 @@
176 }; 184 };
177 }; 185 };
178 186
187 gpio_poweroff {
188 compatible = "gpio-poweroff";
189 pinctrl-0 = <&poweroff>;
190 pinctrl-names = "default";
191 gpios = <&gpio0 8 1>;
192 };
193
179}; 194};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index def125c0eeaa..3058522f5aad 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -70,6 +70,8 @@
70 70
71 timer@20300 { 71 timer@20300 {
72 compatible = "marvell,armada-xp-timer"; 72 compatible = "marvell,armada-xp-timer";
73 clocks = <&coreclk 2>, <&refclk>;
74 clock-names = "nbclk", "fixed";
73 }; 75 };
74 76
75 coreclk: mvebu-sar@18230 { 77 coreclk: mvebu-sar@18230 {
@@ -169,4 +171,13 @@
169 }; 171 };
170 }; 172 };
171 }; 173 };
174
175 clocks {
176 /* 25 MHz reference crystal */
177 refclk: oscillator {
178 compatible = "fixed-clock";
179 #clock-cells = <0>;
180 clock-frequency = <25000000>;
181 };
182 };
172}; 183};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index cf78ac0b04b1..e74dc15efa9d 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -190,12 +190,12 @@
190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */ 190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */
191 }; 191 };
192 192
193 pinctrl_uart2_rts: uart2_rts-0 { 193 pinctrl_usart2_rts: usart2_rts-0 {
194 atmel,pins = 194 atmel,pins =
195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */ 195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */
196 }; 196 };
197 197
198 pinctrl_uart2_cts: uart2_cts-0 { 198 pinctrl_usart2_cts: usart2_cts-0 {
199 atmel,pins = 199 atmel,pins =
200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */ 200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */
201 }; 201 };
@@ -556,6 +556,7 @@
556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>; 556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>; 557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>;
558 dma-names = "rxtx"; 558 dma-names = "rxtx";
559 pinctrl-names = "default";
559 #address-cells = <1>; 560 #address-cells = <1>;
560 #size-cells = <0>; 561 #size-cells = <0>;
561 status = "disabled"; 562 status = "disabled";
@@ -567,6 +568,7 @@
567 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>; 568 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
568 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>; 569 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>;
569 dma-names = "rxtx"; 570 dma-names = "rxtx";
571 pinctrl-names = "default";
570 #address-cells = <1>; 572 #address-cells = <1>;
571 #size-cells = <0>; 573 #size-cells = <0>;
572 status = "disabled"; 574 status = "disabled";
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 8678e0c11119..6db4f81d4795 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -181,6 +181,8 @@
181 interrupts = <17>; 181 interrupts = <17>;
182 fifosize = <128>; 182 fifosize = <128>;
183 clocks = <&clks 13>; 183 clocks = <&clks 13>;
184 sirf,uart-dma-rx-channel = <21>;
185 sirf,uart-dma-tx-channel = <2>;
184 }; 186 };
185 187
186 uart1: uart@b0060000 { 188 uart1: uart@b0060000 {
@@ -199,6 +201,8 @@
199 interrupts = <19>; 201 interrupts = <19>;
200 fifosize = <128>; 202 fifosize = <128>;
201 clocks = <&clks 15>; 203 clocks = <&clks 15>;
204 sirf,uart-dma-rx-channel = <6>;
205 sirf,uart-dma-tx-channel = <7>;
202 }; 206 };
203 207
204 usp0: usp@b0080000 { 208 usp0: usp@b0080000 {
@@ -206,7 +210,10 @@
206 compatible = "sirf,prima2-usp"; 210 compatible = "sirf,prima2-usp";
207 reg = <0xb0080000 0x10000>; 211 reg = <0xb0080000 0x10000>;
208 interrupts = <20>; 212 interrupts = <20>;
213 fifosize = <128>;
209 clocks = <&clks 28>; 214 clocks = <&clks 28>;
215 sirf,usp-dma-rx-channel = <17>;
216 sirf,usp-dma-tx-channel = <18>;
210 }; 217 };
211 218
212 usp1: usp@b0090000 { 219 usp1: usp@b0090000 {
@@ -214,7 +221,10 @@
214 compatible = "sirf,prima2-usp"; 221 compatible = "sirf,prima2-usp";
215 reg = <0xb0090000 0x10000>; 222 reg = <0xb0090000 0x10000>;
216 interrupts = <21>; 223 interrupts = <21>;
224 fifosize = <128>;
217 clocks = <&clks 29>; 225 clocks = <&clks 29>;
226 sirf,usp-dma-rx-channel = <14>;
227 sirf,usp-dma-tx-channel = <15>;
218 }; 228 };
219 229
220 dmac0: dma-controller@b00b0000 { 230 dmac0: dma-controller@b00b0000 {
@@ -237,6 +247,8 @@
237 compatible = "sirf,prima2-vip"; 247 compatible = "sirf,prima2-vip";
238 reg = <0xb00C0000 0x10000>; 248 reg = <0xb00C0000 0x10000>;
239 clocks = <&clks 31>; 249 clocks = <&clks 31>;
250 interrupts = <14>;
251 sirf,vip-dma-rx-channel = <16>;
240 }; 252 };
241 253
242 spi0: spi@b00d0000 { 254 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 7d7cc777ff7b..bbac42a78ce5 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -96,6 +96,11 @@
96 <1 14 0xf08>, 96 <1 14 0xf08>,
97 <1 11 0xf08>, 97 <1 11 0xf08>,
98 <1 10 0xf08>; 98 <1 10 0xf08>;
99 /* Unfortunately we need this since some versions of U-Boot
100 * on Exynos don't set the CNTFRQ register, so we need the
101 * value from DT.
102 */
103 clock-frequency = <24000000>;
99 }; 104 };
100 105
101 mct@101C0000 { 106 mct@101C0000 {
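
Editor's note: the clock-frequency property added above works around U-Boot builds that leave the architected timer's CNTFRQ register unprogrammed, letting the timer code fall back to the rate supplied by the device tree. A hedged sketch of that generic fallback pattern is below; the function name is hypothetical and this is not the actual arch timer driver code.

    #include <linux/of.h>

    /*
     * Pick a timer rate: prefer what the hardware reports, fall back to
     * a DT-provided "clock-frequency" when firmware left it at zero.
     */
    static u32 timer_pick_rate(struct device_node *np, u32 hw_rate)
    {
        u32 dt_rate;

        if (hw_rate)
            return hw_rate;

        /* e.g. clock-frequency = <24000000>; as in the hunk above */
        if (!of_property_read_u32(np, "clock-frequency", &dt_rate))
            return dt_rate;

        return 0;   /* no usable rate found */
    }
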
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index cf7aeaf89e9c..1335b2e1bed4 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -13,6 +13,7 @@
13 cpu@0 { 13 cpu@0 {
14 device_type = "cpu"; 14 device_type = "cpu";
15 compatible = "marvell,feroceon"; 15 compatible = "marvell,feroceon";
16 reg = <0>;
16 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>; 17 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
17 clock-names = "cpu_clk", "ddrclk", "powersave"; 18 clock-names = "cpu_clk", "ddrclk", "powersave";
18 }; 19 };
@@ -167,7 +168,7 @@
167 xor@60900 { 168 xor@60900 {
168 compatible = "marvell,orion-xor"; 169 compatible = "marvell,orion-xor";
169 reg = <0x60900 0x100 170 reg = <0x60900 0x100
170 0xd0B00 0x100>; 171 0x60B00 0x100>;
171 status = "okay"; 172 status = "okay";
172 clocks = <&gate_clk 16>; 173 clocks = <&gate_clk 16>;
173 174
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 0c514dc8460c..2816bf612672 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
11 11
12/ { 12/ {
13 model = "TI OMAP3 BeagleBoard xM"; 13 model = "TI OMAP3 BeagleBoard xM";
14 compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3"; 14 compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
15 15
16 cpus { 16 cpus {
17 cpu@0 { 17 cpu@0 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 7d95cda1fae4..b41bd57f4328 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -108,7 +108,7 @@
108 #address-cells = <1>; 108 #address-cells = <1>;
109 #size-cells = <0>; 109 #size-cells = <0>;
110 pinctrl-single,register-width = <16>; 110 pinctrl-single,register-width = <16>;
111 pinctrl-single,function-mask = <0x7f1f>; 111 pinctrl-single,function-mask = <0xff1f>;
112 }; 112 };
113 113
114 omap3_pmx_wkup: pinmux@0x48002a00 { 114 omap3_pmx_wkup: pinmux@0x48002a00 {
@@ -117,7 +117,7 @@
117 #address-cells = <1>; 117 #address-cells = <1>;
118 #size-cells = <0>; 118 #size-cells = <0>;
119 pinctrl-single,register-width = <16>; 119 pinctrl-single,register-width = <16>;
120 pinctrl-single,function-mask = <0x7f1f>; 120 pinctrl-single,function-mask = <0xff1f>;
121 }; 121 };
122 122
123 gpio1: gpio@48310000 { 123 gpio1: gpio@48310000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index bbeb623fc2c6..27ed9f5144bc 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -171,7 +171,8 @@
171 compatible = "simple-bus"; 171 compatible = "simple-bus";
172 #address-cells = <1>; 172 #address-cells = <1>;
173 #size-cells = <1>; 173 #size-cells = <1>;
174 ranges = <0xb0000000 0xb0000000 0x180000>; 174 ranges = <0xb0000000 0xb0000000 0x180000>,
175 <0x56000000 0x56000000 0x1b00000>;
175 176
176 timer@b0020000 { 177 timer@b0020000 {
177 compatible = "sirf,prima2-tick"; 178 compatible = "sirf,prima2-tick";
@@ -196,25 +197,32 @@
196 uart0: uart@b0050000 { 197 uart0: uart@b0050000 {
197 cell-index = <0>; 198 cell-index = <0>;
198 compatible = "sirf,prima2-uart"; 199 compatible = "sirf,prima2-uart";
199 reg = <0xb0050000 0x10000>; 200 reg = <0xb0050000 0x1000>;
200 interrupts = <17>; 201 interrupts = <17>;
202 fifosize = <128>;
201 clocks = <&clks 13>; 203 clocks = <&clks 13>;
204 sirf,uart-dma-rx-channel = <21>;
205 sirf,uart-dma-tx-channel = <2>;
202 }; 206 };
203 207
204 uart1: uart@b0060000 { 208 uart1: uart@b0060000 {
205 cell-index = <1>; 209 cell-index = <1>;
206 compatible = "sirf,prima2-uart"; 210 compatible = "sirf,prima2-uart";
207 reg = <0xb0060000 0x10000>; 211 reg = <0xb0060000 0x1000>;
208 interrupts = <18>; 212 interrupts = <18>;
213 fifosize = <32>;
209 clocks = <&clks 14>; 214 clocks = <&clks 14>;
210 }; 215 };
211 216
212 uart2: uart@b0070000 { 217 uart2: uart@b0070000 {
213 cell-index = <2>; 218 cell-index = <2>;
214 compatible = "sirf,prima2-uart"; 219 compatible = "sirf,prima2-uart";
215 reg = <0xb0070000 0x10000>; 220 reg = <0xb0070000 0x1000>;
216 interrupts = <19>; 221 interrupts = <19>;
222 fifosize = <128>;
217 clocks = <&clks 15>; 223 clocks = <&clks 15>;
224 sirf,uart-dma-rx-channel = <6>;
225 sirf,uart-dma-tx-channel = <7>;
218 }; 226 };
219 227
220 usp0: usp@b0080000 { 228 usp0: usp@b0080000 {
@@ -222,7 +230,10 @@
222 compatible = "sirf,prima2-usp"; 230 compatible = "sirf,prima2-usp";
223 reg = <0xb0080000 0x10000>; 231 reg = <0xb0080000 0x10000>;
224 interrupts = <20>; 232 interrupts = <20>;
233 fifosize = <128>;
225 clocks = <&clks 28>; 234 clocks = <&clks 28>;
235 sirf,usp-dma-rx-channel = <17>;
236 sirf,usp-dma-tx-channel = <18>;
226 }; 237 };
227 238
228 usp1: usp@b0090000 { 239 usp1: usp@b0090000 {
@@ -230,7 +241,10 @@
230 compatible = "sirf,prima2-usp"; 241 compatible = "sirf,prima2-usp";
231 reg = <0xb0090000 0x10000>; 242 reg = <0xb0090000 0x10000>;
232 interrupts = <21>; 243 interrupts = <21>;
244 fifosize = <128>;
233 clocks = <&clks 29>; 245 clocks = <&clks 29>;
246 sirf,usp-dma-rx-channel = <14>;
247 sirf,usp-dma-tx-channel = <15>;
234 }; 248 };
235 249
236 usp2: usp@b00a0000 { 250 usp2: usp@b00a0000 {
@@ -238,7 +252,10 @@
238 compatible = "sirf,prima2-usp"; 252 compatible = "sirf,prima2-usp";
239 reg = <0xb00a0000 0x10000>; 253 reg = <0xb00a0000 0x10000>;
240 interrupts = <22>; 254 interrupts = <22>;
255 fifosize = <128>;
241 clocks = <&clks 30>; 256 clocks = <&clks 30>;
257 sirf,usp-dma-rx-channel = <10>;
258 sirf,usp-dma-tx-channel = <11>;
242 }; 259 };
243 260
244 dmac0: dma-controller@b00b0000 { 261 dmac0: dma-controller@b00b0000 {
@@ -261,6 +278,8 @@
261 compatible = "sirf,prima2-vip"; 278 compatible = "sirf,prima2-vip";
262 reg = <0xb00C0000 0x10000>; 279 reg = <0xb00C0000 0x10000>;
263 clocks = <&clks 31>; 280 clocks = <&clks 31>;
281 interrupts = <14>;
282 sirf,vip-dma-rx-channel = <16>;
264 }; 283 };
265 284
266 spi0: spi@b00d0000 { 285 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 6c26caa880f2..658fcc537576 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -193,7 +193,7 @@
193 }; 193 };
194 194
195 sdhi0: sdhi@ee100000 { 195 sdhi0: sdhi@ee100000 {
196 compatible = "renesas,r8a73a4-sdhi"; 196 compatible = "renesas,sdhi-r8a73a4";
197 reg = <0 0xee100000 0 0x100>; 197 reg = <0 0xee100000 0 0x100>;
198 interrupt-parent = <&gic>; 198 interrupt-parent = <&gic>;
199 interrupts = <0 165 4>; 199 interrupts = <0 165 4>;
@@ -202,7 +202,7 @@
202 }; 202 };
203 203
204 sdhi1: sdhi@ee120000 { 204 sdhi1: sdhi@ee120000 {
205 compatible = "renesas,r8a73a4-sdhi"; 205 compatible = "renesas,sdhi-r8a73a4";
206 reg = <0 0xee120000 0 0x100>; 206 reg = <0 0xee120000 0 0x100>;
207 interrupt-parent = <&gic>; 207 interrupt-parent = <&gic>;
208 interrupts = <0 166 4>; 208 interrupts = <0 166 4>;
@@ -211,7 +211,7 @@
211 }; 211 };
212 212
213 sdhi2: sdhi@ee140000 { 213 sdhi2: sdhi@ee140000 {
214 compatible = "renesas,r8a73a4-sdhi"; 214 compatible = "renesas,sdhi-r8a73a4";
215 reg = <0 0xee140000 0 0x100>; 215 reg = <0 0xee140000 0 0x100>;
216 interrupt-parent = <&gic>; 216 interrupt-parent = <&gic>;
217 interrupts = <0 167 4>; 217 interrupts = <0 167 4>;
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 45ac404ab6d8..3577aba82583 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -96,6 +96,5 @@
96 pfc: pfc@fffc0000 { 96 pfc: pfc@fffc0000 {
97 compatible = "renesas,pfc-r8a7778"; 97 compatible = "renesas,pfc-r8a7778";
98 reg = <0xfffc000 0x118>; 98 reg = <0xfffc000 0x118>;
99 #gpio-range-cells = <3>;
100 }; 99 };
101}; 100};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 23a62447359c..ebbe507fcbfa 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -188,7 +188,6 @@
188 pfc: pfc@fffc0000 { 188 pfc: pfc@fffc0000 {
189 compatible = "renesas,pfc-r8a7779"; 189 compatible = "renesas,pfc-r8a7779";
190 reg = <0xfffc0000 0x23c>; 190 reg = <0xfffc0000 0x23c>;
191 #gpio-range-cells = <3>;
192 }; 191 };
193 192
194 thermal@ffc48000 { 193 thermal@ffc48000 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 3b879e7c697c..413b4c29e782 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -148,11 +148,10 @@
148 pfc: pfc@e6060000 { 148 pfc: pfc@e6060000 {
149 compatible = "renesas,pfc-r8a7790"; 149 compatible = "renesas,pfc-r8a7790";
150 reg = <0 0xe6060000 0 0x250>; 150 reg = <0 0xe6060000 0 0x250>;
151 #gpio-range-cells = <3>;
152 }; 151 };
153 152
154 sdhi0: sdhi@ee100000 { 153 sdhi0: sdhi@ee100000 {
155 compatible = "renesas,r8a7790-sdhi"; 154 compatible = "renesas,sdhi-r8a7790";
156 reg = <0 0xee100000 0 0x100>; 155 reg = <0 0xee100000 0 0x100>;
157 interrupt-parent = <&gic>; 156 interrupt-parent = <&gic>;
158 interrupts = <0 165 4>; 157 interrupts = <0 165 4>;
@@ -161,7 +160,7 @@
161 }; 160 };
162 161
163 sdhi1: sdhi@ee120000 { 162 sdhi1: sdhi@ee120000 {
164 compatible = "renesas,r8a7790-sdhi"; 163 compatible = "renesas,sdhi-r8a7790";
165 reg = <0 0xee120000 0 0x100>; 164 reg = <0 0xee120000 0 0x100>;
166 interrupt-parent = <&gic>; 165 interrupt-parent = <&gic>;
167 interrupts = <0 166 4>; 166 interrupts = <0 166 4>;
@@ -170,7 +169,7 @@
170 }; 169 };
171 170
172 sdhi2: sdhi@ee140000 { 171 sdhi2: sdhi@ee140000 {
173 compatible = "renesas,r8a7790-sdhi"; 172 compatible = "renesas,sdhi-r8a7790";
174 reg = <0 0xee140000 0 0x100>; 173 reg = <0 0xee140000 0 0x100>;
175 interrupt-parent = <&gic>; 174 interrupt-parent = <&gic>;
176 interrupts = <0 167 4>; 175 interrupts = <0 167 4>;
@@ -179,7 +178,7 @@
179 }; 178 };
180 179
181 sdhi3: sdhi@ee160000 { 180 sdhi3: sdhi@ee160000 {
182 compatible = "renesas,r8a7790-sdhi"; 181 compatible = "renesas,sdhi-r8a7790";
183 reg = <0 0xee160000 0 0x100>; 182 reg = <0 0xee160000 0 0x100>;
184 interrupt-parent = <&gic>; 183 interrupt-parent = <&gic>;
185 interrupts = <0 168 4>; 184 interrupts = <0 168 4>;
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index ba59a5875a10..3955c7606a6f 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -196,7 +196,7 @@
196 }; 196 };
197 197
198 sdhi0: sdhi@ee100000 { 198 sdhi0: sdhi@ee100000 {
199 compatible = "renesas,r8a7740-sdhi"; 199 compatible = "renesas,sdhi-r8a7740";
200 reg = <0xee100000 0x100>; 200 reg = <0xee100000 0x100>;
201 interrupt-parent = <&gic>; 201 interrupt-parent = <&gic>;
202 interrupts = <0 83 4 202 interrupts = <0 83 4
@@ -208,7 +208,7 @@
208 208
209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */ 209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */
210 sdhi1: sdhi@ee120000 { 210 sdhi1: sdhi@ee120000 {
211 compatible = "renesas,r8a7740-sdhi"; 211 compatible = "renesas,sdhi-r8a7740";
212 reg = <0xee120000 0x100>; 212 reg = <0xee120000 0x100>;
213 interrupt-parent = <&gic>; 213 interrupt-parent = <&gic>;
214 interrupts = <0 88 4 214 interrupts = <0 88 4
@@ -219,7 +219,7 @@
219 }; 219 };
220 220
221 sdhi2: sdhi@ee140000 { 221 sdhi2: sdhi@ee140000 {
222 compatible = "renesas,r8a7740-sdhi"; 222 compatible = "renesas,sdhi-r8a7740";
223 reg = <0xee140000 0x100>; 223 reg = <0xee140000 0x100>;
224 interrupt-parent = <&gic>; 224 interrupt-parent = <&gic>;
225 interrupts = <0 104 4 225 interrupts = <0 104 4
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8e..2a45092a40e3 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
20# $4 - default install path (blank if root directory) 20# $4 - default install path (blank if root directory)
21# 21#
22 22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
23# User may have a custom install script 37# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 117f955a2a06..8e1a0245907f 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -269,6 +269,11 @@ static const struct edmacc_param dummy_paramset = {
269 .ccnt = 1, 269 .ccnt = 1,
270}; 270};
271 271
272static const struct of_device_id edma_of_ids[] = {
273 { .compatible = "ti,edma3", },
274 {}
275};
276
272/*****************************************************************************/ 277/*****************************************************************************/
273 278
274static void map_dmach_queue(unsigned ctlr, unsigned ch_no, 279static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
@@ -560,14 +565,38 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
560static int prepare_unused_channel_list(struct device *dev, void *data) 565static int prepare_unused_channel_list(struct device *dev, void *data)
561{ 566{
562 struct platform_device *pdev = to_platform_device(dev); 567 struct platform_device *pdev = to_platform_device(dev);
563 int i, ctlr; 568 int i, count, ctlr;
569 struct of_phandle_args dma_spec;
564 570
571 if (dev->of_node) {
572 count = of_property_count_strings(dev->of_node, "dma-names");
573 if (count < 0)
574 return 0;
575 for (i = 0; i < count; i++) {
576 if (of_parse_phandle_with_args(dev->of_node, "dmas",
577 "#dma-cells", i,
578 &dma_spec))
579 continue;
580
581 if (!of_match_node(edma_of_ids, dma_spec.np)) {
582 of_node_put(dma_spec.np);
583 continue;
584 }
585
586 clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
587 edma_cc[0]->edma_unused);
588 of_node_put(dma_spec.np);
589 }
590 return 0;
591 }
592
593 /* For non-OF case */
565 for (i = 0; i < pdev->num_resources; i++) { 594 for (i = 0; i < pdev->num_resources; i++) {
566 if ((pdev->resource[i].flags & IORESOURCE_DMA) && 595 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
567 (int)pdev->resource[i].start >= 0) { 596 (int)pdev->resource[i].start >= 0) {
568 ctlr = EDMA_CTLR(pdev->resource[i].start); 597 ctlr = EDMA_CTLR(pdev->resource[i].start);
569 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), 598 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
570 edma_cc[ctlr]->edma_unused); 599 edma_cc[ctlr]->edma_unused);
571 } 600 }
572 } 601 }
573 602
@@ -1762,11 +1791,6 @@ static int edma_probe(struct platform_device *pdev)
1762 return 0; 1791 return 0;
1763} 1792}
1764 1793
1765static const struct of_device_id edma_of_ids[] = {
1766 { .compatible = "ti,edma3", },
1767 {}
1768};
1769
1770static struct platform_driver edma_driver = { 1794static struct platform_driver edma_driver = {
1771 .driver = { 1795 .driver = {
1772 .name = "edma", 1796 .name = "edma",
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index f3935b46df29..119fc378fc52 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -135,6 +135,7 @@ CONFIG_MMC=y
135CONFIG_MMC_ARMMMCI=y 135CONFIG_MMC_ARMMMCI=y
136CONFIG_MMC_SDHCI=y 136CONFIG_MMC_SDHCI=y
137CONFIG_MMC_SDHCI_PLTFM=y 137CONFIG_MMC_SDHCI_PLTFM=y
138CONFIG_MMC_SDHCI_ESDHC_IMX=y
138CONFIG_MMC_SDHCI_TEGRA=y 139CONFIG_MMC_SDHCI_TEGRA=y
139CONFIG_MMC_SDHCI_SPEAR=y 140CONFIG_MMC_SDHCI_SPEAR=y
140CONFIG_MMC_OMAP=y 141CONFIG_MMC_OMAP=y
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
148@ const AES_KEY *key) { 148@ const AES_KEY *key) {
149.align 5 149.align 5
150ENTRY(AES_encrypt) 150ENTRY(AES_encrypt)
151 sub r3,pc,#8 @ AES_encrypt 151 adr r3,AES_encrypt
152 stmdb sp!,{r1,r4-r12,lr} 152 stmdb sp!,{r1,r4-r12,lr}
153 mov r12,r0 @ inp 153 mov r12,r0 @ inp
154 mov r11,r2 154 mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
381.align 5 381.align 5
382ENTRY(private_AES_set_encrypt_key) 382ENTRY(private_AES_set_encrypt_key)
383_armv4_AES_set_encrypt_key: 383_armv4_AES_set_encrypt_key:
384 sub r3,pc,#8 @ AES_set_encrypt_key 384 adr r3,_armv4_AES_set_encrypt_key
385 teq r0,#0 385 teq r0,#0
386 moveq r0,#-1 386 moveq r0,#-1
387 beq .Labrt 387 beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
843@ const AES_KEY *key) { 843@ const AES_KEY *key) {
844.align 5 844.align 5
845ENTRY(AES_decrypt) 845ENTRY(AES_decrypt)
846 sub r3,pc,#8 @ AES_decrypt 846 adr r3,AES_decrypt
847 stmdb sp!,{r1,r4-r12,lr} 847 stmdb sp!,{r1,r4-r12,lr}
848 mov r12,r0 @ inp 848 mov r12,r0 @ inp
849 mov r11,r2 849 mov r11,r2
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..863c892b4aaa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
16 16
17static __always_inline bool arch_static_branch(struct static_key *key) 17static __always_inline bool arch_static_branch(struct static_key *key)
18{ 18{
19 asm goto("1:\n\t" 19 asm_volatile_goto("1:\n\t"
20 JUMP_LABEL_NOP "\n\t" 20 JUMP_LABEL_NOP "\n\t"
21 ".pushsection __jump_table, \"aw\"\n\t" 21 ".pushsection __jump_table, \"aw\"\n\t"
22 ".word 1b, %l[l_yes], %c0\n\t" 22 ".word 1b, %l[l_yes], %c0\n\t"
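
Editor's note: the jump-label fix above switches from a bare asm goto to the asm_volatile_goto() wrapper the kernel added to work around a GCC problem with asm goto statements being moved or dropped; the wrapper forces an extra compiler barrier right after the asm goto. A hedged sketch of the idea follows: the macro body is an approximation of the kernel wrapper, and the branch site is a simplified stand-in for a real static key (it omits the jump table entry and the key operand).

    #include <linux/types.h>

    /*
     * Approximation of the workaround wrapper: emit the asm goto, then an
     * empty asm statement so the compiler cannot optimise across it.
     */
    #define my_asm_volatile_goto(x...)  do { asm goto(x); asm (""); } while (0)

    static inline bool my_static_branch(void)
    {
        /*
         * Emit a NOP that runtime code could later patch into a branch to
         * l_yes; the real implementation also records the address of
         * label 1 in a __jump_table section.
         */
        my_asm_volatile_goto("1:\n\tnop\n\t" : : : : l_yes);
        return false;
    l_yes:
        return true;
    }
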
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
19#include <asm/unified.h> 19#include <asm/unified.h>
20#include <asm/compiler.h> 20#include <asm/compiler.h>
21 21
22#if __LINUX_ARM_ARCH__ < 6
23#include <asm-generic/uaccess-unaligned.h>
24#else
25#define __get_user_unaligned __get_user
26#define __put_user_unaligned __put_user
27#endif
28
22#define VERIFY_READ 0 29#define VERIFY_READ 0
23#define VERIFY_WRITE 1 30#define VERIFY_WRITE 1
24 31
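
Editor's note: the uaccess change above gives ARM __get_user_unaligned()/__put_user_unaligned(): ARMv6+ maps them straight to __get_user()/__put_user() since the hardware copes with unaligned accesses, while older cores pull in the byte-wise asm-generic fallback. A hedged usage sketch follows; the helper function here is hypothetical, only the accessor names come from the header touched above.

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /*
     * Read a 32-bit value from user memory that is not guaranteed to be
     * naturally aligned, e.g. a field of a packed user-space structure.
     */
    static int read_unaligned_u32_from_user(const u32 __user *uptr, u32 *out)
    {
        if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
            return -EFAULT;

        return __get_user_unaligned(*out, uptr);
    }
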
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
443 443
444 add r1, sp, #S_OFF 444 add r1, sp, #S_OFF
445 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 4452: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back 446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
447 bcs arm_syscall 447 bcs arm_syscall
4482: mov why, #0 @ no longer a real syscall 448 mov why, #0 @ no longer a real syscall
449 b sys_ni_syscall @ not private func 449 b sys_ni_syscall @ not private func
450 450
451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) 451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
329#ifdef CONFIG_CONTEXT_TRACKING 329#ifdef CONFIG_CONTEXT_TRACKING
330 .if \save 330 .if \save
331 stmdb sp!, {r0-r3, ip, lr} 331 stmdb sp!, {r0-r3, ip, lr}
332 bl user_exit 332 bl context_tracking_user_exit
333 ldmia sp!, {r0-r3, ip, lr} 333 ldmia sp!, {r0-r3, ip, lr}
334 .else 334 .else
335 bl user_exit 335 bl context_tracking_user_exit
336 .endif 336 .endif
337#endif 337#endif
338 .endm 338 .endm
@@ -341,10 +341,10 @@
341#ifdef CONFIG_CONTEXT_TRACKING 341#ifdef CONFIG_CONTEXT_TRACKING
342 .if \save 342 .if \save
343 stmdb sp!, {r0-r3, ip, lr} 343 stmdb sp!, {r0-r3, ip, lr}
344 bl user_enter 344 bl context_tracking_user_enter
345 ldmia sp!, {r0-r3, ip, lr} 345 ldmia sp!, {r0-r3, ip, lr}
346 .else 346 .else
347 bl user_enter 347 bl context_tracking_user_enter
348 .endif 348 .endif
349#endif 349#endif
350 .endm 350 .endm
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 71e08baee209..c02ba4af599f 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
58 */ 58 */
59int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 59int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
60{ 60{
61 struct kvm_regs *cpu_reset; 61 struct kvm_regs *reset_regs;
62 const struct kvm_irq_level *cpu_vtimer_irq; 62 const struct kvm_irq_level *cpu_vtimer_irq;
63 63
64 switch (vcpu->arch.target) { 64 switch (vcpu->arch.target) {
65 case KVM_ARM_TARGET_CORTEX_A15: 65 case KVM_ARM_TARGET_CORTEX_A15:
66 if (vcpu->vcpu_id > a15_max_cpu_idx) 66 if (vcpu->vcpu_id > a15_max_cpu_idx)
67 return -EINVAL; 67 return -EINVAL;
68 cpu_reset = &a15_regs_reset; 68 reset_regs = &a15_regs_reset;
69 vcpu->arch.midr = read_cpuid_id(); 69 vcpu->arch.midr = read_cpuid_id();
70 cpu_vtimer_irq = &a15_vtimer_irq; 70 cpu_vtimer_irq = &a15_vtimer_irq;
71 break; 71 break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
74 } 74 }
75 75
76 /* Reset core registers */ 76 /* Reset core registers */
77 memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); 77 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
78 78
79 /* Reset CP15 registers */ 79 /* Reset CP15 registers */
80 kvm_reset_coprocs(vcpu); 80 kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 180b3024bec3..f607deb40f4d 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -93,7 +93,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
93 93
94static struct irqaction at91rm9200_timer_irq = { 94static struct irqaction at91rm9200_timer_irq = {
95 .name = "at91_tick", 95 .name = "at91_tick",
96 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 96 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
97 .handler = at91rm9200_timer_interrupt, 97 .handler = at91rm9200_timer_interrupt,
98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
99}; 99};
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 3a4bc2e1a65e..bb392320a0dd 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -171,7 +171,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
171 171
172static struct irqaction at91sam926x_pit_irq = { 172static struct irqaction at91sam926x_pit_irq = {
173 .name = "at91_tick", 173 .name = "at91_tick",
174 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 174 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
175 .handler = at91sam926x_pit_interrupt, 175 .handler = at91sam926x_pit_interrupt,
176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
177}; 177};
diff --git a/arch/arm/mach-at91/at91sam9g45_reset.S b/arch/arm/mach-at91/at91sam9g45_reset.S
index 721a1a34dd1d..c40c1e2ef80f 100644
--- a/arch/arm/mach-at91/at91sam9g45_reset.S
+++ b/arch/arm/mach-at91/at91sam9g45_reset.S
@@ -16,11 +16,17 @@
16#include "at91_rstc.h" 16#include "at91_rstc.h"
17 .arm 17 .arm
18 18
19/*
 20 * at91_ramc_base is an array of void *,
 21 * initialized to NULL when only one DDR controller is present (board code or DT)
22 */
19 .globl at91sam9g45_restart 23 .globl at91sam9g45_restart
20 24
21at91sam9g45_restart: 25at91sam9g45_restart:
22 ldr r5, =at91_ramc_base @ preload constants 26 ldr r5, =at91_ramc_base @ preload constants
23 ldr r0, [r5] 27 ldr r0, [r5]
28 ldr r5, [r5, #4] @ ddr1
29 cmp r5, #0
24 ldr r4, =at91_rstc_base 30 ldr r4, =at91_rstc_base
25 ldr r1, [r4] 31 ldr r1, [r4]
26 32
@@ -30,6 +36,8 @@ at91sam9g45_restart:
30 36
31 .balign 32 @ align to cache line 37 .balign 32 @ align to cache line
32 38
39 strne r2, [r5, #AT91_DDRSDRC_RTR] @ disable DDR1 access
40 strne r3, [r5, #AT91_DDRSDRC_LPR] @ power down DDR1
33 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access 41 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access
34 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0 42 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0
35 str r4, [r1, #AT91_RSTC_CR] @ reset processor 43 str r4, [r1, #AT91_RSTC_CR] @ reset processor
diff --git a/arch/arm/mach-at91/at91x40_time.c b/arch/arm/mach-at91/at91x40_time.c
index 2919eba41ff4..c0e637adf65d 100644
--- a/arch/arm/mach-at91/at91x40_time.c
+++ b/arch/arm/mach-at91/at91x40_time.c
@@ -57,7 +57,7 @@ static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id)
57 57
58static struct irqaction at91x40_timer_irq = { 58static struct irqaction at91x40_timer_irq = {
59 .name = "at91_tick", 59 .name = "at91_tick",
60 .flags = IRQF_DISABLED | IRQF_TIMER, 60 .flags = IRQF_TIMER,
61 .handler = at91x40_timer_interrupt 61 .handler = at91x40_timer_interrupt
62}; 62};
63 63
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 92b7f770615a..4078ba93776b 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -176,7 +176,7 @@ static struct at24_platform_data eeprom_info = {
176 .context = (void *)0x7f00, 176 .context = (void *)0x7f00,
177}; 177};
178 178
179static struct snd_platform_data dm365_evm_snd_data = { 179static struct snd_platform_data dm365_evm_snd_data __maybe_unused = {
180 .asp_chan_q = EVENTQ_3, 180 .asp_chan_q = EVENTQ_3,
181}; 181};
182 182
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index 52b8571b2e70..ce402cd21fa0 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,8 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18#include <linux/platform_device.h>
19
20#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) 18#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
21#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) 19#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
22#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800) 20#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
@@ -39,6 +37,8 @@
39#define UART_DM646X_SCR_TX_WATERMARK 0x08 37#define UART_DM646X_SCR_TX_WATERMARK 0x08
40 38
41#ifndef __ASSEMBLY__ 39#ifndef __ASSEMBLY__
40#include <linux/platform_device.h>
41
42extern int davinci_serial_init(struct platform_device *); 42extern int davinci_serial_init(struct platform_device *);
43#endif 43#endif
44 44
diff --git a/arch/arm/mach-integrator/pci_v3.h b/arch/arm/mach-integrator/pci_v3.h
index 755fd29fed4a..06a9e2e7d007 100644
--- a/arch/arm/mach-integrator/pci_v3.h
+++ b/arch/arm/mach-integrator/pci_v3.h
@@ -1,2 +1,9 @@
1/* Simple oneliner include to the PCIv3 early init */ 1/* Simple oneliner include to the PCIv3 early init */
2#ifdef CONFIG_PCI
2extern int pci_v3_early_init(void); 3extern int pci_v3_early_init(void);
4#else
5static inline int pci_v3_early_init(void)
6{
7 return 0;
8}
9#endif
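
Editor's note: adding a static inline stub for the !CONFIG_PCI case lets the board code call pci_v3_early_init() unconditionally instead of wrapping every call site in #ifdef CONFIG_PCI. A generic sketch of that header pattern, with a hypothetical feature name:

    /*
     * my_feature.h: real prototype when the feature is built in,
     * a no-op inline stub otherwise, so callers need no #ifdefs.
     */
    #ifdef CONFIG_MY_FEATURE
    int my_feature_early_init(void);
    #else
    static inline int my_feature_early_init(void)
    {
        return 0;
    }
    #endif

    /* A call site can then stay unconditional: */
    static void board_early_init(void)
    {
        my_feature_early_init();
    }
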
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 4c24303ec481..58adf2fd9cfc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -140,6 +140,7 @@ int __init coherency_init(void)
140 coherency_base = of_iomap(np, 0); 140 coherency_base = of_iomap(np, 0);
141 coherency_cpu_base = of_iomap(np, 1); 141 coherency_cpu_base = of_iomap(np, 1);
142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0); 142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
143 of_node_put(np);
143 } 144 }
144 145
145 return 0; 146 return 0;
@@ -147,9 +148,14 @@ int __init coherency_init(void)
147 148
148static int __init coherency_late_init(void) 149static int __init coherency_late_init(void)
149{ 150{
150 if (of_find_matching_node(NULL, of_coherency_table)) 151 struct device_node *np;
152
153 np = of_find_matching_node(NULL, of_coherency_table);
154 if (np) {
151 bus_register_notifier(&platform_bus_type, 155 bus_register_notifier(&platform_bus_type,
152 &mvebu_hwcc_platform_nb); 156 &mvebu_hwcc_platform_nb);
157 of_node_put(np);
158 }
153 return 0; 159 return 0;
154} 160}
155 161
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 3cc4bef6401c..27fc4f049474 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -67,6 +67,7 @@ int __init armada_370_xp_pmsu_init(void)
67 pr_info("Initializing Power Management Service Unit\n"); 67 pr_info("Initializing Power Management Service Unit\n");
68 pmsu_mp_base = of_iomap(np, 0); 68 pmsu_mp_base = of_iomap(np, 0);
69 pmsu_reset_base = of_iomap(np, 1); 69 pmsu_reset_base = of_iomap(np, 1);
70 of_node_put(np);
70 } 71 }
71 72
72 return 0; 73 return 0;
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index f875124ff4f9..5175083cdb34 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -98,6 +98,7 @@ static int __init mvebu_system_controller_init(void)
98 BUG_ON(!match); 98 BUG_ON(!match);
99 system_controller_base = of_iomap(np, 0); 99 system_controller_base = of_iomap(np, 0);
100 mvebu_sc = (struct mvebu_system_controller *)match->data; 100 mvebu_sc = (struct mvebu_system_controller *)match->data;
101 of_node_put(np);
101 } 102 }
102 103
103 return 0; 104 return 0;
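
Editor's note: this mvebu change, like the coherency and pmsu ones around it, fixes a device_node reference leak: of_find_matching_node() returns the node with its reference count raised, so the caller has to drop it with of_node_put() once the pointer itself is no longer needed (the ioremapped registers stay valid). A hedged sketch of the pattern, with placeholder compatible string and names:

    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/of_address.h>

    static const struct of_device_id my_block_table[] = {
        { .compatible = "vendor,my-block" },    /* placeholder */
        { }
    };

    static void __iomem *my_block_base;

    static int __init my_block_init(void)
    {
        struct device_node *np;

        np = of_find_matching_node(NULL, my_block_table);
        if (np) {
            my_block_base = of_iomap(np, 0);
            /*
             * of_find_matching_node() took a reference on np;
             * drop it now that the node pointer is not needed.
             */
            of_node_put(np);
        }
        return 0;
    }
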
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 39c78387ddec..87162e1b94a5 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
129 .restart = omap3xxx_restart, 129 .restart = omap3xxx_restart,
130MACHINE_END 130MACHINE_END
131 131
132static const char *omap36xx_boards_compat[] __initdata = {
133 "ti,omap36xx",
134 NULL,
135};
136
137DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
138 .reserve = omap_reserve,
139 .map_io = omap3_map_io,
140 .init_early = omap3630_init_early,
141 .init_irq = omap_intc_of_init,
142 .handle_irq = omap3_intc_handle_irq,
143 .init_machine = omap_generic_init,
144 .init_late = omap3_init_late,
145 .init_time = omap3_sync32k_timer_init,
146 .dt_compat = omap36xx_boards_compat,
147 .restart = omap3xxx_restart,
148MACHINE_END
149
132static const char *omap3_gp_boards_compat[] __initdata = { 150static const char *omap3_gp_boards_compat[] __initdata = {
133 "ti,omap3-beagle", 151 "ti,omap3-beagle",
134 "timll,omap3-devkit8000", 152 "timll,omap3-devkit8000",
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c3270c0f1fce..f6fe388af989 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
167 .name = "lp5523:kb1", 167 .name = "lp5523:kb1",
168 .chan_nr = 0, 168 .chan_nr = 0,
169 .led_current = 50, 169 .led_current = 50,
170 .max_current = 100,
170 }, { 171 }, {
171 .name = "lp5523:kb2", 172 .name = "lp5523:kb2",
172 .chan_nr = 1, 173 .chan_nr = 1,
173 .led_current = 50, 174 .led_current = 50,
175 .max_current = 100,
174 }, { 176 }, {
175 .name = "lp5523:kb3", 177 .name = "lp5523:kb3",
176 .chan_nr = 2, 178 .chan_nr = 2,
177 .led_current = 50, 179 .led_current = 50,
180 .max_current = 100,
178 }, { 181 }, {
179 .name = "lp5523:kb4", 182 .name = "lp5523:kb4",
180 .chan_nr = 3, 183 .chan_nr = 3,
181 .led_current = 50, 184 .led_current = 50,
185 .max_current = 100,
182 }, { 186 }, {
183 .name = "lp5523:b", 187 .name = "lp5523:b",
184 .chan_nr = 4, 188 .chan_nr = 4,
185 .led_current = 50, 189 .led_current = 50,
190 .max_current = 100,
186 }, { 191 }, {
187 .name = "lp5523:g", 192 .name = "lp5523:g",
188 .chan_nr = 5, 193 .chan_nr = 5,
189 .led_current = 50, 194 .led_current = 50,
195 .max_current = 100,
190 }, { 196 }, {
191 .name = "lp5523:r", 197 .name = "lp5523:r",
192 .chan_nr = 6, 198 .chan_nr = 6,
193 .led_current = 50, 199 .led_current = 50,
200 .max_current = 100,
194 }, { 201 }, {
195 .name = "lp5523:kb5", 202 .name = "lp5523:kb5",
196 .chan_nr = 7, 203 .chan_nr = 7,
197 .led_current = 50, 204 .led_current = 50,
205 .max_current = 100,
198 }, { 206 }, {
199 .name = "lp5523:kb6", 207 .name = "lp5523:kb6",
200 .chan_nr = 8, 208 .chan_nr = 8,
201 .led_current = 50, 209 .led_current = 50,
210 .max_current = 100,
202 } 211 }
203}; 212};
204 213
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 64b5a8346982..8b6876c98ce1 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
272 struct gpmc_timings t; 272 struct gpmc_timings t;
273 int ret; 273 int ret;
274 274
275 if (gpmc_onenand_data->of_node) 275 if (gpmc_onenand_data->of_node) {
276 gpmc_read_settings_dt(gpmc_onenand_data->of_node, 276 gpmc_read_settings_dt(gpmc_onenand_data->of_node,
277 &onenand_async); 277 &onenand_async);
278 if (onenand_async.sync_read || onenand_async.sync_write) {
279 if (onenand_async.sync_write)
280 gpmc_onenand_data->flags |=
281 ONENAND_SYNC_READWRITE;
282 else
283 gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
284 onenand_async.sync_read = false;
285 onenand_async.sync_write = false;
286 }
287 }
278 288
279 omap2_onenand_set_async_mode(onenand_base); 289 omap2_onenand_set_async_mode(onenand_base);
280 290
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 5d2080ef7923..16f78a990d04 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -28,7 +28,7 @@
28#define OMAP_PULL_UP (1 << 4) 28#define OMAP_PULL_UP (1 << 4)
29#define OMAP_ALTELECTRICALSEL (1 << 5) 29#define OMAP_ALTELECTRICALSEL (1 << 5)
30 30
31/* 34xx specific mux bit defines */ 31/* omap3/4/5 specific mux bit defines */
32#define OMAP_INPUT_EN (1 << 8) 32#define OMAP_INPUT_EN (1 << 8)
33#define OMAP_OFF_EN (1 << 9) 33#define OMAP_OFF_EN (1 << 9)
34#define OMAP_OFFOUT_EN (1 << 10) 34#define OMAP_OFFOUT_EN (1 << 10)
@@ -36,8 +36,6 @@
36#define OMAP_OFF_PULL_EN (1 << 12) 36#define OMAP_OFF_PULL_EN (1 << 12)
37#define OMAP_OFF_PULL_UP (1 << 13) 37#define OMAP_OFF_PULL_UP (1 << 13)
38#define OMAP_WAKEUP_EN (1 << 14) 38#define OMAP_WAKEUP_EN (1 << 14)
39
40/* 44xx specific mux bit defines */
41#define OMAP_WAKEUP_EVENT (1 << 15) 39#define OMAP_WAKEUP_EVENT (1 << 15)
42 40
43/* Active pin states */ 41/* Active pin states */
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index fa74a0625da1..ead48fa5715e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
628#endif /* CONFIG_HAVE_ARM_TWD */ 628#endif /* CONFIG_HAVE_ARM_TWD */
629#endif /* CONFIG_ARCH_OMAP4 */ 629#endif /* CONFIG_ARCH_OMAP4 */
630 630
631#ifdef CONFIG_SOC_OMAP5 631#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
632void __init omap5_realtime_timer_init(void) 632void __init omap5_realtime_timer_init(void)
633{ 633{
634 omap4_sync32k_timer_init(); 634 omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
636 636
637 clocksource_of_init(); 637 clocksource_of_init();
638} 638}
639#endif /* CONFIG_SOC_OMAP5 */ 639#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
640 640
641/** 641/**
642 * omap_timer_init - build and register timer device with an 642 * omap_timer_init - build and register timer device with an
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 5bd1479d3deb..7f8f6076d360 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1108,9 +1108,9 @@ static const struct pinctrl_map eva_pinctrl_map[] = {
1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740", 1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740",
1109 "fsib_mclk_in", "fsib"), 1109 "fsib_mclk_in", "fsib"),
1110 /* GETHER */ 1110 /* GETHER */
1111 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1111 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1112 "gether_mii", "gether"), 1112 "gether_mii", "gether"),
1113 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1113 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1114 "gether_int", "gether"), 1114 "gether_int", "gether"),
1115 /* HDMI */ 1115 /* HDMI */
1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740", 1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740",
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index ffb6f0ac7606..5930af8d434f 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -29,6 +29,7 @@
29#include <linux/pinctrl/machine.h> 29#include <linux/pinctrl/machine.h>
30#include <linux/platform_data/gpio-rcar.h> 30#include <linux/platform_data/gpio-rcar.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/phy.h>
32#include <linux/regulator/fixed.h> 33#include <linux/regulator/fixed.h>
33#include <linux/regulator/machine.h> 34#include <linux/regulator/machine.h>
34#include <linux/sh_eth.h> 35#include <linux/sh_eth.h>
@@ -155,6 +156,30 @@ static void __init lager_add_standard_devices(void)
155 &ether_pdata, sizeof(ether_pdata)); 156 &ether_pdata, sizeof(ether_pdata));
156} 157}
157 158
159/*
 160 * Ether LEDs on the Lager board are named LINK and ACTIVE, which correspond
 161 * to the non-default 01 setting of the Micrel KSZ8041 PHY control register 1
 162 * bits 14-15. We have to set them back to 01 from the default 00 value each
 163 * time the PHY is reset. It's also important because the PHY's LED0 signal is
 164 * connected to the SoC's ETH_LINK signal and in the PHY's default mode it will
165 * bounce on and off after each packet, which we apparently want to avoid.
166 */
167static int lager_ksz8041_fixup(struct phy_device *phydev)
168{
169 u16 phyctrl1 = phy_read(phydev, 0x1e);
170
171 phyctrl1 &= ~0xc000;
172 phyctrl1 |= 0x4000;
173 return phy_write(phydev, 0x1e, phyctrl1);
174}
175
176static void __init lager_init(void)
177{
178 lager_add_standard_devices();
179
180 phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
181}
182
158static const char *lager_boards_compat_dt[] __initdata = { 183static const char *lager_boards_compat_dt[] __initdata = {
159 "renesas,lager", 184 "renesas,lager",
160 NULL, 185 NULL,
@@ -163,6 +188,6 @@ static const char *lager_boards_compat_dt[] __initdata = {
163DT_MACHINE_START(LAGER_DT, "lager") 188DT_MACHINE_START(LAGER_DT, "lager")
164 .init_early = r8a7790_init_delay, 189 .init_early = r8a7790_init_delay,
165 .init_time = r8a7790_timer_init, 190 .init_time = r8a7790_timer_init,
166 .init_machine = lager_add_standard_devices, 191 .init_machine = lager_init,
167 .dt_compat = lager_boards_compat_dt, 192 .dt_compat = lager_boards_compat_dt,
168MACHINE_END 193MACHINE_END
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 7aeb5d60e484..e6eb48192912 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -131,6 +131,16 @@ static void tc2_pm_down(u64 residency)
131 } else 131 } else
132 BUG(); 132 BUG();
133 133
134 /*
135 * If the CPU is committed to power down, make sure
136 * the power controller will be in charge of waking it
 137 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU interface
 138 * to the CPU by disabling that interface, so that wfi cannot
 139 * complete behind the power controller's back
140 */
141 if (!skip_wfi)
142 gic_cpu_if_down();
143
134 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 144 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
135 arch_spin_unlock(&tc2_pm_lock); 145 arch_spin_unlock(&tc2_pm_lock);
136 146
@@ -231,7 +241,6 @@ static void tc2_pm_suspend(u64 residency)
231 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); 241 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
232 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 242 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
233 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); 243 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
234 gic_cpu_if_down();
235 tc2_pm_down(residency); 244 tc2_pm_down(residency);
236} 245}
237 246
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe954d49..835c559786bd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,13 +6,6 @@ config FRAME_POINTER
6 bool 6 bool
7 default y 7 default y
8 8
9config DEBUG_STACK_USAGE
10 bool "Enable stack utilization instrumentation"
11 depends on DEBUG_KERNEL
12 help
13 Enables the display of the minimum amount of free stack which each
14 task has ever had available in the sysrq-T output.
15
16config EARLY_PRINTK 9config EARLY_PRINTK
17 bool "Early printk support" 10 bool "Early printk support"
18 default y 11 default y
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5b3e83217b03..31c81e9b792e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -42,7 +42,7 @@ CONFIG_IP_PNP_BOOTP=y
42# CONFIG_WIRELESS is not set 42# CONFIG_WIRELESS is not set
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y 44CONFIG_DEVTMPFS=y
45# CONFIG_BLK_DEV is not set 45CONFIG_BLK_DEV=y
46CONFIG_SCSI=y 46CONFIG_SCSI=y
47# CONFIG_SCSI_PROC_FS is not set 47# CONFIG_SCSI_PROC_FS is not set
48CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
@@ -72,6 +72,7 @@ CONFIG_LOGO=y
72# CONFIG_IOMMU_SUPPORT is not set 72# CONFIG_IOMMU_SUPPORT is not set
73CONFIG_EXT2_FS=y 73CONFIG_EXT2_FS=y
74CONFIG_EXT3_FS=y 74CONFIG_EXT3_FS=y
75CONFIG_EXT4_FS=y
75# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 76# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
76# CONFIG_EXT3_FS_XATTR is not set 77# CONFIG_EXT3_FS_XATTR is not set
77CONFIG_FUSE_FS=y 78CONFIG_FUSE_FS=y
@@ -90,3 +91,5 @@ CONFIG_DEBUG_KERNEL=y
90CONFIG_DEBUG_INFO=y 91CONFIG_DEBUG_INFO=y
91# CONFIG_FTRACE is not set 92# CONFIG_FTRACE is not set
92CONFIG_ATOMIC64_SELFTEST=y 93CONFIG_ATOMIC64_SELFTEST=y
94CONFIG_VIRTIO_MMIO=y
95CONFIG_VIRTIO_BLK=y
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index edb3d5c73a32..7ecc2b23882e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,9 +166,10 @@ do { \
166 166
167#define get_user(x, ptr) \ 167#define get_user(x, ptr) \
168({ \ 168({ \
169 __typeof__(*(ptr)) __user *__p = (ptr); \
169 might_fault(); \ 170 might_fault(); \
170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \ 171 access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
171 __get_user((x), (ptr)) : \ 172 __get_user((x), __p) : \
172 ((x) = 0, -EFAULT); \ 173 ((x) = 0, -EFAULT); \
173}) 174})
174 175
@@ -227,9 +228,10 @@ do { \
227 228
228#define put_user(x, ptr) \ 229#define put_user(x, ptr) \
229({ \ 230({ \
231 __typeof__(*(ptr)) __user *__p = (ptr); \
230 might_fault(); \ 232 might_fault(); \
231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 233 access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
232 __put_user((x), (ptr)) : \ 234 __put_user((x), __p) : \
233 -EFAULT; \ 235 -EFAULT; \
234}) 236})
235 237
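
Editor's note: the arm64 get_user()/put_user() change above evaluates the ptr argument exactly once into a local __p and uses that same pointer for both the access_ok() check and the actual access; with the old form, an argument with side effects (ptr++, a function call) would be evaluated twice and the address checked could differ from the address accessed. A small hedged user-space illustration of the same macro-hygiene rule (not the kernel macro itself; bounds checking is simplified):

    #include <stdio.h>

    #define N 4
    static int table[N] = { 10, 20, 30, 40 };

    /* BAD: 'idx' is evaluated twice, once for the check, once for the access. */
    #define TABLE_GET_BAD(idx)  (((idx) < N) ? table[(idx)] : -1)

    /* GOOD: evaluate the argument once into a local, then reuse it. */
    #define TABLE_GET_GOOD(idx)              \
        ({ int _i = (idx);                   \
           (_i < N) ? table[_i] : -1; })

    int main(void)
    {
        int a = 0, b = 0;

        printf("bad:  %d\n", TABLE_GET_BAD(a++));   /* checks index 0, reads index 1 */
        printf("good: %d\n", TABLE_GET_GOOD(b++));  /* checks and reads index 0 */
        return 0;
    }
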
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 1f2e4d5a5c0f..bb785d23dbde 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -80,8 +80,10 @@ void fpsimd_thread_switch(struct task_struct *next)
80 80
81void fpsimd_flush_thread(void) 81void fpsimd_flush_thread(void)
82{ 82{
83 preempt_disable();
83 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); 84 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
84 fpsimd_load_state(&current->thread.fpsimd_state); 85 fpsimd_load_state(&current->thread.fpsimd_state);
86 preempt_enable();
85} 87}
86 88
87#ifdef CONFIG_KERNEL_MODE_NEON 89#ifdef CONFIG_KERNEL_MODE_NEON
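
Editor's note: fpsimd_flush_thread() clears the task's saved FPSIMD state and then loads it into the CPU registers; without the preempt_disable()/preempt_enable() bracket added above, the task could be preempted and migrated between those two steps and end up loading state on the wrong CPU. A generic hedged sketch of the bracket pattern (the callback and names are placeholders):

    #include <linux/preempt.h>

    /*
     * Update state that is tied to the current CPU's registers without
     * being preempted (and possibly migrated) halfway through.
     */
    static void update_cpu_bound_state(void (*load_state)(void *ctx), void *ctx)
    {
        preempt_disable();
        load_state(ctx);    /* runs to completion on this CPU */
        preempt_enable();
    }
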
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
index 8ae80a18e8ec..19da91e0cd27 100644
--- a/arch/arm64/mm/tlb.S
+++ b/arch/arm64/mm/tlb.S
@@ -35,7 +35,7 @@
35 */ 35 */
36ENTRY(__cpu_flush_user_tlb_range) 36ENTRY(__cpu_flush_user_tlb_range)
37 vma_vm_mm x3, x2 // get vma->vm_mm 37 vma_vm_mm x3, x2 // get vma->vm_mm
38 mmid x3, x3 // get vm_mm->context.id 38 mmid w3, x3 // get vm_mm->context.id
39 dsb sy 39 dsb sy
40 lsr x0, x0, #12 // align address 40 lsr x0, x0, #12 // align address
41 lsr x1, x1, #12 41 lsr x1, x1, #12
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index d22af851f3f6..fd7980743890 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,5 +1,19 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += cputime.h
4generic-y += delay.h
5generic-y += device.h
6generic-y += div64.h
7generic-y += emergency-restart.h
3generic-y += exec.h 8generic-y += exec.h
4generic-y += trace_clock.h 9generic-y += futex.h
10generic-y += irq_regs.h
5generic-y += param.h 11generic-y += param.h
12generic-y += local.h
13generic-y += local64.h
14generic-y += percpu.h
15generic-y += scatterlist.h
16generic-y += sections.h
17generic-y += topology.h
18generic-y += trace_clock.h
19generic-y += xor.h
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644
index e87e0f81cbeb..000000000000
--- a/arch/avr32/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_CPUTIME_H
2#define __ASM_AVR32_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/avr32/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/avr32/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644
index d7ddd4fdeca6..000000000000
--- a/arch/avr32/include/asm/div64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_DIV64_H
2#define __ASM_AVR32_DIV64_H
3
4#include <asm-generic/div64.h>
5
6#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644
index 3e7e014776ba..000000000000
--- a/arch/avr32/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
2#define __ASM_AVR32_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644
index 10419f14a68a..000000000000
--- a/arch/avr32/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_FUTEX_H
2#define __ASM_AVR32_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/avr32/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644
index 1c1619694da3..000000000000
--- a/arch/avr32/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_LOCAL_H
2#define __ASM_AVR32_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/avr32/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644
index 69227b4cd0d4..000000000000
--- a/arch/avr32/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_PERCPU_H
2#define __ASM_AVR32_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644
index a5902d9834e8..000000000000
--- a/arch/avr32/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SCATTERLIST_H
2#define __ASM_AVR32_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644
index aa14252e4181..000000000000
--- a/arch/avr32/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SECTIONS_H
2#define __ASM_AVR32_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644
index 5b766cbb4806..000000000000
--- a/arch/avr32/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_TOPOLOGY_H
2#define __ASM_AVR32_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644
index 99c87aa0af4f..000000000000
--- a/arch/avr32/include/asm/xor.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_XOR_H
2#define _ASM_XOR_H
3
4#include <asm-generic/xor.h>
5
6#endif
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index c2731003edef..42a53e740a7e 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
289 memset(childregs, 0, sizeof(struct pt_regs)); 289 memset(childregs, 0, sizeof(struct pt_regs));
290 p->thread.cpu_context.r0 = arg; 290 p->thread.cpu_context.r0 = arg;
291 p->thread.cpu_context.r1 = usp; /* fn */ 291 p->thread.cpu_context.r1 = usp; /* fn */
292 p->thread.cpu_context.r2 = syscall_return; 292 p->thread.cpu_context.r2 = (unsigned long)syscall_return;
293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread; 293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
294 childregs->sr = MODE_SUPERVISOR; 294 childregs->sr = MODE_SUPERVISOR;
295 } else { 295 } else {
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6ffeee..12f828ad5058 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
98 case CLOCK_EVT_MODE_SHUTDOWN: 98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0); 99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name); 100 pr_debug("%s: stop\n", evdev->name);
101 cpu_idle_poll_ctrl(false); 101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
102 break; 109 break;
103 default: 110 default:
104 BUG(); 111 BUG();
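
The avr32 clockevent fix above makes the SHUTDOWN path undo the forced idle polling only when a previous switch to ONESHOT (or RESUME) actually forced it on, so the cpu_idle_poll_ctrl() enable/disable calls stay balanced. A minimal standalone C sketch of that balanced-toggle pattern; the names and the counter below are illustrative, not the kernel implementation:

#include <assert.h>
#include <stdio.h>

static int idle_poll_forced;            /* outstanding "force poll" requests */

static void idle_poll_ctrl(int enable)  /* stand-in for cpu_idle_poll_ctrl() */
{
        if (enable)
                idle_poll_forced++;
        else
                idle_poll_forced--;
        assert(idle_poll_forced >= 0);  /* an unbalanced disable would trip here */
}

enum mode { MODE_UNUSED, MODE_ONESHOT, MODE_SHUTDOWN };

static void set_mode(enum mode *cur, enum mode next)
{
        if (next == MODE_ONESHOT)
                idle_poll_ctrl(1);                      /* forced on entry to oneshot */
        else if (next == MODE_SHUTDOWN && *cur == MODE_ONESHOT)
                idle_poll_ctrl(0);                      /* only undo what we forced */
        *cur = next;
}

int main(void)
{
        enum mode m = MODE_UNUSED;
        set_mode(&m, MODE_SHUTDOWN);    /* no prior force: nothing to undo */
        set_mode(&m, MODE_ONESHOT);
        set_mode(&m, MODE_SHUTDOWN);    /* balanced disable */
        printf("forced count: %d\n", idle_poll_forced);
        return 0;
}
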
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 4a9baa9f6330..9969dbab19e3 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -276,7 +276,7 @@ static struct platform_device mtx1_pci_host = {
276 .resource = alchemy_pci_host_res, 276 .resource = alchemy_pci_host_res,
277}; 277};
278 278
279static struct __initdata platform_device * mtx1_devs[] = { 279static struct platform_device *mtx1_devs[] __initdata = {
280 &mtx1_pci_host, 280 &mtx1_pci_host,
281 &mtx1_gpio_leds, 281 &mtx1_gpio_leds,
282 &mtx1_wdt, 282 &mtx1_wdt,
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 51680d15ca8e..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -187,7 +187,7 @@
187 187
188/* 188/*
189 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other 189 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
190 * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and 190 * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
191 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels 191 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
192 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 192 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
193 */ 193 */
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9d..e194f957ca8c 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
22 22
23static __always_inline bool arch_static_branch(struct static_key *key) 23static __always_inline bool arch_static_branch(struct static_key *key)
24{ 24{
25 asm goto("1:\tnop\n\t" 25 asm_volatile_goto("1:\tnop\n\t"
26 "nop\n\t" 26 "nop\n\t"
27 ".pushsection __jump_table, \"aw\"\n\t" 27 ".pushsection __jump_table, \"aw\"\n\t"
28 WORD_INSN " 1b, %l[l_yes], %0\n\t" 28 WORD_INSN " 1b, %l[l_yes], %0\n\t"
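
The MIPS jump-label hunk above replaces a bare asm goto with the asm_volatile_goto() wrapper that the kernel introduced to work around a GCC "asm goto" miscompilation. A hedged sketch of how such a wrapper can be exercised; the macro body is an approximation of the kernel's compiler-gcc.h definition of that era, and the x86 jmp operand is purely illustrative (GCC with asm goto support assumed):

#include <stdio.h>

/* Approximate shape of the workaround: an empty asm() after the asm goto
 * keeps the affected GCC versions from mis-optimising around it. */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

static int branch_taken(void)
{
        asm_volatile_goto("jmp %l[l_yes]" : : : : l_yes);       /* x86-only example */
        return 0;
l_yes:
        return 1;
}

int main(void)
{
        printf("branch taken: %d\n", branch_taken());
        return 0;
}
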
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 4204d76af854..029e002a4ea0 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -73,7 +73,7 @@
733: 733:
74 74
75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
76 PTR_L t8, __stack_chk_guard 76 PTR_LA t8, __stack_chk_guard
77 LONG_L t9, TASK_STACK_CANARY(a1) 77 LONG_L t9, TASK_STACK_CANARY(a1)
78 LONG_S t9, 0(t8) 78 LONG_S t9, 0(t8)
79#endif 79#endif
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 38af83f84c4a..20b7b040e76f 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -67,7 +67,7 @@ LEAF(resume)
671: 671:
68 68
69#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 69#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
70 PTR_L t8, __stack_chk_guard 70 PTR_LA t8, __stack_chk_guard
71 LONG_L t9, TASK_STACK_CANARY(a1) 71 LONG_L t9, TASK_STACK_CANARY(a1)
72 LONG_S t9, 0(t8) 72 LONG_S t9, 0(t8)
73#endif 73#endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 921238a6bd26..078de5eaca8f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -69,7 +69,7 @@
691: 691:
70 70
71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
72 PTR_L t8, __stack_chk_guard 72 PTR_LA t8, __stack_chk_guard
73 LONG_L t9, TASK_STACK_CANARY(a1) 73 LONG_L t9, TASK_STACK_CANARY(a1)
74 LONG_S t9, 0(t8) 74 LONG_S t9, 0(t8)
75#endif 75#endif
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 627883bc6d5f..bc6f96fcb529 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -609,6 +609,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
609 r4k_blast_scache(); 609 r4k_blast_scache();
610 else 610 else
611 blast_scache_range(addr, addr + size); 611 blast_scache_range(addr, addr + size);
612 preempt_enable();
612 __sync(); 613 __sync();
613 return; 614 return;
614 } 615 }
@@ -650,6 +651,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
650 */ 651 */
651 blast_inv_scache_range(addr, addr + size); 652 blast_inv_scache_range(addr, addr + size);
652 } 653 }
654 preempt_enable();
653 __sync(); 655 __sync();
654 return; 656 return;
655 } 657 }
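
The c-r4k.c hunks above add the preempt_enable() that was missing on the early-return branches of the scache paths, presumably pairing with a preempt_disable() taken earlier in those functions (not visible in this hunk). A standalone C illustration of the rule that every exit path must restore what the function disabled; names are illustrative, not the kernel API:

#include <assert.h>

static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/* Fixed shape: the early-return branch re-enables before returning. */
static void dma_cache_op(int whole_cache)
{
        preempt_disable();
        if (whole_cache) {
                /* ... blast the whole cache ... */
                preempt_enable();       /* the fix: don't leak the disable here */
                return;
        }
        /* ... range-based flush ... */
        preempt_enable();
}

int main(void)
{
        dma_cache_op(1);
        dma_cache_op(0);
        assert(preempt_count == 0);     /* balanced on both paths */
        return 0;
}
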
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f25a7e9f8cbc..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
308{ 308{
309 int i; 309 int i;
310 310
311 /* Make sure that gcc doesn't leave the empty loop body. */ 311 if (cpu_needs_post_dma_flush(dev))
312 for (i = 0; i < nelems; i++, sg++) { 312 for (i = 0; i < nelems; i++, sg++)
313 if (cpu_needs_post_dma_flush(dev))
314 __dma_sync(sg_page(sg), sg->offset, sg->length, 313 __dma_sync(sg_page(sg), sg->offset, sg->length,
315 direction); 314 direction);
316 }
317} 315}
318 316
319static void mips_dma_sync_sg_for_device(struct device *dev, 317static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
321{ 319{
322 int i; 320 int i;
323 321
324 /* Make sure that gcc doesn't leave the empty loop body. */ 322 if (!plat_device_is_coherent(dev))
325 for (i = 0; i < nelems; i++, sg++) { 323 for (i = 0; i < nelems; i++, sg++)
326 if (!plat_device_is_coherent(dev))
327 __dma_sync(sg_page(sg), sg->offset, sg->length, 324 __dma_sync(sg_page(sg), sg->offset, sg->length,
328 direction); 325 direction);
329 }
330} 326}
331 327
332int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 328int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
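
The dma-default.c change hoists the per-device test (cpu_needs_post_dma_flush() / plat_device_is_coherent()) out of the scatterlist loop: the condition does not depend on the loop variable, so it is now evaluated once, the loop only runs when syncing is needed, and the old "empty loop body" comment becomes unnecessary. A minimal before/after sketch of that loop-invariant hoist with illustrative names:

#include <stdio.h>

struct seg { int len; };

static int device_needs_sync(void) { return 1; }        /* invariant across the loop */
static void sync_one(struct seg *s) { printf("sync %d bytes\n", s->len); }

/* Before: the test is re-evaluated for every element. */
static void sync_all_old(struct seg *sg, int n)
{
        int i;

        for (i = 0; i < n; i++, sg++)
                if (device_needs_sync())
                        sync_one(sg);
}

/* After: test once, loop only when it matters. */
static void sync_all_new(struct seg *sg, int n)
{
        int i;

        if (device_needs_sync())
                for (i = 0; i < n; i++, sg++)
                        sync_one(sg);
}

int main(void)
{
        struct seg sgl[2] = { { 64 }, { 128 } };

        sync_all_old(sgl, 2);
        sync_all_new(sgl, 2);
        return 0;
}
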
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 */ 16 */
17
18#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
19
20#ifndef _ASM_OPENRISC_PROM_H 17#ifndef _ASM_OPENRISC_PROM_H
21#define _ASM_OPENRISC_PROM_H 18#define _ASM_OPENRISC_PROM_H
22#ifdef __KERNEL__
23#ifndef __ASSEMBLY__
24 19
25#include <linux/types.h>
26#include <asm/irq.h>
27#include <linux/irqdomain.h>
28#include <linux/atomic.h>
29#include <linux/of_irq.h>
30#include <linux/of_fdt.h>
31#include <linux/of_address.h>
32#include <linux/proc_fs.h>
33#include <linux/platform_device.h>
34#define HAVE_ARCH_DEVTREE_FIXUPS 20#define HAVE_ARCH_DEVTREE_FIXUPS
35 21
36/* Other Prototypes */
37extern int early_uartlite_console(void);
38
39/* Parse the ibm,dma-window property of an OF node into the busno, phys and
40 * size parameters.
41 */
42void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
43 unsigned long *busno, unsigned long *phys, unsigned long *size);
44
45extern void kdump_move_device_tree(void);
46
47/* Get the MAC address */
48extern const void *of_get_mac_address(struct device_node *np);
49
50/**
51 * of_irq_map_pci - Resolve the interrupt for a PCI device
52 * @pdev: the device whose interrupt is to be resolved
53 * @out_irq: structure of_irq filled by this function
54 *
55 * This function resolves the PCI interrupt for a given PCI device. If a
56 * device-node exists for a given pci_dev, it will use normal OF tree
57 * walking. If not, it will implement standard swizzling and walk up the
58 * PCI tree until an device-node is found, at which point it will finish
59 * resolving using the OF tree walking.
60 */
61struct pci_dev;
62extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
63
64#endif /* __ASSEMBLY__ */
65#endif /* __KERNEL__ */
66#endif /* _ASM_OPENRISC_PROM_H */ 22#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 1945f995f2df..4736020ba5ea 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -6,7 +6,7 @@ struct pt_regs;
6 6
7/* traps.c */ 7/* traps.c */
8void parisc_terminate(char *msg, struct pt_regs *regs, 8void parisc_terminate(char *msg, struct pt_regs *regs,
9 int code, unsigned long offset); 9 int code, unsigned long offset) __noreturn __cold;
10 10
11/* mm/fault.c */ 11/* mm/fault.c */
12void do_page_fault(struct pt_regs *regs, unsigned long code, 12void do_page_fault(struct pt_regs *regs, unsigned long code,
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673209f7..b521c0adf4ec 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -602,6 +602,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
602 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 602 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
603 } 603 }
604} 604}
605EXPORT_SYMBOL_GPL(flush_cache_page);
605 606
606#ifdef CONFIG_PARISC_TMPALIAS 607#ifdef CONFIG_PARISC_TMPALIAS
607 608
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 8a252f2d6c08..2b96602e812f 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -72,7 +72,6 @@ enum ipi_message_type {
72 IPI_NOP=0, 72 IPI_NOP=0,
73 IPI_RESCHEDULE=1, 73 IPI_RESCHEDULE=1,
74 IPI_CALL_FUNC, 74 IPI_CALL_FUNC,
75 IPI_CALL_FUNC_SINGLE,
76 IPI_CPU_START, 75 IPI_CPU_START,
77 IPI_CPU_STOP, 76 IPI_CPU_STOP,
78 IPI_CPU_TEST 77 IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
164 generic_smp_call_function_interrupt(); 163 generic_smp_call_function_interrupt();
165 break; 164 break;
166 165
167 case IPI_CALL_FUNC_SINGLE:
168 smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
169 generic_smp_call_function_single_interrupt();
170 break;
171
172 case IPI_CPU_START: 166 case IPI_CPU_START:
173 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); 167 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
174 break; 168 break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
260 254
261void arch_send_call_function_single_ipi(int cpu) 255void arch_send_call_function_single_ipi(int cpu)
262{ 256{
263 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); 257 send_IPI_single(cpu, IPI_CALL_FUNC);
264} 258}
265 259
266/* 260/*
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a4562..1cd1d0c83b6d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
291 do_exit(SIGSEGV); 291 do_exit(SIGSEGV);
292} 292}
293 293
294int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
295{
296 return syscall(regs);
297}
298
299/* gdb uses break 4,8 */ 294/* gdb uses break 4,8 */
300#define GDB_BREAK_INSN 0x10004 295#define GDB_BREAK_INSN 0x10004
301static void handle_gdb_break(struct pt_regs *regs, int wot) 296static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
805 else { 800 else {
806 801
807 /* 802 /*
808 * The kernel should never fault on its own address space. 803 * The kernel should never fault on its own address space,
804 * unless pagefault_disable() was called before.
809 */ 805 */
810 806
811 if (fault_space == 0) 807 if (fault_space == 0 && !in_atomic())
812 { 808 {
813 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 809 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
814 parisc_terminate("Kernel Fault", regs, code, fault_address); 810 parisc_terminate("Kernel Fault", regs, code, fault_address);
815
816 } 811 }
817 } 812 }
818 813
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index ac4370b1ca40..b5507ec06b84 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -56,7 +56,7 @@
56#ifdef __KERNEL__ 56#ifdef __KERNEL__
57#include <linux/module.h> 57#include <linux/module.h>
58#include <linux/compiler.h> 58#include <linux/compiler.h>
59#include <asm/uaccess.h> 59#include <linux/uaccess.h>
60#define s_space "%%sr1" 60#define s_space "%%sr1"
61#define d_space "%%sr2" 61#define d_space "%%sr2"
62#else 62#else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
524EXPORT_SYMBOL(copy_from_user); 524EXPORT_SYMBOL(copy_from_user);
525EXPORT_SYMBOL(copy_in_user); 525EXPORT_SYMBOL(copy_in_user);
526EXPORT_SYMBOL(memcpy); 526EXPORT_SYMBOL(memcpy);
527
528long probe_kernel_read(void *dst, const void *src, size_t size)
529{
530 unsigned long addr = (unsigned long)src;
531
532 if (size < 0 || addr < PAGE_SIZE)
533 return -EFAULT;
534
535 /* check for I/O space F_EXTEND(0xfff00000) access as well? */
536
537 return __probe_kernel_read(dst, src, size);
538}
539
527#endif 540#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index d10d27a720c0..0293588d5b8c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -171,17 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
171 unsigned long address) 171 unsigned long address)
172{ 172{
173 struct vm_area_struct *vma, *prev_vma; 173 struct vm_area_struct *vma, *prev_vma;
174 struct task_struct *tsk = current; 174 struct task_struct *tsk;
175 struct mm_struct *mm = tsk->mm; 175 struct mm_struct *mm;
176 unsigned long acc_type; 176 unsigned long acc_type;
177 int fault; 177 int fault;
178 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 178 unsigned int flags;
179 179
180 if (in_atomic() || !mm) 180 if (in_atomic())
181 goto no_context; 181 goto no_context;
182 182
183 tsk = current;
184 mm = tsk->mm;
185 if (!mm)
186 goto no_context;
187
188 flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
183 if (user_mode(regs)) 189 if (user_mode(regs))
184 flags |= FAULT_FLAG_USER; 190 flags |= FAULT_FLAG_USER;
191
192 acc_type = parisc_acctyp(code, regs->iir);
185 if (acc_type & VM_WRITE) 193 if (acc_type & VM_WRITE)
186 flags |= FAULT_FLAG_WRITE; 194 flags |= FAULT_FLAG_WRITE;
187retry: 195retry:
@@ -196,8 +204,6 @@ retry:
196 204
197good_area: 205good_area:
198 206
199 acc_type = parisc_acctyp(code,regs->iir);
200
201 if ((vma->vm_flags & acc_type) != acc_type) 207 if ((vma->vm_flags & acc_type) != acc_type)
202 goto bad_area; 208 goto bad_area;
203 209
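
The do_page_fault() rework above delays the current/current->mm lookups and the flags setup until after the in_atomic() bail-out, and computes acc_type before it is first needed for FAULT_FLAG_WRITE rather than only in the good_area path. A small standalone sketch of that "bail out first, touch state second" ordering; everything below is illustrative, not the parisc code:

#include <stdio.h>

struct mm { int users; };
struct task { struct mm *mm; };

static int in_atomic_ctx;

/* Reordered shape: take the cheap bail-out first, then look at task state. */
static int handle_fault(struct task *tsk)
{
        struct mm *mm;

        if (in_atomic_ctx)
                return -1;              /* no_context path */

        mm = tsk->mm;
        if (!mm)
                return -1;

        return mm->users;               /* ... real fault handling ... */
}

int main(void)
{
        struct mm mm = { 1 };
        struct task tsk = { &mm };

        in_atomic_ctx = 1;
        printf("atomic: %d\n", handle_fault(&tsk));
        in_atomic_ctx = 0;
        printf("normal: %d\n", handle_fault(&tsk));
        return 0;
}
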
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c 74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c 75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
76 76
77src-plat-y := of.c 77src-plat-y := of.c epapr.c
78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ 78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
79 treeboot-walnut.c cuboot-acadia.c \ 79 treeboot-walnut.c cuboot-acadia.c \
80 cuboot-kilauea.c simpleboot.c \ 80 cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
97 prpmc2800.c 97 prpmc2800.c
98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c 98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c 99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c 100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
101 101
102src-wlib := $(sort $(src-wlib-y)) 102src-wlib := $(sort $(src-wlib-y))
103src-plat := $(sort $(src-plat-y)) 103src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
1extern void epapr_platform_init(unsigned long r3, unsigned long r4,
2 unsigned long r5, unsigned long r6,
3 unsigned long r7);
4
5void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
6 unsigned long r6, unsigned long r7)
7{
8 epapr_platform_init(r3, r4, r5, r6, r7);
9}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); 48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
49} 49}
50 50
51void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, 51void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
52 unsigned long r6, unsigned long r7) 52 unsigned long r6, unsigned long r7)
53{ 53{
54 epapr_magic = r6; 54 epapr_magic = r6;
55 ima_size = r7; 55 ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
26 26
27static unsigned long claim_base; 27static unsigned long claim_base;
28 28
29void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
30 unsigned long r6, unsigned long r7);
31
29static void *of_try_claim(unsigned long size) 32static void *of_try_claim(unsigned long size)
30{ 33{
31 unsigned long addr = 0; 34 unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
61 } 64 }
62} 65}
63 66
64void platform_init(unsigned long a1, unsigned long a2, void *promptr) 67static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
65{ 68{
66 platform_ops.image_hdr = of_image_hdr; 69 platform_ops.image_hdr = of_image_hdr;
67 platform_ops.malloc = of_try_claim; 70 platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
81 loader_info.initrd_size = a2; 84 loader_info.initrd_size = a2;
82 } 85 }
83} 86}
87
88void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
89 unsigned long r6, unsigned long r7)
90{
91 /* Detect OF vs. ePAPR boot */
92 if (r5)
93 of_platform_init(r3, r4, (void *)r5);
94 else
95 epapr_platform_init(r3, r4, r5, r6, r7);
96}
97
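
In the powerpc boot changes above, of.c keeps a single platform_init() entry point but turns it into a dispatcher: a non-zero r5 is treated as an Open Firmware entry (promptr) and routed to the renamed of_platform_init(), otherwise the ePAPR path is taken; epapr-wrapper.c supplies the plain platform_init() for ePAPR-only images. A small standalone C sketch of that entry-point dispatch; the function names mirror the diff, while the bodies and the test values are stubs:

#include <stdio.h>

static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
{
        printf("OF boot: promptr=%p\n", promptr);
}

static void epapr_platform_init(unsigned long r3, unsigned long r4,
                                unsigned long r5, unsigned long r6,
                                unsigned long r7)
{
        printf("ePAPR boot: r6=%lx r7=%lx\n", r6, r7);
}

/* Single entry point: pick the boot flavour from the register contents. */
static void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
                          unsigned long r6, unsigned long r7)
{
        if (r5)
                of_platform_init(r3, r4, (void *)r5);
        else
                epapr_platform_init(r3, r4, r5, r6, r7);
}

int main(void)
{
        platform_init(0, 0, 0x1000, 0, 0);      /* looks like an OF entry */
        platform_init(0, 0, 0, 0x1234, 0x100);  /* looks like an ePAPR entry */
        return 0;
}
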
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
148 148
149case "$platform" in 149case "$platform" in
150pseries) 150pseries)
151 platformo=$object/of.o 151 platformo="$object/of.o $object/epapr.o"
152 link_address='0x4000000' 152 link_address='0x4000000'
153 ;; 153 ;;
154maple) 154maple)
155 platformo=$object/of.o 155 platformo="$object/of.o $object/epapr.o"
156 link_address='0x400000' 156 link_address='0x400000'
157 ;; 157 ;;
158pmac|chrp) 158pmac|chrp)
159 platformo=$object/of.o 159 platformo="$object/of.o $object/epapr.o"
160 ;; 160 ;;
161coff) 161coff)
162 platformo="$object/crt0.o $object/of.o" 162 platformo="$object/crt0.o $object/of.o $object/epapr.o"
163 lds=$object/zImage.coff.lds 163 lds=$object/zImage.coff.lds
164 link_address='0x500000' 164 link_address='0x500000'
165 pie= 165 pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
253 platformo="$object/treeboot-iss4xx.o" 253 platformo="$object/treeboot-iss4xx.o"
254 ;; 254 ;;
255epapr) 255epapr)
256 platformo="$object/epapr.o $object/epapr-wrapper.o"
256 link_address='0x20000000' 257 link_address='0x20000000'
257 pie=-pie 258 pie=-pie
258 ;; 259 ;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
69 69
70extern void irq_ctx_init(void); 70extern void irq_ctx_init(void);
71extern void call_do_softirq(struct thread_info *tp); 71extern void call_do_softirq(struct thread_info *tp);
72extern int call_handle_irq(int irq, void *p1, 72extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
73 struct thread_info *tp, void *func);
74extern void do_IRQ(struct pt_regs *regs); 73extern void do_IRQ(struct pt_regs *regs);
74extern void __do_irq(struct pt_regs *regs);
75 75
76int irq_choose_cpu(const struct cpumask *mask); 76int irq_choose_cpu(const struct cpumask *mask);
77 77
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
19 19
20static __always_inline bool arch_static_branch(struct static_key *key) 20static __always_inline bool arch_static_branch(struct static_key *key)
21{ 21{
22 asm goto("1:\n\t" 22 asm_volatile_goto("1:\n\t"
23 "nop\n\t" 23 "nop\n\t"
24 ".pushsection __jump_table, \"aw\"\n\t" 24 ".pushsection __jump_table, \"aw\"\n\t"
25 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" 25 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
149 149
150struct thread_struct { 150struct thread_struct {
151 unsigned long ksp; /* Kernel stack pointer */ 151 unsigned long ksp; /* Kernel stack pointer */
152 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
153
154#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC64
155 unsigned long ksp_vsid; 153 unsigned long ksp_vsid;
156#endif 154#endif
@@ -162,6 +160,7 @@ struct thread_struct {
162#endif 160#endif
163#ifdef CONFIG_PPC32 161#ifdef CONFIG_PPC32
164 void *pgdir; /* root of page-table tree */ 162 void *pgdir; /* root of page-table tree */
163 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
165#endif 164#endif
166#ifdef CONFIG_PPC_ADV_DEBUG_REGS 165#ifdef CONFIG_PPC_ADV_DEBUG_REGS
167 /* 166 /*
@@ -321,7 +320,6 @@ struct thread_struct {
321#else 320#else
322#define INIT_THREAD { \ 321#define INIT_THREAD { \
323 .ksp = INIT_SP, \ 322 .ksp = INIT_SP, \
324 .ksp_limit = INIT_SP_LIMIT, \
325 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 323 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
326 .fs = KERNEL_DS, \ 324 .fs = KERNEL_DS, \
327 .fpr = {{0}}, \ 325 .fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); 80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
81#else 81#else
82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
83 DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
84 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
83#endif /* CONFIG_PPC64 */ 85#endif /* CONFIG_PPC64 */
84 86
85 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 87 DEFINE(KSP, offsetof(struct thread_struct, ksp));
86 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
87 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 88 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
88#ifdef CONFIG_BOOKE 89#ifdef CONFIG_BOOKE
89 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); 90 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0adab06ce5c0..572bb5b95f35 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
661 /* number of bytes needed for the bitmap */ 661 /* number of bytes needed for the bitmap */
662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); 662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
663 663
664 page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); 664 page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
665 if (!page) 665 if (!page)
666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz); 666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
667 tbl->it_map = page_address(page); 667 tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..c7cb8c232d2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
441} 441}
442#endif 442#endif
443 443
444static inline void handle_one_irq(unsigned int irq)
445{
446 struct thread_info *curtp, *irqtp;
447 unsigned long saved_sp_limit;
448 struct irq_desc *desc;
449
450 desc = irq_to_desc(irq);
451 if (!desc)
452 return;
453
454 /* Switch to the irq stack to handle this */
455 curtp = current_thread_info();
456 irqtp = hardirq_ctx[smp_processor_id()];
457
458 if (curtp == irqtp) {
459 /* We're already on the irq stack, just handle it */
460 desc->handle_irq(irq, desc);
461 return;
462 }
463
464 saved_sp_limit = current->thread.ksp_limit;
465
466 irqtp->task = curtp->task;
467 irqtp->flags = 0;
468
469 /* Copy the softirq bits in preempt_count so that the
470 * softirq checks work in the hardirq context. */
471 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
472 (curtp->preempt_count & SOFTIRQ_MASK);
473
474 current->thread.ksp_limit = (unsigned long)irqtp +
475 _ALIGN_UP(sizeof(struct thread_info), 16);
476
477 call_handle_irq(irq, desc, irqtp, desc->handle_irq);
478 current->thread.ksp_limit = saved_sp_limit;
479 irqtp->task = NULL;
480
481 /* Set any flag that may have been set on the
482 * alternate stack
483 */
484 if (irqtp->flags)
485 set_bits(irqtp->flags, &curtp->flags);
486}
487
488static inline void check_stack_overflow(void) 444static inline void check_stack_overflow(void)
489{ 445{
490#ifdef CONFIG_DEBUG_STACKOVERFLOW 446#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
501#endif 457#endif
502} 458}
503 459
504void do_IRQ(struct pt_regs *regs) 460void __do_irq(struct pt_regs *regs)
505{ 461{
506 struct pt_regs *old_regs = set_irq_regs(regs); 462 struct irq_desc *desc;
507 unsigned int irq; 463 unsigned int irq;
508 464
509 irq_enter(); 465 irq_enter();
@@ -519,18 +475,57 @@ void do_IRQ(struct pt_regs *regs)
519 */ 475 */
520 irq = ppc_md.get_irq(); 476 irq = ppc_md.get_irq();
521 477
522 /* We can hard enable interrupts now */ 478 /* We can hard enable interrupts now to allow perf interrupts */
523 may_hard_irq_enable(); 479 may_hard_irq_enable();
524 480
525 /* And finally process it */ 481 /* And finally process it */
526 if (irq != NO_IRQ) 482 if (unlikely(irq == NO_IRQ))
527 handle_one_irq(irq);
528 else
529 __get_cpu_var(irq_stat).spurious_irqs++; 483 __get_cpu_var(irq_stat).spurious_irqs++;
484 else {
485 desc = irq_to_desc(irq);
486 if (likely(desc))
487 desc->handle_irq(irq, desc);
488 }
530 489
531 trace_irq_exit(regs); 490 trace_irq_exit(regs);
532 491
533 irq_exit(); 492 irq_exit();
493}
494
495void do_IRQ(struct pt_regs *regs)
496{
497 struct pt_regs *old_regs = set_irq_regs(regs);
498 struct thread_info *curtp, *irqtp, *sirqtp;
499
500 /* Switch to the irq stack to handle this */
501 curtp = current_thread_info();
502 irqtp = hardirq_ctx[raw_smp_processor_id()];
503 sirqtp = softirq_ctx[raw_smp_processor_id()];
504
505 /* Already there ? */
506 if (unlikely(curtp == irqtp || curtp == sirqtp)) {
507 __do_irq(regs);
508 set_irq_regs(old_regs);
509 return;
510 }
511
512 /* Prepare the thread_info in the irq stack */
513 irqtp->task = curtp->task;
514 irqtp->flags = 0;
515
516 /* Copy the preempt_count so that the [soft]irq checks work. */
517 irqtp->preempt_count = curtp->preempt_count;
518
519 /* Switch stack and call */
520 call_do_irq(regs, irqtp);
521
522 /* Restore stack limit */
523 irqtp->task = NULL;
524
525 /* Copy back updates to the thread_info */
526 if (irqtp->flags)
527 set_bits(irqtp->flags, &curtp->flags);
528
534 set_irq_regs(old_regs); 529 set_irq_regs(old_regs);
535} 530}
536 531
@@ -592,28 +587,22 @@ void irq_ctx_init(void)
592 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 587 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
593 tp = softirq_ctx[i]; 588 tp = softirq_ctx[i];
594 tp->cpu = i; 589 tp->cpu = i;
595 tp->preempt_count = 0;
596 590
597 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 591 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
598 tp = hardirq_ctx[i]; 592 tp = hardirq_ctx[i];
599 tp->cpu = i; 593 tp->cpu = i;
600 tp->preempt_count = HARDIRQ_OFFSET;
601 } 594 }
602} 595}
603 596
604static inline void do_softirq_onstack(void) 597static inline void do_softirq_onstack(void)
605{ 598{
606 struct thread_info *curtp, *irqtp; 599 struct thread_info *curtp, *irqtp;
607 unsigned long saved_sp_limit = current->thread.ksp_limit;
608 600
609 curtp = current_thread_info(); 601 curtp = current_thread_info();
610 irqtp = softirq_ctx[smp_processor_id()]; 602 irqtp = softirq_ctx[smp_processor_id()];
611 irqtp->task = curtp->task; 603 irqtp->task = curtp->task;
612 irqtp->flags = 0; 604 irqtp->flags = 0;
613 current->thread.ksp_limit = (unsigned long)irqtp +
614 _ALIGN_UP(sizeof(struct thread_info), 16);
615 call_do_softirq(irqtp); 605 call_do_softirq(irqtp);
616 current->thread.ksp_limit = saved_sp_limit;
617 irqtp->task = NULL; 606 irqtp->task = NULL;
618 607
619 /* Set any flag that may have been set on the 608 /* Set any flag that may have been set on the
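
The irq.c rework above splits interrupt handling in two: __do_irq() performs the irq_enter()/get_irq()/handler/irq_exit() work, while do_IRQ() only decides whether a stack switch is needed; if we are already on the hard- or soft-irq stack it calls __do_irq() directly, otherwise it sets up the irq stack's thread_info and trampolines through call_do_irq(). The ksp_limit save/restore disappears from the C code and is handled instead by the 32-bit assembly trampolines changed just below. A simplified standalone sketch of the "switch context only when not already there" wrapper; everything here is illustrative:

#include <stdio.h>

struct ctx { const char *name; };

static struct ctx task_ctx = { "task stack" };
static struct ctx irq_ctx  = { "irq stack" };
static struct ctx *current_ctx = &task_ctx;

static void do_irq_core(void)           /* stand-in for __do_irq() */
{
        printf("handling interrupt on %s\n", current_ctx->name);
}

/* Wrapper: switch to the irq context unless we are already on it. */
static void do_irq(void)
{
        struct ctx *saved;

        if (current_ctx == &irq_ctx) {
                do_irq_core();          /* nested: no switch needed */
                return;
        }

        saved = current_ctx;
        current_ctx = &irq_ctx;         /* stand-in for call_do_irq() */
        do_irq_core();
        current_ctx = saved;
}

int main(void)
{
        do_irq();                       /* from task context: switches */
        current_ctx = &irq_ctx;
        do_irq();                       /* already on irq stack: direct call */
        return 0;
}
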
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
36 36
37 .text 37 .text
38 38
39/*
40 * We store the saved ksp_limit in the unused part
41 * of the STACK_FRAME_OVERHEAD
42 */
39_GLOBAL(call_do_softirq) 43_GLOBAL(call_do_softirq)
40 mflr r0 44 mflr r0
41 stw r0,4(r1) 45 stw r0,4(r1)
46 lwz r10,THREAD+KSP_LIMIT(r2)
47 addi r11,r3,THREAD_INFO_GAP
42 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) 48 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
43 mr r1,r3 49 mr r1,r3
50 stw r10,8(r1)
51 stw r11,THREAD+KSP_LIMIT(r2)
44 bl __do_softirq 52 bl __do_softirq
53 lwz r10,8(r1)
45 lwz r1,0(r1) 54 lwz r1,0(r1)
46 lwz r0,4(r1) 55 lwz r0,4(r1)
56 stw r10,THREAD+KSP_LIMIT(r2)
47 mtlr r0 57 mtlr r0
48 blr 58 blr
49 59
50_GLOBAL(call_handle_irq) 60_GLOBAL(call_do_irq)
51 mflr r0 61 mflr r0
52 stw r0,4(r1) 62 stw r0,4(r1)
53 mtctr r6 63 lwz r10,THREAD+KSP_LIMIT(r2)
54 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 64 addi r11,r3,THREAD_INFO_GAP
55 mr r1,r5 65 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
56 bctrl 66 mr r1,r4
67 stw r10,8(r1)
68 stw r11,THREAD+KSP_LIMIT(r2)
69 bl __do_irq
70 lwz r10,8(r1)
57 lwz r1,0(r1) 71 lwz r1,0(r1)
58 lwz r0,4(r1) 72 lwz r0,4(r1)
73 stw r10,THREAD+KSP_LIMIT(r2)
59 mtlr r0 74 mtlr r0
60 blr 75 blr
61 76
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
40 mtlr r0 40 mtlr r0
41 blr 41 blr
42 42
43_GLOBAL(call_handle_irq) 43_GLOBAL(call_do_irq)
44 ld r8,0(r6)
45 mflr r0 44 mflr r0
46 std r0,16(r1) 45 std r0,16(r1)
47 mtctr r8 46 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
48 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 47 mr r1,r4
49 mr r1,r5 48 bl .__do_irq
50 bctrl
51 ld r1,0(r1) 49 ld r1,0(r1)
52 ld r0,16(r1) 50 ld r0,16(r1)
53 mtlr r0 51 mtlr r0
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1000 kregs = (struct pt_regs *) sp; 1000 kregs = (struct pt_regs *) sp;
1001 sp -= STACK_FRAME_OVERHEAD; 1001 sp -= STACK_FRAME_OVERHEAD;
1002 p->thread.ksp = sp; 1002 p->thread.ksp = sp;
1003#ifdef CONFIG_PPC32
1003 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 1004 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1004 _ALIGN_UP(sizeof(struct thread_info), 16); 1005 _ALIGN_UP(sizeof(struct thread_info), 16);
1005 1006#endif
1006#ifdef CONFIG_HAVE_HW_BREAKPOINT 1007#ifdef CONFIG_HAVE_HW_BREAKPOINT
1007 p->thread.ptrace_bps[0] = NULL; 1008 p->thread.ptrace_bps[0] = NULL;
1008#endif 1009#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
196 196
197static cell_t __initdata regbuf[1024]; 197static cell_t __initdata regbuf[1024];
198 198
199static bool rtas_has_query_cpu_stopped;
200
199 201
200/* 202/*
201 * Error results ... some OF calls will return "-1" on error, some 203 * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
1574 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1576 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1575 &val, sizeof(val)); 1577 &val, sizeof(val));
1576 1578
1579 /* Check if it supports "query-cpu-stopped-state" */
1580 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1581 &val, sizeof(val)) != PROM_ERROR)
1582 rtas_has_query_cpu_stopped = true;
1583
1577#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__) 1584#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1578 /* PowerVN takeover hack */ 1585 /* PowerVN takeover hack */
1579 prom_rtas_data = base; 1586 prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
1815 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1822 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1816 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1823 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1817 1824
1825 /*
1826 * On pseries, if RTAS supports "query-cpu-stopped-state",
1827 * we skip this stage, the CPUs will be started by the
1828 * kernel using RTAS.
1829 */
1830 if ((of_platform == PLATFORM_PSERIES ||
1831 of_platform == PLATFORM_PSERIES_LPAR) &&
1832 rtas_has_query_cpu_stopped) {
1833 prom_printf("prom_hold_cpus: skipped\n");
1834 return;
1835 }
1836
1818 prom_debug("prom_hold_cpus: start...\n"); 1837 prom_debug("prom_hold_cpus: start...\n");
1819 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1838 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1820 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); 1839 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3011 * On non-powermacs, put all CPUs in spin-loops. 3030 * On non-powermacs, put all CPUs in spin-loops.
3012 * 3031 *
3013 * PowerMacs use a different mechanism to spin CPUs 3032 * PowerMacs use a different mechanism to spin CPUs
3033 *
3034 * (This must be done after instanciating RTAS)
3014 */ 3035 */
3015 if (of_platform != PLATFORM_POWERMAC && 3036 if (of_platform != PLATFORM_POWERMAC &&
3016 of_platform != PLATFORM_OPAL) 3037 of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 27a90b99ef67..b4e667663d9b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
17#include <asm/machdep.h> 17#include <asm/machdep.h>
18#include <asm/smp.h> 18#include <asm/smp.h>
19#include <asm/pmc.h> 19#include <asm/pmc.h>
20#include <asm/firmware.h>
20 21
21#include "cacheinfo.h" 22#include "cacheinfo.h"
22 23
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
179SYSFS_PMCSETUP(dscr, SPRN_DSCR); 180SYSFS_PMCSETUP(dscr, SPRN_DSCR);
180SYSFS_PMCSETUP(pir, SPRN_PIR); 181SYSFS_PMCSETUP(pir, SPRN_PIR);
181 182
183/*
184 Lets only enable read for phyp resources and
185 enable write when needed with a separate function.
186 Lets be conservative and default to pseries.
187*/
182static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 188static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
183static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); 189static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
184static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); 190static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
185static DEVICE_ATTR(purr, 0600, show_purr, store_purr); 191static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
186static DEVICE_ATTR(pir, 0400, show_pir, NULL); 192static DEVICE_ATTR(pir, 0400, show_pir, NULL);
187 193
188unsigned long dscr_default = 0; 194unsigned long dscr_default = 0;
189EXPORT_SYMBOL(dscr_default); 195EXPORT_SYMBOL(dscr_default);
190 196
197static void add_write_permission_dev_attr(struct device_attribute *attr)
198{
199 attr->attr.mode |= 0200;
200}
201
191static ssize_t show_dscr_default(struct device *dev, 202static ssize_t show_dscr_default(struct device *dev,
192 struct device_attribute *attr, char *buf) 203 struct device_attribute *attr, char *buf)
193{ 204{
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
394 if (cpu_has_feature(CPU_FTR_MMCRA)) 405 if (cpu_has_feature(CPU_FTR_MMCRA))
395 device_create_file(s, &dev_attr_mmcra); 406 device_create_file(s, &dev_attr_mmcra);
396 407
397 if (cpu_has_feature(CPU_FTR_PURR)) 408 if (cpu_has_feature(CPU_FTR_PURR)) {
409 if (!firmware_has_feature(FW_FEATURE_LPAR))
410 add_write_permission_dev_attr(&dev_attr_purr);
398 device_create_file(s, &dev_attr_purr); 411 device_create_file(s, &dev_attr_purr);
412 }
399 413
400 if (cpu_has_feature(CPU_FTR_SPURR)) 414 if (cpu_has_feature(CPU_FTR_SPURR))
401 device_create_file(s, &dev_attr_spurr); 415 device_create_file(s, &dev_attr_spurr);
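
The sysfs change above makes the PURR attribute read-only (0400) by default and only ORs the write bit back in, via the new add_write_permission_dev_attr(), when the kernel is not running under LPAR firmware. A tiny standalone sketch of conditionally widening an attribute's mode before registering it; the names and the condition are illustrative:

#include <stdio.h>

struct attr { const char *name; unsigned int mode; };

static void add_write_permission(struct attr *a)
{
        a->mode |= 0200;        /* owner write */
}

static void register_attr(const struct attr *a)
{
        printf("%s registered with mode %04o\n", a->name, a->mode);
}

int main(void)
{
        struct attr purr = { "purr", 0400 };    /* read-only by default */
        int running_on_bare_metal = 1;          /* illustrative condition */

        if (running_on_bare_metal)
                add_write_permission(&purr);
        register_attr(&purr);
        return 0;
}
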
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7b60b9851469..cd809eaa8b5c 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
79 TABORT(R3) 79 TABORT(R3)
80 blr 80 blr
81 81
82 .section ".toc","aw"
83DSCR_DEFAULT:
84 .tc dscr_default[TC],dscr_default
85
86 .section ".text"
82 87
83/* void tm_reclaim(struct thread_struct *thread, 88/* void tm_reclaim(struct thread_struct *thread,
84 * unsigned long orig_msr, 89 * unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
123 mr r15, r14 128 mr r15, r14
124 ori r15, r15, MSR_FP 129 ori r15, r15, MSR_FP
125 li r16, MSR_RI 130 li r16, MSR_RI
131 ori r16, r16, MSR_EE /* IRQs hard off */
126 andc r15, r15, r16 132 andc r15, r15, r16
127 oris r15, r15, MSR_VEC@h 133 oris r15, r15, MSR_VEC@h
128#ifdef CONFIG_VSX 134#ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
187 std r1, PACATMSCRATCH(r13) 193 std r1, PACATMSCRATCH(r13)
188 ld r1, PACAR1(r13) 194 ld r1, PACAR1(r13)
189 195
196 /* Store the PPR in r11 and reset to decent value */
197 std r11, GPR11(r1) /* Temporary stash */
198 mfspr r11, SPRN_PPR
199 HMT_MEDIUM
200
190 /* Now get some more GPRS free */ 201 /* Now get some more GPRS free */
191 std r7, GPR7(r1) /* Temporary stash */ 202 std r7, GPR7(r1) /* Temporary stash */
192 std r12, GPR12(r1) /* '' '' '' */ 203 std r12, GPR12(r1) /* '' '' '' */
193 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ 204 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
194 205
206 std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
207
195 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ 208 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
196 209
197 /* Make r7 look like an exception frame so that we 210 /* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
203 SAVE_GPR(0, r7) /* user r0 */ 216 SAVE_GPR(0, r7) /* user r0 */
204 SAVE_GPR(2, r7) /* user r2 */ 217 SAVE_GPR(2, r7) /* user r2 */
205 SAVE_4GPRS(3, r7) /* user r3-r6 */ 218 SAVE_4GPRS(3, r7) /* user r3-r6 */
206 SAVE_4GPRS(8, r7) /* user r8-r11 */ 219 SAVE_GPR(8, r7) /* user r8 */
220 SAVE_GPR(9, r7) /* user r9 */
221 SAVE_GPR(10, r7) /* user r10 */
207 ld r3, PACATMSCRATCH(r13) /* user r1 */ 222 ld r3, PACATMSCRATCH(r13) /* user r1 */
208 ld r4, GPR7(r1) /* user r7 */ 223 ld r4, GPR7(r1) /* user r7 */
209 ld r5, GPR12(r1) /* user r12 */ 224 ld r5, GPR11(r1) /* user r11 */
210 GET_SCRATCH0(6) /* user r13 */ 225 ld r6, GPR12(r1) /* user r12 */
226 GET_SCRATCH0(8) /* user r13 */
211 std r3, GPR1(r7) 227 std r3, GPR1(r7)
212 std r4, GPR7(r7) 228 std r4, GPR7(r7)
213 std r5, GPR12(r7) 229 std r5, GPR11(r7)
214 std r6, GPR13(r7) 230 std r6, GPR12(r7)
231 std r8, GPR13(r7)
215 232
216 SAVE_NVGPRS(r7) /* user r14-r31 */ 233 SAVE_NVGPRS(r7) /* user r14-r31 */
217 234
@@ -234,14 +251,12 @@ dont_backup_fp:
234 std r6, _XER(r7) 251 std r6, _XER(r7)
235 252
236 253
237 /* ******************** TAR, PPR, DSCR ********** */ 254 /* ******************** TAR, DSCR ********** */
238 mfspr r3, SPRN_TAR 255 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR 256 mfspr r4, SPRN_DSCR
240 mfspr r5, SPRN_DSCR
241 257
242 std r3, THREAD_TM_TAR(r12) 258 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12) 259 std r4, THREAD_TM_DSCR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245 260
246 /* MSR and flags: We don't change CRs, and we don't need to alter 261 /* MSR and flags: We don't change CRs, and we don't need to alter
247 * MSR. 262 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
258 std r3, THREAD_TM_TFHAR(r12) 273 std r3, THREAD_TM_TFHAR(r12)
259 std r4, THREAD_TM_TFIAR(r12) 274 std r4, THREAD_TM_TFIAR(r12)
260 275
261 /* AMR and PPR are checkpointed too, but are unsupported by Linux. */ 276 /* AMR is checkpointed too, but is unsupported by Linux. */
262 277
263 /* Restore original MSR/IRQ state & clear TM mode */ 278 /* Restore original MSR/IRQ state & clear TM mode */
264 ld r14, TM_FRAME_L0(r1) /* Orig MSR */ 279 ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
274 mtcr r4 289 mtcr r4
275 mtlr r0 290 mtlr r0
276 ld r2, 40(r1) 291 ld r2, 40(r1)
292
293 /* Load system default DSCR */
294 ld r4, DSCR_DEFAULT@toc(r2)
295 ld r0, 0(r4)
296 mtspr SPRN_DSCR, r0
297
277 blr 298 blr
278 299
279 300
@@ -358,25 +379,24 @@ dont_restore_fp:
358 379
359restore_gprs: 380restore_gprs:
360 381
361 /* ******************** TAR, PPR, DSCR ********** */ 382 /* ******************** CR,LR,CCR,MSR ********** */
362 ld r4, THREAD_TM_TAR(r3) 383 ld r4, _CTR(r7)
363 ld r5, THREAD_TM_PPR(r3) 384 ld r5, _LINK(r7)
364 ld r6, THREAD_TM_DSCR(r3) 385 ld r6, _CCR(r7)
386 ld r8, _XER(r7)
365 387
366 mtspr SPRN_TAR, r4 388 mtctr r4
367 mtspr SPRN_PPR, r5 389 mtlr r5
368 mtspr SPRN_DSCR, r6 390 mtcr r6
391 mtxer r8
369 392
370 /* ******************** CR,LR,CCR,MSR ********** */ 393 /* ******************** TAR ******************** */
371 ld r3, _CTR(r7) 394 ld r4, THREAD_TM_TAR(r3)
372 ld r4, _LINK(r7) 395 mtspr SPRN_TAR, r4
373 ld r5, _CCR(r7)
374 ld r6, _XER(r7)
375 396
376 mtctr r3 397 /* Load up the PPR and DSCR in GPRs only at this stage */
377 mtlr r4 398 ld r5, THREAD_TM_DSCR(r3)
378 mtcr r5 399 ld r6, THREAD_TM_PPR(r3)
379 mtxer r6
380 400
381 /* Clear the MSR RI since we are about to change R1. EE is already off 401 /* Clear the MSR RI since we are about to change R1. EE is already off
382 */ 402 */
@@ -384,19 +404,26 @@ restore_gprs:
384 mtmsrd r4, 1 404 mtmsrd r4, 1
385 405
386 REST_4GPRS(0, r7) /* GPR0-3 */ 406 REST_4GPRS(0, r7) /* GPR0-3 */
387 REST_GPR(4, r7) /* GPR4-6 */ 407 REST_GPR(4, r7) /* GPR4 */
388 REST_GPR(5, r7)
389 REST_GPR(6, r7)
390 REST_4GPRS(8, r7) /* GPR8-11 */ 408 REST_4GPRS(8, r7) /* GPR8-11 */
391 REST_2GPRS(12, r7) /* GPR12-13 */ 409 REST_2GPRS(12, r7) /* GPR12-13 */
392 410
393 REST_NVGPRS(r7) /* GPR14-31 */ 411 REST_NVGPRS(r7) /* GPR14-31 */
394 412
395 ld r7, GPR7(r7) /* GPR7 */ 413 /* Load up PPR and DSCR here so we don't run with user values for long
414 */
415 mtspr SPRN_DSCR, r5
416 mtspr SPRN_PPR, r6
417
418 REST_GPR(5, r7) /* GPR5-7 */
419 REST_GPR(6, r7)
420 ld r7, GPR7(r7)
396 421
397 /* Commit register state as checkpointed state: */ 422 /* Commit register state as checkpointed state: */
398 TRECHKPT 423 TRECHKPT
399 424
425 HMT_MEDIUM
426
400 /* Our transactional state has now changed. 427 /* Our transactional state has now changed.
401 * 428 *
402 * Now just get out of here. Transactional (current) state will be 429 * Now just get out of here. Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
419 mtcr r4 446 mtcr r4
420 mtlr r0 447 mtlr r0
421 ld r2, 40(r1) 448 ld r2, 40(r1)
449
450 /* Load system default DSCR */
451 ld r4, DSCR_DEFAULT@toc(r2)
452 ld r0, 0(r4)
453 mtspr SPRN_DSCR, r0
454
422 blr 455 blr
423 456
424 /* ****************************************************************** */ 457 /* ****************************************************************** */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 78a350670de3..d38cc08b16c7 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1530 const char *cp; 1530 const char *cp;
1531 1531
1532 dn = dev->of_node; 1532 dn = dev->of_node;
1533 if (!dn) 1533 if (!dn) {
1534 return -ENODEV; 1534 strcat(buf, "\n");
1535 return strlen(buf);
1536 }
1535 cp = of_get_property(dn, "compatible", NULL); 1537 cp = of_get_property(dn, "compatible", NULL);
1536 if (!cp) 1538 if (!cp) {
1537 return -ENODEV; 1539 strcat(buf, "\n");
1540 return strlen(buf);
1541 }
1538 1542
1539 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); 1543 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1540} 1544}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cdd..c71103b8a748 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1066BEGIN_FTR_SECTION 1066BEGIN_FTR_SECTION
1067 mfspr r8, SPRN_DSCR 1067 mfspr r8, SPRN_DSCR
1068 ld r7, HSTATE_DSCR(r13) 1068 ld r7, HSTATE_DSCR(r13)
1069 std r8, VCPU_DSCR(r7) 1069 std r8, VCPU_DSCR(r9)
1070 mtspr SPRN_DSCR, r7 1070 mtspr SPRN_DSCR, r7
1071END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1071END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1072 1072
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..c65593abae8e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
332 unsigned long hva; 332 unsigned long hva;
333 int pfnmap = 0; 333 int pfnmap = 0;
334 int tsize = BOOK3E_PAGESZ_4K; 334 int tsize = BOOK3E_PAGESZ_4K;
335 int ret = 0;
336 unsigned long mmu_seq;
337 struct kvm *kvm = vcpu_e500->vcpu.kvm;
338
339 /* used to check for invalidations in progress */
340 mmu_seq = kvm->mmu_notifier_seq;
341 smp_rmb();
335 342
336 /* 343 /*
337 * Translate guest physical to true physical, acquiring 344 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
449 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 456 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
450 } 457 }
451 458
459 spin_lock(&kvm->mmu_lock);
460 if (mmu_notifier_retry(kvm, mmu_seq)) {
461 ret = -EAGAIN;
462 goto out;
463 }
464
452 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 465 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
453 466
454 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 467 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
457 /* Clear i-cache for new pages */ 470 /* Clear i-cache for new pages */
458 kvmppc_mmu_flush_icache(pfn); 471 kvmppc_mmu_flush_icache(pfn);
459 472
473out:
474 spin_unlock(&kvm->mmu_lock);
475
460 /* Drop refcount on page, so that mmu notifiers can clear it */ 476 /* Drop refcount on page, so that mmu notifiers can clear it */
461 kvm_release_pfn_clean(pfn); 477 kvm_release_pfn_clean(pfn);
462 478
463 return 0; 479 return ret;
464} 480}
465 481
466/* XXX only map the one-one case, for now use TLB0 */ 482/* XXX only map the one-one case, for now use TLB0 */
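
The e500 KVM hunk above wraps the shadow-TLB setup in the usual MMU-notifier retry protocol: sample kvm->mmu_notifier_seq (with an smp_rmb()), do the translation work, then take mmu_lock and bail out with -EAGAIN if mmu_notifier_retry() reports that an invalidation ran in between. The same optimistic "snapshot a sequence number, redo if it moved" idea in a small standalone sketch; it is single-threaded purely to show the control flow, and the names are illustrative:

#include <errno.h>
#include <stdio.h>

static unsigned long notifier_seq;      /* bumped by (simulated) invalidations */

static int seq_changed(unsigned long snapshot)
{
        return notifier_seq != snapshot;
}

static int map_page(int simulate_invalidation)
{
        unsigned long seq = notifier_seq;       /* 1. snapshot before the slow work */

        /* 2. slow translation work happens here, outside the lock ... */
        if (simulate_invalidation)
                notifier_seq++;                 /* an invalidation raced with us */

        /* 3. under the lock: only commit if nothing changed meanwhile */
        if (seq_changed(seq))
                return -EAGAIN;                 /* caller retries the mapping */

        return 0;                               /* commit the shadow entry */
}

int main(void)
{
        printf("clean run: %d\n", map_page(0));
        printf("raced run: %d\n", map_page(1));
        return 0;
}
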
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
226 blr 226 blr
227 227
228 228
229 .macro source 229 .macro srcnr
230100: 230100:
231 .section __ex_table,"a" 231 .section __ex_table,"a"
232 .align 3 232 .align 3
233 .llong 100b,.Lsrc_error 233 .llong 100b,.Lsrc_error_nr
234 .previous 234 .previous
235 .endm 235 .endm
236 236
237 .macro dest 237 .macro source
238150:
239 .section __ex_table,"a"
240 .align 3
241 .llong 150b,.Lsrc_error
242 .previous
243 .endm
244
245 .macro dstnr
238200: 246200:
239 .section __ex_table,"a" 247 .section __ex_table,"a"
240 .align 3 248 .align 3
241 .llong 200b,.Ldest_error 249 .llong 200b,.Ldest_error_nr
250 .previous
251 .endm
252
253 .macro dest
254250:
255 .section __ex_table,"a"
256 .align 3
257 .llong 250b,.Ldest_error
242 .previous 258 .previous
243 .endm 259 .endm
244 260
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
269 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ 285 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
270 beq .Lcopy_aligned 286 beq .Lcopy_aligned
271 287
272 li r7,4 288 li r9,4
273 sub r6,r7,r6 289 sub r6,r9,r6
274 mtctr r6 290 mtctr r6
275 291
2761: 2921:
277source; lhz r6,0(r3) /* align to doubleword */ 293srcnr; lhz r6,0(r3) /* align to doubleword */
278 subi r5,r5,2 294 subi r5,r5,2
279 addi r3,r3,2 295 addi r3,r3,2
280 adde r0,r0,r6 296 adde r0,r0,r6
281dest; sth r6,0(r4) 297dstnr; sth r6,0(r4)
282 addi r4,r4,2 298 addi r4,r4,2
283 bdnz 1b 299 bdnz 1b
284 300
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
392 408
393 mtctr r6 409 mtctr r6
3943: 4103:
395source; ld r6,0(r3) 411srcnr; ld r6,0(r3)
396 addi r3,r3,8 412 addi r3,r3,8
397 adde r0,r0,r6 413 adde r0,r0,r6
398dest; std r6,0(r4) 414dstnr; std r6,0(r4)
399 addi r4,r4,8 415 addi r4,r4,8
400 bdnz 3b 416 bdnz 3b
401 417
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
405 srdi. r6,r5,2 421 srdi. r6,r5,2
406 beq .Lcopy_tail_halfword 422 beq .Lcopy_tail_halfword
407 423
408source; lwz r6,0(r3) 424srcnr; lwz r6,0(r3)
409 addi r3,r3,4 425 addi r3,r3,4
410 adde r0,r0,r6 426 adde r0,r0,r6
411dest; stw r6,0(r4) 427dstnr; stw r6,0(r4)
412 addi r4,r4,4 428 addi r4,r4,4
413 subi r5,r5,4 429 subi r5,r5,4
414 430
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
416 srdi. r6,r5,1 432 srdi. r6,r5,1
417 beq .Lcopy_tail_byte 433 beq .Lcopy_tail_byte
418 434
419source; lhz r6,0(r3) 435srcnr; lhz r6,0(r3)
420 addi r3,r3,2 436 addi r3,r3,2
421 adde r0,r0,r6 437 adde r0,r0,r6
422dest; sth r6,0(r4) 438dstnr; sth r6,0(r4)
423 addi r4,r4,2 439 addi r4,r4,2
424 subi r5,r5,2 440 subi r5,r5,2
425 441
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
427 andi. r6,r5,1 443 andi. r6,r5,1
428 beq .Lcopy_finish 444 beq .Lcopy_finish
429 445
430source; lbz r6,0(r3) 446srcnr; lbz r6,0(r3)
431 sldi r9,r6,8 /* Pad the byte out to 16 bits */ 447 sldi r9,r6,8 /* Pad the byte out to 16 bits */
432 adde r0,r0,r9 448 adde r0,r0,r9
433dest; stb r6,0(r4) 449dstnr; stb r6,0(r4)
434 450
435.Lcopy_finish: 451.Lcopy_finish:
436 addze r0,r0 /* add in final carry */ 452 addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
440 blr 456 blr
441 457
442.Lsrc_error: 458.Lsrc_error:
459 ld r14,STK_REG(R14)(r1)
460 ld r15,STK_REG(R15)(r1)
461 ld r16,STK_REG(R16)(r1)
462 addi r1,r1,STACKFRAMESIZE
463.Lsrc_error_nr:
443 cmpdi 0,r7,0 464 cmpdi 0,r7,0
444 beqlr 465 beqlr
445 li r6,-EFAULT 466 li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
447 blr 468 blr
448 469
449.Ldest_error: 470.Ldest_error:
471 ld r14,STK_REG(R14)(r1)
472 ld r15,STK_REG(R15)(r1)
473 ld r16,STK_REG(R16)(r1)
474 addi r1,r1,STACKFRAMESIZE
475.Ldest_error_nr:
450 cmpdi 0,r8,0 476 cmpdi 0,r8,0
451 beqlr 477 beqlr
452 li r6,-EFAULT 478 li r6,-EFAULT
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1505 */ 1505 */
1506 if ((ra == 1) && !(regs->msr & MSR_PR) \ 1506 if ((ra == 1) && !(regs->msr & MSR_PR) \
1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) { 1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1508#ifdef CONFIG_PPC32
1508 /* 1509 /*
1509 * Check if we will touch kernel sack overflow 1510 * Check if we will touch kernel sack overflow
1510 */ 1511 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1513 err = -EINVAL; 1514 err = -EINVAL;
1514 break; 1515 break;
1515 } 1516 }
1516 1517#endif /* CONFIG_PPC32 */
1517 /* 1518 /*
1518 * Check if we already set since that means we'll 1519 * Check if we already set since that means we'll
1519 * lose the previous value. 1520 * lose the previous value.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0cd9e4c6837..8ed035d2edb5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
300{ 300{
301} 301}
302 302
303void register_page_bootmem_memmap(unsigned long section_nr,
304 struct page *start_page, unsigned long size)
305{
306}
303#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 307#endif /* CONFIG_SPARSEMEM_VMEMMAP */
304 308
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1cf9c5b67f24..3fa93dc7fe75 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -297,12 +297,21 @@ void __init paging_init(void)
297} 297}
298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */ 298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
299 299
300static void __init register_page_bootmem_info(void)
301{
302 int i;
303
304 for_each_online_node(i)
305 register_page_bootmem_info_node(NODE_DATA(i));
306}
307
300void __init mem_init(void) 308void __init mem_init(void)
301{ 309{
302#ifdef CONFIG_SWIOTLB 310#ifdef CONFIG_SWIOTLB
303 swiotlb_init(0); 311 swiotlb_init(0);
304#endif 312#endif
305 313
314 register_page_bootmem_info();
306 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 315 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
307 set_max_mapnr(max_pfn); 316 set_max_mapnr(max_pfn);
308 free_all_bootmem(); 317 free_all_bootmem();
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 2ee4a707f0df..a3f7abd2f13f 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -199,6 +199,7 @@
199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) 199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) 200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) 201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
202#define MMCR1_FAB_SHIFT 36
202#define MMCR1_DC_QUAL_SHIFT 47 203#define MMCR1_DC_QUAL_SHIFT 47
203#define MMCR1_IC_QUAL_SHIFT 46 204#define MMCR1_IC_QUAL_SHIFT 46
204 205
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
388 * the threshold bits are used for the match value. 389 * the threshold bits are used for the match value.
389 */ 390 */
390 if (event_is_fab_match(event[i])) { 391 if (event_is_fab_match(event[i])) {
391 mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) & 392 mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
392 EVENT_THR_CTL_MASK; 393 EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
393 } else { 394 } else {
394 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; 395 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
395 mmcra |= val << MMCRA_THR_CTL_SHIFT; 396 mmcra |= val << MMCRA_THR_CTL_SHIFT;
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
233 233
234 alloc_bootmem_cpumask_var(&of_spin_mask); 234 alloc_bootmem_cpumask_var(&of_spin_mask);
235 235
236 /* Mark threads which are still spinning in hold loops. */ 236 /*
237 if (cpu_has_feature(CPU_FTR_SMT)) { 237 * Mark threads which are still spinning in hold loops
238 for_each_present_cpu(i) { 238 *
239 if (cpu_thread_in_core(i) == 0) 239 * We know prom_init will not have started them if RTAS supports
240 cpumask_set_cpu(i, of_spin_mask); 240 * query-cpu-stopped-state.
241 } 241 */
242 } else { 242 if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
243 cpumask_copy(of_spin_mask, cpu_present_mask); 243 if (cpu_has_feature(CPU_FTR_SMT)) {
244 for_each_present_cpu(i) {
245 if (cpu_thread_in_core(i) == 0)
246 cpumask_set_cpu(i, of_spin_mask);
247 }
248 } else
249 cpumask_copy(of_spin_mask, cpu_present_mask);
250
251 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
244 } 252 }
245 253
246 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
247
248 /* Non-lpar has additional take/give timebase */ 254 /* Non-lpar has additional take/give timebase */
249 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { 255 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
250 smp_ops->give_timebase = rtas_give_timebase; 256 smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
93 select ARCH_INLINE_WRITE_UNLOCK_IRQ 93 select ARCH_INLINE_WRITE_UNLOCK_IRQ
94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
96 select ARCH_USE_CMPXCHG_LOCKREF
96 select ARCH_WANT_IPC_PARSE_VERSION 97 select ARCH_WANT_IPC_PARSE_VERSION
97 select BUILDTIME_EXTABLE_SORT 98 select BUILDTIME_EXTABLE_SORT
98 select CLONE_BACKWARDS2 99 select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
102 select GENERIC_TIME_VSYSCALL_OLD 103 select GENERIC_TIME_VSYSCALL_OLD
103 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 104 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
104 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 105 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
105 select HAVE_ARCH_MUTEX_CPU_RELAX
106 select HAVE_ARCH_SECCOMP_FILTER 106 select HAVE_ARCH_SECCOMP_FILTER
107 select HAVE_ARCH_TRACEHOOK 107 select HAVE_ARCH_TRACEHOOK
108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
15 15
16static __always_inline bool arch_static_branch(struct static_key *key) 16static __always_inline bool arch_static_branch(struct static_key *key)
17{ 17{
18 asm goto("0: brcl 0,0\n" 18 asm_volatile_goto("0: brcl 0,0\n"
19 ".pushsection __jump_table, \"aw\"\n" 19 ".pushsection __jump_table, \"aw\"\n"
20 ASM_ALIGN "\n" 20 ASM_ALIGN "\n"
21 ASM_PTR " 0b, %l[label], %0\n" 21 ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
7 */ 7 */
8 8
9#include <asm-generic/mutex-dec.h> 9#include <asm-generic/mutex-dec.h>
10
11#define arch_mutex_cpu_relax() barrier()
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
198 barrier(); 198 barrier();
199} 199}
200 200
201#define arch_mutex_cpu_relax() barrier()
202
201static inline void psw_set_key(unsigned int key) 203static inline void psw_set_key(unsigned int key)
202{ 204{
203 asm volatile("spka 0(%0)" : : "d" (key)); 205 asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44extern int arch_spin_trylock_retry(arch_spinlock_t *); 44extern int arch_spin_trylock_retry(arch_spinlock_t *);
45extern void arch_spin_relax(arch_spinlock_t *lock); 45extern void arch_spin_relax(arch_spinlock_t *lock);
46 46
47static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
48{
49 return lock.owner_cpu == 0;
50}
51
47static inline void arch_spin_lock(arch_spinlock_t *lp) 52static inline void arch_spin_lock(arch_spinlock_t *lp)
48{ 53{
49 int old; 54 int old;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7b..7dd21720e5b0 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
40} 40}
41 41
42/* 42/*
43 * Copy up to one page to vmalloc or real memory 43 * Copy real to virtual or real memory
44 */ 44 */
45static ssize_t copy_page_real(void *buf, void *src, size_t csize) 45static int copy_from_realmem(void *dest, void *src, size_t count)
46{ 46{
47 size_t size; 47 unsigned long size;
48 int rc;
48 49
49 if (is_vmalloc_addr(buf)) { 50 if (!count)
50 BUG_ON(csize >= PAGE_SIZE); 51 return 0;
51 /* If buf is not page aligned, copy first part */ 52 if (!is_vmalloc_or_module_addr(dest))
52 size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize); 53 return memcpy_real(dest, src, count);
53 if (size) { 54 do {
54 if (memcpy_real(load_real_addr(buf), src, size)) 55 size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
55 return -EFAULT; 56 if (memcpy_real(load_real_addr(dest), src, size))
56 buf += size; 57 return -EFAULT;
57 src += size; 58 count -= size;
58 } 59 dest += size;
59 /* Copy second part */ 60 src += size;
60 size = csize - size; 61 } while (count);
61 return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0; 62 return 0;
62 } else {
63 return memcpy_real(buf, src, csize);
64 }
65} 63}
66 64
67/* 65/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
114 rc = copy_to_user_real((void __force __user *) buf, 112 rc = copy_to_user_real((void __force __user *) buf,
115 (void *) src, csize); 113 (void *) src, csize);
116 else 114 else
117 rc = copy_page_real(buf, (void *) src, csize); 115 rc = copy_from_realmem(buf, (void *) src, csize);
118 return (rc == 0) ? rc : csize; 116 return (rc == 0) ? rc : csize;
119} 117}
120 118
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
210 if (OLDMEM_BASE) { 208 if (OLDMEM_BASE) {
211 if ((unsigned long) src < OLDMEM_SIZE) { 209 if ((unsigned long) src < OLDMEM_SIZE) {
212 copied = min(count, OLDMEM_SIZE - (unsigned long) src); 210 copied = min(count, OLDMEM_SIZE - (unsigned long) src);
213 rc = memcpy_real(dest, src + OLDMEM_BASE, copied); 211 rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
214 if (rc) 212 if (rc)
215 return rc; 213 return rc;
216 } 214 }
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
223 return rc; 221 return rc;
224 } 222 }
225 } 223 }
226 return memcpy_real(dest + copied, src + copied, count - copied); 224 return copy_from_realmem(dest + copied, src + copied, count - copied);
227} 225}
228 226
229/* 227/*
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000c..0dc2b6d0a1ec 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
266 tm __TI_flags+3(%r12),_TIF_SYSCALL 266 tm __TI_flags+3(%r12),_TIF_SYSCALL
267 jno sysc_return 267 jno sysc_return
268 lm %r2,%r7,__PT_R2(%r11) # load svc arguments 268 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
269 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
269 xr %r8,%r8 # svc 0 returns -ENOSYS 270 xr %r8,%r8 # svc 0 returns -ENOSYS
270 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) 271 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
271 jnl sysc_nr_ok # invalid svc number -> do svc 0 272 jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6a..e5b43c97a834 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
297 tm __TI_flags+7(%r12),_TIF_SYSCALL 297 tm __TI_flags+7(%r12),_TIF_SYSCALL
298 jno sysc_return 298 jno sysc_return
299 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 299 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
300 lg %r10,__TI_sysc_table(%r12) # address of system call table
300 lghi %r8,0 # svc 0 returns -ENOSYS 301 lghi %r8,0 # svc 0 returns -ENOSYS
301 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number 302 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
302 cghi %r1,NR_syscalls 303 cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb245034..d86e64eddb42 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
67 case 0xac: /* stnsm */ 67 case 0xac: /* stnsm */
68 case 0xad: /* stosm */ 68 case 0xad: /* stosm */
69 return -EINVAL; 69 return -EINVAL;
70 case 0xc6:
71 switch (insn[0] & 0x0f) {
72 case 0x00: /* exrl */
73 return -EINVAL;
74 }
70 } 75 }
71 switch (insn[0]) { 76 switch (insn[0]) {
72 case 0x0101: /* pr */ 77 case 0x0101: /* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
180 break; 185 break;
181 case 0xc6: 186 case 0xc6:
182 switch (insn[0] & 0x0f) { 187 switch (insn[0] & 0x0f) {
183 case 0x00: /* exrl */
184 case 0x02: /* pfdrl */ 188 case 0x02: /* pfdrl */
185 case 0x04: /* cghrl */ 189 case 0x04: /* cghrl */
186 case 0x05: /* chrl */ 190 case 0x05: /* chrl */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index a1be70db75fe..305f7ee1f382 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -2,6 +2,7 @@ menu "Machine selection"
2 2
3config SCORE 3config SCORE
4 def_bool y 4 def_bool y
5 select HAVE_GENERIC_HARDIRQS
5 select GENERIC_IRQ_SHOW 6 select GENERIC_IRQ_SHOW
6 select GENERIC_IOMAP 7 select GENERIC_IOMAP
7 select GENERIC_ATOMIC64 8 select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
110source "crypto/Kconfig" 111source "crypto/Kconfig"
111 112
112source "lib/Kconfig" 113source "lib/Kconfig"
114
115config NO_IOMEM
116 def_bool y
diff --git a/arch/score/Makefile b/arch/score/Makefile
index 974aefe86123..9e3e060290e0 100644
--- a/arch/score/Makefile
+++ b/arch/score/Makefile
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
20# 20#
21KBUILD_AFLAGS += $(cflags-y) 21KBUILD_AFLAGS += $(cflags-y)
22KBUILD_CFLAGS += $(cflags-y) 22KBUILD_CFLAGS += $(cflags-y)
23KBUILD_AFLAGS_MODULE += -mlong-calls 23KBUILD_AFLAGS_MODULE +=
24KBUILD_CFLAGS_MODULE += -mlong-calls 24KBUILD_CFLAGS_MODULE +=
25LDFLAGS += --oformat elf32-littlescore 25LDFLAGS += --oformat elf32-littlescore
26LDFLAGS_vmlinux += -G0 -static -nostdlib 26LDFLAGS_vmlinux += -G0 -static -nostdlib
27 27
diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h
index f909ac3144a4..961bd64015a8 100644
--- a/arch/score/include/asm/checksum.h
+++ b/arch/score/include/asm/checksum.h
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
184 __wsum sum) 184 __wsum sum)
185{ 185{
186 __asm__ __volatile__( 186 __asm__ __volatile__(
187 ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" 187 ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
188 ".set\tnoat\n\t" 188 "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
189 "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" 189 "cmp.c\t%5, %0\n\t"
190 "sltu\t$1, %0, %5\n\t" 190 "bleu 1f\n\t"
191 "addu\t%0, $1\n\t" 191 "addi\t%0, 0x1\n\t"
192 "addu\t%0, %6\t\t\t# csum\n\t" 192 "1:add\t%0, %0, %6\t\t\t# csum\n\t"
193 "sltu\t$1, %0, %6\n\t" 193 "cmp.c\t%6, %0\n\t"
194 "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" 194 "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
195 "addu\t%0, $1\n\t" 195 "bleu 1f\n\t"
196 "addu\t%0, %1\n\t" 196 "addi\t%0, 0x1\n\t"
197 "sltu\t$1, %0, %1\n\t" 197 "1:add\t%0, %0, %1\n\t"
198 "lw\t%1, 4(%2)\n\t" 198 "cmp.c\t%1, %0\n\t"
199 "addu\t%0, $1\n\t" 199 "1:lw\t%1, [%2, 4]\n\t"
200 "addu\t%0, %1\n\t" 200 "bleu 1f\n\t"
201 "sltu\t$1, %0, %1\n\t" 201 "addi\t%0, 0x1\n\t"
202 "lw\t%1, 8(%2)\n\t" 202 "1:add\t%0, %0, %1\n\t"
203 "addu\t%0, $1\n\t" 203 "cmp.c\t%1, %0\n\t"
204 "addu\t%0, %1\n\t" 204 "lw\t%1, [%2,8]\n\t"
205 "sltu\t$1, %0, %1\n\t" 205 "bleu 1f\n\t"
206 "lw\t%1, 12(%2)\n\t" 206 "addi\t%0, 0x1\n\t"
207 "addu\t%0, $1\n\t" 207 "1:add\t%0, %0, %1\n\t"
208 "addu\t%0, %1\n\t" 208 "cmp.c\t%1, %0\n\t"
209 "sltu\t$1, %0, %1\n\t" 209 "lw\t%1, [%2, 12]\n\t"
210 "lw\t%1, 0(%3)\n\t" 210 "bleu 1f\n\t"
211 "addu\t%0, $1\n\t" 211 "addi\t%0, 0x1\n\t"
212 "addu\t%0, %1\n\t" 212 "1:add\t%0, %0,%1\n\t"
213 "sltu\t$1, %0, %1\n\t" 213 "cmp.c\t%1, %0\n\t"
214 "lw\t%1, 4(%3)\n\t" 214 "lw\t%1, [%3, 0]\n\t"
215 "addu\t%0, $1\n\t" 215 "bleu 1f\n\t"
216 "addu\t%0, %1\n\t" 216 "addi\t%0, 0x1\n\t"
217 "sltu\t$1, %0, %1\n\t" 217 "1:add\t%0, %0, %1\n\t"
218 "lw\t%1, 8(%3)\n\t" 218 "cmp.c\t%1, %0\n\t"
219 "addu\t%0, $1\n\t" 219 "lw\t%1, [%3, 4]\n\t"
220 "addu\t%0, %1\n\t" 220 "bleu 1f\n\t"
221 "sltu\t$1, %0, %1\n\t" 221 "addi\t%0, 0x1\n\t"
222 "lw\t%1, 12(%3)\n\t" 222 "1:add\t%0, %0, %1\n\t"
223 "addu\t%0, $1\n\t" 223 "cmp.c\t%1, %0\n\t"
224 "addu\t%0, %1\n\t" 224 "lw\t%1, [%3, 8]\n\t"
225 "sltu\t$1, %0, %1\n\t" 225 "bleu 1f\n\t"
226 "addu\t%0, $1\t\t\t# Add final carry\n\t" 226 "addi\t%0, 0x1\n\t"
227 ".set\tnoat\n\t" 227 "1:add\t%0, %0, %1\n\t"
228 ".set\tnoreorder" 228 "cmp.c\t%1, %0\n\t"
229 "lw\t%1, [%3, 12]\n\t"
230 "bleu 1f\n\t"
231 "addi\t%0, 0x1\n\t"
232 "1:add\t%0, %0, %1\n\t"
233 "cmp.c\t%1, %0\n\t"
234 "bleu 1f\n\t"
235 "addi\t%0, 0x1\n\t"
236 "1:\n\t"
237 ".set\toptimize"
229 : "=r" (sum), "=r" (proto) 238 : "=r" (sum), "=r" (proto)
230 : "r" (saddr), "r" (daddr), 239 : "r" (saddr), "r" (daddr),
231 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); 240 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h
index fbbfd7132e3b..574c8827abe2 100644
--- a/arch/score/include/asm/io.h
+++ b/arch/score/include/asm/io.h
@@ -5,5 +5,4 @@
5 5
6#define virt_to_bus virt_to_phys 6#define virt_to_bus virt_to_phys
7#define bus_to_virt phys_to_virt 7#define bus_to_virt phys_to_virt
8
9#endif /* _ASM_SCORE_IO_H */ 8#endif /* _ASM_SCORE_IO_H */
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 059a61b7071b..716b3fd1d863 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -2,7 +2,7 @@
2#define _ASM_SCORE_PGALLOC_H 2#define _ASM_SCORE_PGALLOC_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5#include <linux/highmem.h>
6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7 pte_t *pte) 7 pte_t *pte)
8{ 8{
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 7234ed09b7b7..befb87d30a89 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -264,7 +264,7 @@ resume_kernel:
264 disable_irq 264 disable_irq
265 lw r8, [r28, TI_PRE_COUNT] 265 lw r8, [r28, TI_PRE_COUNT]
266 cmpz.c r8 266 cmpz.c r8
267 bne r8, restore_all 267 bne restore_all
268need_resched: 268need_resched:
269 lw r8, [r28, TI_FLAGS] 269 lw r8, [r28, TI_FLAGS]
270 andri.c r9, r8, _TIF_NEED_RESCHED 270 andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
415 sw r9, [r0, PT_EPC] 415 sw r9, [r0, PT_EPC]
416 416
417 cmpi.c r27, __NR_syscalls # check syscall number 417 cmpi.c r27, __NR_syscalls # check syscall number
418 bgeu illegal_syscall 418 bcs illegal_syscall
419 419
420 slli r8, r27, 2 # get syscall routine 420 slli r8, r27, 2 # get syscall routine
421 la r11, sys_call_table 421 la r11, sys_call_table
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index f4c6d02421d3..a1519ad3d49d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
78 p->thread.reg0 = (unsigned long) childregs; 78 p->thread.reg0 = (unsigned long) childregs;
79 if (unlikely(p->flags & PF_KTHREAD)) { 79 if (unlikely(p->flags & PF_KTHREAD)) {
80 memset(childregs, 0, sizeof(struct pt_regs)); 80 memset(childregs, 0, sizeof(struct pt_regs));
81 p->thread->reg12 = usp; 81 p->thread.reg12 = usp;
82 p->thread->reg13 = arg; 82 p->thread.reg13 = arg;
83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread; 83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
84 } else { 84 } else {
85 *childregs = *current_pt_regs(); 85 *childregs = *current_pt_regs();
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2137ad667438..78c4fdb91bc5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -506,12 +506,17 @@ config SUN_OPENPROMFS
506 Only choose N if you know in advance that you will not need to modify 506 Only choose N if you know in advance that you will not need to modify
507 OpenPROM settings on the running system. 507 OpenPROM settings on the running system.
508 508
509# Makefile helper 509# Makefile helpers
510config SPARC64_PCI 510config SPARC64_PCI
511 bool 511 bool
512 default y 512 default y
513 depends on SPARC64 && PCI 513 depends on SPARC64 && PCI
514 514
515config SPARC64_PCI_MSI
516 bool
517 default y
518 depends on SPARC64_PCI && PCI_MSI
519
515endmenu 520endmenu
516 521
517menu "Executable file formats" 522menu "Executable file formats"
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index e204f902e6c9..7c90c50c200d 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -254,7 +254,7 @@ static int sun_fd_request_irq(void)
254 once = 1; 254 once = 1;
255 255
256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq, 256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
257 IRQF_DISABLED, "floppy", NULL); 257 0, "floppy", NULL);
258 258
259 return ((error == 0) ? 0 : -1); 259 return ((error == 0) ? 0 : -1);
260 } 260 }
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832f..ec2e2e2aba7d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
9 9
10static __always_inline bool arch_static_branch(struct static_key *key) 10static __always_inline bool arch_static_branch(struct static_key *key)
11{ 11{
12 asm goto("1:\n\t" 12 asm_volatile_goto("1:\n\t"
13 "nop\n\t" 13 "nop\n\t"
14 "nop\n\t" 14 "nop\n\t"
15 ".pushsection __jump_table, \"aw\"\n\t" 15 ".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index d432fb20358e..d15cc1794b0e 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,3 +1,4 @@
1
1# 2#
2# Makefile for the linux kernel. 3# Makefile for the linux kernel.
3# 4#
@@ -99,7 +100,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
99obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o 100obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
100obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o 101obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
101obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o 102obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
102obj-$(CONFIG_PCI_MSI) += pci_msi.o 103obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
103 104
104obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o 105obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
105 106
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 62d6b153ffa2..dff60abbea01 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -849,9 +849,8 @@ void ldom_reboot(const char *boot_command)
849 if (boot_command && strlen(boot_command)) { 849 if (boot_command && strlen(boot_command)) {
850 unsigned long len; 850 unsigned long len;
851 851
852 strcpy(full_boot_str, "boot "); 852 snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
853 strlcpy(full_boot_str + strlen("boot "), boot_command, 853 boot_command);
854 sizeof(full_boot_str + strlen("boot ")));
855 len = strlen(full_boot_str); 854 len = strlen(full_boot_str);
856 855
857 if (reboot_data_supported) { 856 if (reboot_data_supported) {
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 54df554b82d9..e01d75d40329 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1249,12 +1249,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); 1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1251 1251
1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED, 1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
1253 lp->rx_irq_name, lp); 1253 lp->rx_irq_name, lp);
1254 if (err) 1254 if (err)
1255 return err; 1255 return err;
1256 1256
1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED, 1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
1258 lp->tx_irq_name, lp); 1258 lp->tx_irq_name, lp);
1259 if (err) { 1259 if (err) {
1260 free_irq(lp->cfg.rx_irq, lp); 1260 free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
166 * 166 *
167 * Atomically sets @v to @i and returns old @v 167 * Atomically sets @v to @i and returns old @v
168 */ 168 */
169static inline u64 atomic64_xchg(atomic64_t *v, u64 n) 169static inline long long atomic64_xchg(atomic64_t *v, long long n)
170{ 170{
171 return xchg64(&v->counter, n); 171 return xchg64(&v->counter, n);
172} 172}
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
180 * Atomically checks if @v holds @o and replaces it with @n if so. 180 * Atomically checks if @v holds @o and replaces it with @n if so.
181 * Returns the old value at @v. 181 * Returns the old value at @v.
182 */ 182 */
183static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 183static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
184 long long n)
184{ 185{
185 return cmpxchg64(&v->counter, o, n); 186 return cmpxchg64(&v->counter, o, n);
186} 187}
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
80/* A 64bit atomic type */ 80/* A 64bit atomic type */
81 81
82typedef struct { 82typedef struct {
83 u64 __aligned(8) counter; 83 long long counter;
84} atomic64_t; 84} atomic64_t;
85 85
86#define ATOMIC64_INIT(val) { (val) } 86#define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
91 * 91 *
92 * Atomically reads the value of @v. 92 * Atomically reads the value of @v.
93 */ 93 */
94static inline u64 atomic64_read(const atomic64_t *v) 94static inline long long atomic64_read(const atomic64_t *v)
95{ 95{
96 /* 96 /*
97 * Requires an atomic op to read both 32-bit parts consistently. 97 * Requires an atomic op to read both 32-bit parts consistently.
98 * Casting away const is safe since the atomic support routines 98 * Casting away const is safe since the atomic support routines
99 * do not write to memory if the value has not been modified. 99 * do not write to memory if the value has not been modified.
100 */ 100 */
101 return _atomic64_xchg_add((u64 *)&v->counter, 0); 101 return _atomic64_xchg_add((long long *)&v->counter, 0);
102} 102}
103 103
104/** 104/**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
108 * 108 *
109 * Atomically adds @i to @v. 109 * Atomically adds @i to @v.
110 */ 110 */
111static inline void atomic64_add(u64 i, atomic64_t *v) 111static inline void atomic64_add(long long i, atomic64_t *v)
112{ 112{
113 _atomic64_xchg_add(&v->counter, i); 113 _atomic64_xchg_add(&v->counter, i);
114} 114}
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
120 * 120 *
121 * Atomically adds @i to @v and returns @i + @v 121 * Atomically adds @i to @v and returns @i + @v
122 */ 122 */
123static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 123static inline long long atomic64_add_return(long long i, atomic64_t *v)
124{ 124{
125 smp_mb(); /* barrier for proper semantics */ 125 smp_mb(); /* barrier for proper semantics */
126 return _atomic64_xchg_add(&v->counter, i) + i; 126 return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
135 * Atomically adds @a to @v, so long as @v was not already @u. 135 * Atomically adds @a to @v, so long as @v was not already @u.
136 * Returns non-zero if @v was not @u, and zero otherwise. 136 * Returns non-zero if @v was not @u, and zero otherwise.
137 */ 137 */
138static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 138static inline long long atomic64_add_unless(atomic64_t *v, long long a,
139 long long u)
139{ 140{
140 smp_mb(); /* barrier for proper semantics */ 141 smp_mb(); /* barrier for proper semantics */
141 return _atomic64_xchg_add_unless(&v->counter, a, u) != u; 142 return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
151 * atomic64_set() can't be just a raw store, since it would be lost if it 152 * atomic64_set() can't be just a raw store, since it would be lost if it
152 * fell between the load and store of one of the other atomic ops. 153 * fell between the load and store of one of the other atomic ops.
153 */ 154 */
154static inline void atomic64_set(atomic64_t *v, u64 n) 155static inline void atomic64_set(atomic64_t *v, long long n)
155{ 156{
156 _atomic64_xchg(&v->counter, n); 157 _atomic64_xchg(&v->counter, n);
157} 158}
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
236extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); 237extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
237extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); 238extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
238extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); 239extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
239extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); 240extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
240extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); 241 long long o, long long n);
241extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); 242extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
242extern u64 __atomic64_xchg_add_unless(volatile u64 *p, 243extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
243 int *lock, u64 o, u64 n); 244 long long n);
245extern long long __atomic64_xchg_add_unless(volatile long long *p,
246 int *lock, long long o, long long n);
244 247
245/* Return failure from the atomic wrappers. */ 248/* Return failure from the atomic wrappers. */
246struct __get_user __atomic_bad_address(int __user *addr); 249struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
35int _atomic_xchg_add(int *v, int i); 35int _atomic_xchg_add(int *v, int i);
36int _atomic_xchg_add_unless(int *v, int a, int u); 36int _atomic_xchg_add_unless(int *v, int a, int u);
37int _atomic_cmpxchg(int *ptr, int o, int n); 37int _atomic_cmpxchg(int *ptr, int o, int n);
38u64 _atomic64_xchg(u64 *v, u64 n); 38long long _atomic64_xchg(long long *v, long long n);
39u64 _atomic64_xchg_add(u64 *v, u64 i); 39long long _atomic64_xchg_add(long long *v, long long i);
40u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); 40long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
41u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); 41long long _atomic64_cmpxchg(long long *v, long long o, long long n);
42 42
43#define xchg(ptr, n) \ 43#define xchg(ptr, n) \
44 ({ \ 44 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
53 if (sizeof(*(ptr)) != 4) \ 53 if (sizeof(*(ptr)) != 4) \
54 __cmpxchg_called_with_bad_pointer(); \ 54 __cmpxchg_called_with_bad_pointer(); \
55 smp_mb(); \ 55 smp_mb(); \
56 (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ 56 (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
57 (int)n); \
57 }) 58 })
58 59
59#define xchg64(ptr, n) \ 60#define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
61 if (sizeof(*(ptr)) != 8) \ 62 if (sizeof(*(ptr)) != 8) \
62 __xchg_called_with_bad_pointer(); \ 63 __xchg_called_with_bad_pointer(); \
63 smp_mb(); \ 64 smp_mb(); \
64 (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \ 65 (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
66 (long long)(n)); \
65 }) 67 })
66 68
67#define cmpxchg64(ptr, o, n) \ 69#define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
69 if (sizeof(*(ptr)) != 8) \ 71 if (sizeof(*(ptr)) != 8) \
70 __cmpxchg_called_with_bad_pointer(); \ 72 __cmpxchg_called_with_bad_pointer(); \
71 smp_mb(); \ 73 smp_mb(); \
72 (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ 74 (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
75 (long long)o, (long long)n); \
73 }) 76 })
74 77
75#else 78#else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
81 switch (sizeof(*(ptr))) { \ 84 switch (sizeof(*(ptr))) { \
82 case 4: \ 85 case 4: \
83 __x = (typeof(__x))(unsigned long) \ 86 __x = (typeof(__x))(unsigned long) \
84 __insn_exch4((ptr), (u32)(unsigned long)(n)); \ 87 __insn_exch4((ptr), \
88 (u32)(unsigned long)(n)); \
85 break; \ 89 break; \
86 case 8: \ 90 case 8: \
87 __x = (typeof(__x)) \ 91 __x = (typeof(__x)) \
88 __insn_exch((ptr), (unsigned long)(n)); \ 92 __insn_exch((ptr), (unsigned long)(n)); \
89 break; \ 93 break; \
90 default: \ 94 default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
103 switch (sizeof(*(ptr))) { \ 107 switch (sizeof(*(ptr))) { \
104 case 4: \ 108 case 4: \
105 __x = (typeof(__x))(unsigned long) \ 109 __x = (typeof(__x))(unsigned long) \
106 __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ 110 __insn_cmpexch4((ptr), \
111 (u32)(unsigned long)(n)); \
107 break; \ 112 break; \
108 case 8: \ 113 case 8: \
109 __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ 114 __x = (typeof(__x))__insn_cmpexch((ptr), \
115 (long long)(n)); \
110 break; \ 116 break; \
111 default: \ 117 default: \
112 __cmpxchg_called_with_bad_pointer(); \ 118 __cmpxchg_called_with_bad_pointer(); \
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
15#ifndef _ASM_TILE_PERCPU_H 15#ifndef _ASM_TILE_PERCPU_H
16#define _ASM_TILE_PERCPU_H 16#define _ASM_TILE_PERCPU_H
17 17
18register unsigned long __my_cpu_offset __asm__("tp"); 18register unsigned long my_cpu_offset_reg asm("tp");
19#define __my_cpu_offset __my_cpu_offset 19
20#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) 20#ifdef CONFIG_PREEMPT
21/*
22 * For full preemption, we can't just use the register variable
23 * directly, since we need barrier() to hazard against it, causing the
24 * compiler to reload anything computed from a previous "tp" value.
25 * But we also don't want to use volatile asm, since we'd like the
26 * compiler to be able to cache the value across multiple percpu reads.
27 * So we use a fake stack read as a hazard against barrier().
28 * The 'U' constraint is like 'm' but disallows postincrement.
29 */
30static inline unsigned long __my_cpu_offset(void)
31{
32 unsigned long tp;
33 register unsigned long *sp asm("sp");
34 asm("move %0, tp" : "=r" (tp) : "U" (*sp));
35 return tp;
36}
37#define __my_cpu_offset __my_cpu_offset()
38#else
39/*
40 * We don't need to hazard against barrier() since "tp" doesn't ever
41 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
42 * changes at function call points, at which we are already re-reading
43 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
44 */
45#define __my_cpu_offset my_cpu_offset_reg
46#endif
47
48#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
21 49
22#include <asm-generic/percpu.h> 50#include <asm-generic/percpu.h>
23 51
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a3..531f4c365351 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
66 0, 66 0,
67 "udn", 67 "udn",
68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), 68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
69 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), 69 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
70 NULL 70 NULL
71 }, 71 },
72#ifndef __tilepro__ 72#ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
77 1, /* disabled pending hypervisor support */ 77 1, /* disabled pending hypervisor support */
78 "idn", 78 "idn",
79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), 79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
80 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), 80 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
81 NULL 81 NULL
82 }, 82 },
83 { /* access to user-space IPI */ 83 { /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
87 0, 87 0,
88 "ipi", 88 "ipi",
89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), 89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
90 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), 90 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
91 NULL 91 NULL
92 }, 92 },
93#endif 93#endif
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e68..2cbe6d5dd6b0 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
815 } 815 }
816 bzt r28, 1f 816 bzt r28, 1f
817 bnz r29, 1f 817 bnz r29, 1f
818 /* Disable interrupts explicitly for preemption. */
819 IRQ_DISABLE(r20,r21)
820 TRACE_IRQS_OFF
818 jal preempt_schedule_irq 821 jal preempt_schedule_irq
819 FEEDBACK_REENTER(interrupt_return) 822 FEEDBACK_REENTER(interrupt_return)
8201: 8231:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f3734..b8fc497f2437 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
841 } 841 }
842 beqzt r28, 1f 842 beqzt r28, 1f
843 bnez r29, 1f 843 bnez r29, 1f
844 /* Disable interrupts explicitly for preemption. */
845 IRQ_DISABLE(r20,r21)
846 TRACE_IRQS_OFF
844 jal preempt_schedule_irq 847 jal preempt_schedule_irq
845 FEEDBACK_REENTER(interrupt_return) 848 FEEDBACK_REENTER(interrupt_return)
8461: 8491:
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3afd..c93977a62116 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
23#include <linux/mmzone.h> 23#include <linux/mmzone.h>
24#include <linux/dcache.h> 24#include <linux/dcache.h>
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/string.h>
26#include <asm/backtrace.h> 27#include <asm/backtrace.h>
27#include <asm/page.h> 28#include <asm/page.h>
28#include <asm/ucontext.h> 29#include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
332 } 333 }
333 334
334 if (vma->vm_file) { 335 if (vma->vm_file) {
335 char *s;
336 p = d_path(&vma->vm_file->f_path, buf, bufsize); 336 p = d_path(&vma->vm_file->f_path, buf, bufsize);
337 if (IS_ERR(p)) 337 if (IS_ERR(p))
338 p = "?"; 338 p = "?";
339 s = strrchr(p, '/'); 339 name = kbasename(p);
340 if (s)
341 p = s+1;
342 } else { 340 } else {
343 p = "anon"; 341 name = "anon";
344 } 342 }
345 343
346 /* Generate a string description of the vma info. */ 344 /* Generate a string description of the vma info. */
347 namelen = strlen(p); 345 namelen = strlen(name);
348 remaining = (bufsize - 1) - namelen; 346 remaining = (bufsize - 1) - namelen;
349 memmove(buf, p, namelen); 347 memmove(buf, name, namelen);
350 snprintf(buf + namelen, remaining, "[%lx+%lx] ", 348 snprintf(buf + namelen, remaining, "[%lx+%lx] ",
351 vma->vm_start, vma->vm_end - vma->vm_start); 349 vma->vm_start, vma->vm_end - vma->vm_start);
352} 350}
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
107EXPORT_SYMBOL(_atomic_xor); 107EXPORT_SYMBOL(_atomic_xor);
108 108
109 109
110u64 _atomic64_xchg(u64 *v, u64 n) 110long long _atomic64_xchg(long long *v, long long n)
111{ 111{
112 return __atomic64_xchg(v, __atomic_setup(v), n); 112 return __atomic64_xchg(v, __atomic_setup(v), n);
113} 113}
114EXPORT_SYMBOL(_atomic64_xchg); 114EXPORT_SYMBOL(_atomic64_xchg);
115 115
116u64 _atomic64_xchg_add(u64 *v, u64 i) 116long long _atomic64_xchg_add(long long *v, long long i)
117{ 117{
118 return __atomic64_xchg_add(v, __atomic_setup(v), i); 118 return __atomic64_xchg_add(v, __atomic_setup(v), i);
119} 119}
120EXPORT_SYMBOL(_atomic64_xchg_add); 120EXPORT_SYMBOL(_atomic64_xchg_add);
121 121
122u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) 122long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
123{ 123{
124 /* 124 /*
125 * Note: argument order is switched here since it is easier 125 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
130} 130}
131EXPORT_SYMBOL(_atomic64_xchg_add_unless); 131EXPORT_SYMBOL(_atomic64_xchg_add_unless);
132 132
133u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) 133long long _atomic64_cmpxchg(long long *v, long long o, long long n)
134{ 134{
135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); 135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
136} 136}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d37745..145d703227bf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
860 860
861config X86_UP_APIC 861config X86_UP_APIC
862 bool "Local APIC support on uniprocessors" 862 bool "Local APIC support on uniprocessors"
863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD 863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
864 ---help--- 864 ---help---
865 A local APIC (Advanced Programmable Interrupt Controller) is an 865 A local APIC (Advanced Programmable Interrupt Controller) is an
866 integrated interrupt controller in the CPU. If you have a single-CPU 866 integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
885 885
886config X86_LOCAL_APIC 886config X86_LOCAL_APIC
887 def_bool y 887 def_bool y
888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC 888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
889 889
890config X86_IO_APIC 890config X86_IO_APIC
891 def_bool y 891 def_bool y
892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC 892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
893 893
894config X86_VISWS_APIC 894config X86_VISWS_APIC
895 def_bool y 895 def_bool y
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
374 * Catch too early usage of this before alternatives 374 * Catch too early usage of this before alternatives
375 * have run. 375 * have run.
376 */ 376 */
377 asm goto("1: jmp %l[t_warn]\n" 377 asm_volatile_goto("1: jmp %l[t_warn]\n"
378 "2:\n" 378 "2:\n"
379 ".section .altinstructions,\"a\"\n" 379 ".section .altinstructions,\"a\"\n"
380 " .long 1b - .\n" 380 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
388 388
389#endif 389#endif
390 390
391 asm goto("1: jmp %l[t_no]\n" 391 asm_volatile_goto("1: jmp %l[t_no]\n"
392 "2:\n" 392 "2:\n"
393 ".section .altinstructions,\"a\"\n" 393 ".section .altinstructions,\"a\"\n"
394 " .long 1b - .\n" 394 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
453 * have. Thus, we force the jump to the widest, 4-byte, signed relative 453 * have. Thus, we force the jump to the widest, 4-byte, signed relative
454 * offset even though the last would often fit in less bytes. 454 * offset even though the last would often fit in less bytes.
455 */ 455 */
456 asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" 456 asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
457 "2:\n" 457 "2:\n"
458 ".section .altinstructions,\"a\"\n" 458 ".section .altinstructions,\"a\"\n"
459 " .long 1b - .\n" /* src offset */ 459 " .long 1b - .\n" /* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
18 18
19static __always_inline bool arch_static_branch(struct static_key *key) 19static __always_inline bool arch_static_branch(struct static_key *key)
20{ 20{
21 asm goto("1:" 21 asm_volatile_goto("1:"
22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" 22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
23 ".pushsection __jump_table, \"aw\" \n\t" 23 ".pushsection __jump_table, \"aw\" \n\t"
24 _ASM_ALIGN "\n\t" 24 _ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
20static inline void __mutex_fastpath_lock(atomic_t *v, 20static inline void __mutex_fastpath_lock(atomic_t *v,
21 void (*fail_fn)(atomic_t *)) 21 void (*fail_fn)(atomic_t *))
22{ 22{
23 asm volatile goto(LOCK_PREFIX " decl %0\n" 23 asm_volatile_goto(LOCK_PREFIX " decl %0\n"
24 " jns %l[exit]\n" 24 " jns %l[exit]\n"
25 : : "m" (v->counter) 25 : : "m" (v->counter)
26 : "memory", "cc" 26 : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
75static inline void __mutex_fastpath_unlock(atomic_t *v, 75static inline void __mutex_fastpath_unlock(atomic_t *v,
76 void (*fail_fn)(atomic_t *)) 76 void (*fail_fn)(atomic_t *))
77{ 77{
78 asm volatile goto(LOCK_PREFIX " incl %0\n" 78 asm_volatile_goto(LOCK_PREFIX " incl %0\n"
79 " jg %l[exit]\n" 79 " jg %l[exit]\n"
80 : : "m" (v->counter) 80 : : "m" (v->counter)
81 : "memory", "cc" 81 : "memory", "cc"
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; 79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
80} 80}
81 81
82static inline unsigned long mfn_to_pfn(unsigned long mfn) 82static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
83{ 83{
84 unsigned long pfn; 84 unsigned long pfn;
85 int ret = 0; 85 int ret;
86 86
87 if (xen_feature(XENFEAT_auto_translated_physmap)) 87 if (xen_feature(XENFEAT_auto_translated_physmap))
88 return mfn; 88 return mfn;
89 89
90 if (unlikely(mfn >= machine_to_phys_nr)) { 90 if (unlikely(mfn >= machine_to_phys_nr))
91 pfn = ~0; 91 return ~0;
92 goto try_override; 92
93 }
94 pfn = 0;
95 /* 93 /*
96 * The array access can fail (e.g., device space beyond end of RAM). 94 * The array access can fail (e.g., device space beyond end of RAM).
97 * In such cases it doesn't matter what we return (we return garbage), 95 * In such cases it doesn't matter what we return (we return garbage),
98 * but we must handle the fault without crashing! 96 * but we must handle the fault without crashing!
99 */ 97 */
100 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 98 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
101try_override:
102 /* ret might be < 0 if there are no entries in the m2p for mfn */
103 if (ret < 0) 99 if (ret < 0)
104 pfn = ~0; 100 return ~0;
105 else if (get_phys_to_machine(pfn) != mfn) 101
102 return pfn;
103}
104
105static inline unsigned long mfn_to_pfn(unsigned long mfn)
106{
107 unsigned long pfn;
108
109 if (xen_feature(XENFEAT_auto_translated_physmap))
110 return mfn;
111
112 pfn = mfn_to_pfn_no_overrides(mfn);
113 if (get_phys_to_machine(pfn) != mfn) {
106 /* 114 /*
107 * If this appears to be a foreign mfn (because the pfn 115 * If this appears to be a foreign mfn (because the pfn
108 * doesn't map back to the mfn), then check the local override 116 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
111 * m2p_find_override_pfn returns ~0 if it doesn't find anything. 119 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
112 */ 120 */
113 pfn = m2p_find_override_pfn(mfn, ~0); 121 pfn = m2p_find_override_pfn(mfn, ~0);
122 }
114 123
115 /* 124 /*
116 * pfn is ~0 if there are no entries in the m2p for mfn or if the 125 * pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
1506 err = amd_pmu_init(); 1506 err = amd_pmu_init();
1507 break; 1507 break;
1508 default: 1508 default:
1509 return 0; 1509 err = -ENOTSUPP;
1510 } 1510 }
1511 if (err != 0) { 1511 if (err != 0) {
1512 pr_cont("no PMU driver, software events only.\n"); 1512 pr_cont("no PMU driver, software events only.\n");
@@ -1883,26 +1883,21 @@ static struct pmu pmu = {
1883 1883
1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1885{ 1885{
1886 userpg->cap_usr_time = 0; 1886 userpg->cap_user_time = 0;
1887 userpg->cap_usr_time_zero = 0; 1887 userpg->cap_user_time_zero = 0;
1888 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1889 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1890 1890
1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1891 if (!sched_clock_stable)
1892 return;
1893
1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1895 return; 1892 return;
1896 1893
1897 userpg->cap_usr_time = 1; 1894 userpg->cap_user_time = 1;
1898 userpg->time_mult = this_cpu_read(cyc2ns); 1895 userpg->time_mult = this_cpu_read(cyc2ns);
1899 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1896 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1897 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901 1898
1902 if (sched_clock_stable && !check_tsc_disabled()) { 1899 userpg->cap_user_time_zero = 1;
1903 userpg->cap_usr_time_zero = 1; 1900 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 }
1906} 1901}
1907 1902
1908/* 1903/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9db76c31b3c3..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
2325 break; 2325 break;
2326 2326
2327 case 55: /* Atom 22nm "Silvermont" */ 2327 case 55: /* Atom 22nm "Silvermont" */
2328 case 77: /* Avoton "Silvermont" */
2328 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2329 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2329 sizeof(hw_cache_event_ids)); 2330 sizeof(hw_cache_event_ids));
2330 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 2331 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2706 box->hrtimer.function = uncore_pmu_hrtimer; 2706 box->hrtimer.function = uncore_pmu_hrtimer;
2707} 2707}
2708 2708
2709struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) 2709static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2710{ 2710{
2711 struct intel_uncore_box *box; 2711 struct intel_uncore_box *box;
2712 int i, size; 2712 int i, size;
2713 2713
2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); 2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715 2715
2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); 2716 box = kzalloc_node(size, GFP_KERNEL, node);
2717 if (!box) 2717 if (!box)
2718 return NULL; 2718 return NULL;
2719 2719
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3031 struct intel_uncore_box *fake_box; 3031 struct intel_uncore_box *fake_box;
3032 int ret = -EINVAL, n; 3032 int ret = -EINVAL, n;
3033 3033
3034 fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); 3034 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3035 if (!fake_box) 3035 if (!fake_box)
3036 return -ENOMEM; 3036 return -ENOMEM;
3037 3037
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
3294 } 3294 }
3295 3295
3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; 3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297 box = uncore_alloc_box(type, 0); 3297 box = uncore_alloc_box(type, NUMA_NO_NODE);
3298 if (!box) 3298 if (!box)
3299 return -ENOMEM; 3299 return -ENOMEM;
3300 3300
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
3499 if (pmu->func_id < 0) 3499 if (pmu->func_id < 0)
3500 pmu->func_id = j; 3500 pmu->func_id = j;
3501 3501
3502 box = uncore_alloc_box(type, cpu); 3502 box = uncore_alloc_box(type, cpu_to_node(cpu));
3503 if (!box) 3503 if (!box)
3504 return -ENOMEM; 3504 return -ENOMEM;
3505 3505
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
216 /* need to apply patch? */ 216 /* need to apply patch? */
217 if (rev >= mc_amd->hdr.patch_id) { 217 if (rev >= mc_amd->hdr.patch_id) {
218 c->microcode = rev; 218 c->microcode = rev;
219 uci->cpu_sig.rev = rev;
219 return 0; 220 return 0;
220 } 221 }
221 222
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..7e920bff99a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), 326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
327 }, 327 },
328 }, 328 },
329 { /* Handle problems with rebooting on the Latitude E5410. */
330 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5410",
332 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
335 },
336 },
329 { /* Handle problems with rebooting on the Latitude E5420. */ 337 { /* Handle problems with rebooting on the Latitude E5420. */
330 .callback = set_pci_reboot, 338 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5420", 339 .ident = "Dell Latitude E5420",
@@ -352,12 +360,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
352 }, 360 },
353 { /* Handle problems with rebooting on the Precision M6600. */ 361 { /* Handle problems with rebooting on the Precision M6600. */
354 .callback = set_pci_reboot, 362 .callback = set_pci_reboot,
355 .ident = "Dell OptiPlex 990", 363 .ident = "Dell Precision M6600",
356 .matches = { 364 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 365 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
358 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), 366 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
359 }, 367 },
360 }, 368 },
369 { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
370 .callback = set_pci_reboot,
371 .ident = "Dell PowerEdge C6100",
372 .matches = {
373 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
374 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
375 },
376 },
377 { /* Some C6100 machines were shipped with vendor being 'Dell'. */
378 .callback = set_pci_reboot,
379 .ident = "Dell PowerEdge C6100",
380 .matches = {
381 DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
382 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
383 },
384 },
361 { } 385 { }
362}; 386};
363 387
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 22513e96b012..86179d409893 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
72 * the part that is occupied by the framebuffer */ 72 * the part that is occupied by the framebuffer */
73 len = mode->height * mode->stride; 73 len = mode->height * mode->stride;
74 len = PAGE_ALIGN(len); 74 len = PAGE_ALIGN(len);
75 if (len > si->lfb_size << 16) { 75 if (len > (u64)si->lfb_size << 16) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 /* setup IORESOURCE_MEM as framebuffer memory */ 80 /* setup IORESOURCE_MEM as framebuffer memory */
81 memset(&res, 0, sizeof(res)); 81 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM; 82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 83 res.name = simplefb_resname;
84 res.start = si->lfb_base; 84 res.start = si->lfb_base;
85 res.end = si->lfb_base + len - 1; 85 res.end = si->lfb_base + len - 1;
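The (u64) cast above matters because lfb_size counts the framebuffer in 64 KiB units, so the byte size is lfb_size << 16; without the cast the shift happens in 32-bit arithmetic and can wrap, letting the sanity check pass for an undersized aperture. A worked illustration (the value is made up):

	#include <linux/types.h>

	static void lfb_size_demo(void)
	{
		u32 lfb_size = 0x20000;			/* 8 GiB expressed in 64 KiB units */
		u64 wrapped = lfb_size << 16;		/* 32-bit shift wraps to 0 */
		u64 correct = (u64)lfb_size << 16;	/* 0x200000000, i.e. 8 GiB */

		(void)wrapped;
		(void)correct;
	}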
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a1216de9ffda..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3255 3255
3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3257{ 3257{
3258 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3259
3258 if (!test_bit(VCPU_EXREG_PDPTR, 3260 if (!test_bit(VCPU_EXREG_PDPTR,
3259 (unsigned long *)&vcpu->arch.regs_dirty)) 3261 (unsigned long *)&vcpu->arch.regs_dirty))
3260 return; 3262 return;
3261 3263
3262 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3264 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3263 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]); 3265 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3264 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]); 3266 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3265 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]); 3267 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3266 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]); 3268 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3267 } 3269 }
3268} 3270}
3269 3271
3270static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3272static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3271{ 3273{
3274 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3275
3272 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3276 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3273 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3277 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3274 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3278 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3275 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3279 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3276 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3280 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3277 } 3281 }
3278 3282
3279 __set_bit(VCPU_EXREG_PDPTR, 3283 __set_bit(VCPU_EXREG_PDPTR,
@@ -5345,7 +5349,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
5345 * There are errata that may cause this bit to not be set: 5349 * There are errata that may cause this bit to not be set:
5346 * AAK134, BY25. 5350 * AAK134, BY25.
5347 */ 5351 */
5348 if (exit_qualification & INTR_INFO_UNBLOCK_NMI) 5352 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5353 cpu_has_virtual_nmis() &&
5354 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5349 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5355 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5350 5356
5351 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5357 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -7775,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7775 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 7781 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7776 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 7782 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7777 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 7783 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7778 __clear_bit(VCPU_EXREG_PDPTR,
7779 (unsigned long *)&vcpu->arch.regs_avail);
7780 __clear_bit(VCPU_EXREG_PDPTR,
7781 (unsigned long *)&vcpu->arch.regs_dirty);
7782 } 7784 }
7783 7785
7784 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5596c7bdd327..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) 700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
701 return -ENODEV; 701 return -ENODEV;
702 702
703 if (start > end || !addr) 703 if (start > end)
704 return -EINVAL; 704 return -EINVAL;
705 705
706 mutex_lock(&pci_mmcfg_lock); 706 mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
716 return -EEXIST; 716 return -EEXIST;
717 } 717 }
718 718
719 if (!addr) {
720 mutex_unlock(&pci_mmcfg_lock);
721 return -EINVAL;
722 }
723
719 rc = -EBUSY; 724 rc = -EBUSY;
720 cfg = pci_mmconfig_alloc(seg, start, end, addr); 725 cfg = pci_mmconfig_alloc(seg, start, end, addr);
721 if (cfg == NULL) { 726 if (cfg == NULL) {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
912 912
913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
914 md = p; 914 md = p;
915 if (!(md->attribute & EFI_MEMORY_RUNTIME) && 915 if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
916 md->type != EFI_BOOT_SERVICES_CODE && 916#ifdef CONFIG_X86_64
917 md->type != EFI_BOOT_SERVICES_DATA) 917 if (md->type != EFI_BOOT_SERVICES_CODE &&
918 continue; 918 md->type != EFI_BOOT_SERVICES_DATA)
919#endif
920 continue;
921 }
919 922
920 size = md->num_pages << EFI_PAGE_SHIFT; 923 size = md->num_pages << EFI_PAGE_SHIFT;
921 end = md->phys_addr + size; 924 end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
879 unsigned long uninitialized_var(address); 879 unsigned long uninitialized_var(address);
880 unsigned level; 880 unsigned level;
881 pte_t *ptep = NULL; 881 pte_t *ptep = NULL;
882 int ret = 0;
883 882
884 pfn = page_to_pfn(page); 883 pfn = page_to_pfn(page);
885 if (!PageHighMem(page)) { 884 if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
926 * frontend pages while they are being shared with the backend, 925 * frontend pages while they are being shared with the backend,
927 * because mfn_to_pfn (that ends up being called by GUPF) will 926 * because mfn_to_pfn (that ends up being called by GUPF) will
928 * return the backend pfn rather than the frontend pfn. */ 927 * return the backend pfn rather than the frontend pfn. */
929 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 928 pfn = mfn_to_pfn_no_overrides(mfn);
930 if (ret == 0 && get_phys_to_machine(pfn) == mfn) 929 if (get_phys_to_machine(pfn) == mfn)
931 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 930 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
932 931
933 return 0; 932 return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
942 unsigned long uninitialized_var(address); 941 unsigned long uninitialized_var(address);
943 unsigned level; 942 unsigned level;
944 pte_t *ptep = NULL; 943 pte_t *ptep = NULL;
945 int ret = 0;
946 944
947 pfn = page_to_pfn(page); 945 pfn = page_to_pfn(page);
948 mfn = get_phys_to_machine(pfn); 946 mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
1029 * the original pfn causes mfn_to_pfn(mfn) to return the frontend 1027 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
1030 * pfn again. */ 1028 * pfn again. */
1031 mfn &= ~FOREIGN_FRAME_BIT; 1029 mfn &= ~FOREIGN_FRAME_BIT;
1032 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 1030 pfn = mfn_to_pfn_no_overrides(mfn);
1033 if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 1031 if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
1034 m2p_find_override(mfn) == NULL) 1032 m2p_find_override(mfn) == NULL)
1035 set_phys_to_machine(pfn, mfn); 1033 set_phys_to_machine(pfn, mfn);
1036 1034
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
259} 259}
260 260
261 261
262/*
263 * Our init of PV spinlocks is split in two init functions due to us
264 * using paravirt patching and jump labels patching and having to do
265 * all of this before SMP code is invoked.
266 *
267 * The paravirt patching needs to be done _before_ the alternative asm code
268 * is started, otherwise we would not patch the core kernel code.
269 */
262void __init xen_init_spinlocks(void) 270void __init xen_init_spinlocks(void)
263{ 271{
264 272
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
267 return; 275 return;
268 } 276 }
269 277
270 static_key_slow_inc(&paravirt_ticketlocks_enabled);
271
272 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); 278 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
273 pv_lock_ops.unlock_kick = xen_unlock_kick; 279 pv_lock_ops.unlock_kick = xen_unlock_kick;
274} 280}
275 281
282/*
283 * The jump_label init code needs to happen _after_ the jump labels are
284 * enabled and before SMP is started. Hence we use a pre-SMP initcall level
285 * init. We cannot do it in xen_init_spinlocks as that is done before
286 * jump labels are activated.
287 */
288static __init int xen_init_spinlocks_jump(void)
289{
290 if (!xen_pvspin)
291 return 0;
292
293 static_key_slow_inc(&paravirt_ticketlocks_enabled);
294 return 0;
295}
296early_initcall(xen_init_spinlocks_jump);

297
276static __init int xen_parse_nopvspin(char *arg) 298static __init int xen_parse_nopvspin(char *arg)
277{ 299{
278 xen_pvspin = false; 300 xen_pvspin = false;
diff --git a/block/Kconfig b/block/Kconfig
index 7f38e40fee08..2429515c05c2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING
99 99
100 See Documentation/cgroups/blkio-controller.txt for more information. 100 See Documentation/cgroups/blkio-controller.txt for more information.
101 101
102config CMDLINE_PARSER 102config BLK_CMDLINE_PARSER
103 bool "Block device command line partition parser" 103 bool "Block device command line partition parser"
104 default n 104 default n
105 ---help--- 105 ---help---
106 Parsing command line, get the partitions information. 106 Enabling this option allows you to specify the partition layout from
107 the kernel boot args. This is typically of use for embedded devices
108 which don't otherwise have any standardized method for listing the
109 partitions on a block device.
110
111 See Documentation/block/cmdline-partition.txt for more information.
107 112
108menu "Partition Types" 113menu "Partition Types"
109 114
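For context, the mtdparts-style syntax this option enables is described in Documentation/block/cmdline-partition.txt; an illustrative boot argument (device name and sizes are made up, see the document for the exact grammar) would be:

	blkdevparts=mmcblk0:1m(boot),16m(kernel),-(rootfs)

which describes three partitions on mmcblk0: 1 MiB for boot, 16 MiB for the kernel, and the remaining space for rootfs.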
diff --git a/block/Makefile b/block/Makefile
index 4fa4be544ece..671a83d063a5 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
18 18
19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o 19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o 20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
21obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o 21obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 87a32086535d..9b29a996c311 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -263,7 +263,7 @@ config SYSV68_PARTITION
263 263
264config CMDLINE_PARTITION 264config CMDLINE_PARTITION
265 bool "Command line partition support" if PARTITION_ADVANCED 265 bool "Command line partition support" if PARTITION_ADVANCED
266 select CMDLINE_PARSER 266 select BLK_CMDLINE_PARSER
267 help 267 help
268 Say Y here if you would read the partitions table from bootargs. 268 Say Y here if you want to read the partition table from bootargs.
269 The format for the command line is just like mtdparts. 269 The format for the command line is just like mtdparts.
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index 56cf4ffad51e..5141b563adf1 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -2,15 +2,15 @@
2 * Copyright (C) 2013 HUAWEI 2 * Copyright (C) 2013 HUAWEI
3 * Author: Cai Zhiyong <caizhiyong@huawei.com> 3 * Author: Cai Zhiyong <caizhiyong@huawei.com>
4 * 4 *
5 * Read block device partition table from command line. 5 * Read block device partition table from the command line.
6 * The partition used for fixed block device (eMMC) embedded device. 6 * Typically used for fixed block (eMMC) embedded devices.
7 * It is no MBR, save storage space. Bootloader can be easily accessed 7 * It has no MBR, so saves storage space. Bootloader can be easily accessed
8 * by absolute address of data on the block device. 8 * by absolute address of data on the block device.
9 * Users can easily change the partition. 9 * Users can easily change the partition.
10 * 10 *
11 * The format for the command line is just like mtdparts. 11 * The format for the command line is just like mtdparts.
12 * 12 *
13 * Verbose config please reference "Documentation/block/cmdline-partition.txt" 13 * For further information, see "Documentation/block/cmdline-partition.txt"
14 * 14 *
15 */ 15 */
16 16
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
39#include <linux/ipmi.h> 39#include <linux/ipmi.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/pnp.h> 41#include <linux/pnp.h>
42#include <linux/spinlock.h>
42 43
43MODULE_AUTHOR("Zhao Yakui"); 44MODULE_AUTHOR("Zhao Yakui");
44MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); 45MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
57 struct list_head head; 58 struct list_head head;
58 /* the IPMI request message list */ 59 /* the IPMI request message list */
59 struct list_head tx_msg_list; 60 struct list_head tx_msg_list;
60 struct mutex tx_msg_lock; 61 spinlock_t tx_msg_lock;
61 acpi_handle handle; 62 acpi_handle handle;
62 struct pnp_dev *pnp_dev; 63 struct pnp_dev *pnp_dev;
63 ipmi_user_t user_interface; 64 ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
147 struct kernel_ipmi_msg *msg; 148 struct kernel_ipmi_msg *msg;
148 struct acpi_ipmi_buffer *buffer; 149 struct acpi_ipmi_buffer *buffer;
149 struct acpi_ipmi_device *device; 150 struct acpi_ipmi_device *device;
151 unsigned long flags;
150 152
151 msg = &tx_msg->tx_message; 153 msg = &tx_msg->tx_message;
152 /* 154 /*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
177 179
178 /* Get the msgid */ 180 /* Get the msgid */
179 device = tx_msg->device; 181 device = tx_msg->device;
180 mutex_lock(&device->tx_msg_lock); 182 spin_lock_irqsave(&device->tx_msg_lock, flags);
181 device->curr_msgid++; 183 device->curr_msgid++;
182 tx_msg->tx_msgid = device->curr_msgid; 184 tx_msg->tx_msgid = device->curr_msgid;
183 mutex_unlock(&device->tx_msg_lock); 185 spin_unlock_irqrestore(&device->tx_msg_lock, flags);
184} 186}
185 187
186static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, 188static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
242 int msg_found = 0; 244 int msg_found = 0;
243 struct acpi_ipmi_msg *tx_msg; 245 struct acpi_ipmi_msg *tx_msg;
244 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; 246 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
247 unsigned long flags;
245 248
246 if (msg->user != ipmi_device->user_interface) { 249 if (msg->user != ipmi_device->user_interface) {
247 dev_warn(&pnp_dev->dev, "Unexpected response is returned. " 250 dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
250 ipmi_free_recv_msg(msg); 253 ipmi_free_recv_msg(msg);
251 return; 254 return;
252 } 255 }
253 mutex_lock(&ipmi_device->tx_msg_lock); 256 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
254 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { 257 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
255 if (msg->msgid == tx_msg->tx_msgid) { 258 if (msg->msgid == tx_msg->tx_msgid) {
256 msg_found = 1; 259 msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
258 } 261 }
259 } 262 }
260 263
261 mutex_unlock(&ipmi_device->tx_msg_lock); 264 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
262 if (!msg_found) { 265 if (!msg_found) {
263 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " 266 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
264 "returned.\n", msg->msgid); 267 "returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
378 struct acpi_ipmi_device *ipmi_device = handler_context; 381 struct acpi_ipmi_device *ipmi_device = handler_context;
379 int err, rem_time; 382 int err, rem_time;
380 acpi_status status; 383 acpi_status status;
384 unsigned long flags;
381 /* 385 /*
382 * IPMI opregion message. 386 * IPMI opregion message.
383 * IPMI message is firstly written to the BMC and system software 387 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
395 return AE_NO_MEMORY; 399 return AE_NO_MEMORY;
396 400
397 acpi_format_ipmi_msg(tx_msg, address, value); 401 acpi_format_ipmi_msg(tx_msg, address, value);
398 mutex_lock(&ipmi_device->tx_msg_lock); 402 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
399 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); 403 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
400 mutex_unlock(&ipmi_device->tx_msg_lock); 404 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
401 err = ipmi_request_settime(ipmi_device->user_interface, 405 err = ipmi_request_settime(ipmi_device->user_interface,
402 &tx_msg->addr, 406 &tx_msg->addr,
403 tx_msg->tx_msgid, 407 tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
413 status = AE_OK; 417 status = AE_OK;
414 418
415end_label: 419end_label:
416 mutex_lock(&ipmi_device->tx_msg_lock); 420 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
417 list_del(&tx_msg->head); 421 list_del(&tx_msg->head);
418 mutex_unlock(&ipmi_device->tx_msg_lock); 422 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
419 kfree(tx_msg); 423 kfree(tx_msg);
420 return status; 424 return status;
421} 425}
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
457 461
458 INIT_LIST_HEAD(&ipmi_device->head); 462 INIT_LIST_HEAD(&ipmi_device->head);
459 463
460 mutex_init(&ipmi_device->tx_msg_lock); 464 spin_lock_init(&ipmi_device->tx_msg_lock);
461 INIT_LIST_HEAD(&ipmi_device->tx_msg_list); 465 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
462 ipmi_install_space_handler(ipmi_device); 466 ipmi_install_space_handler(ipmi_device);
463 467
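The conversion above swaps a mutex for a spinlock, presumably because the tx_msg list can be reached from atomic context via the IPMI message handler, where sleeping locks are not allowed; spin_lock_irqsave() is the usual pattern for a lock shared between process context and such handlers. A generic sketch (names are illustrative, not from the driver):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(msg_lock);
	static LIST_HEAD(msg_list);

	static void queue_msg(struct list_head *entry)
	{
		unsigned long flags;

		spin_lock_irqsave(&msg_lock, flags);	/* safe in any context */
		list_add_tail(entry, &msg_list);
		spin_unlock_irqrestore(&msg_lock, flags);
	}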
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..407ad13cac2f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
968 } 968 }
969 return 0; 969 return 0;
970} 970}
971EXPORT_SYMBOL_GPL(acpi_bus_get_device); 971EXPORT_SYMBOL(acpi_bus_get_device);
972 972
973int acpi_device_add(struct acpi_device *device, 973int acpi_device_add(struct acpi_device *device,
974 void (*release)(struct device *)) 974 void (*release)(struct device *))
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
1121EXPORT_SYMBOL(acpi_bus_register_driver); 1121EXPORT_SYMBOL(acpi_bus_register_driver);
1122 1122
1123/** 1123/**
1124 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus 1124 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
1125 * @driver: driver to unregister 1125 * @driver: driver to unregister
1126 * 1126 *
1127 * Unregisters a driver with the ACPI bus. Searches the namespace for all 1127 * Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
2 * sata_promise.c - Promise SATA 2 * sata_promise.c - Promise SATA
3 * 3 *
4 * Maintained by: Tejun Heo <tj@kernel.org> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Mikael Pettersson <mikpe@it.uu.se> 5 * Mikael Pettersson
6 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails. 7 * on emails.
8 * 8 *
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c7cfadcf6752..34abf4d8a45f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
2017 */ 2017 */
2018void device_shutdown(void) 2018void device_shutdown(void)
2019{ 2019{
2020 struct device *dev; 2020 struct device *dev, *parent;
2021 2021
2022 spin_lock(&devices_kset->list_lock); 2022 spin_lock(&devices_kset->list_lock);
2023 /* 2023 /*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
2034 * prevent it from being freed because parent's 2034 * prevent it from being freed because parent's
2035 * lock is to be held 2035 * lock is to be held
2036 */ 2036 */
2037 get_device(dev->parent); 2037 parent = get_device(dev->parent);
2038 get_device(dev); 2038 get_device(dev);
2039 /* 2039 /*
2040 * Make sure the device is off the kset list, in the 2040 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
2044 spin_unlock(&devices_kset->list_lock); 2044 spin_unlock(&devices_kset->list_lock);
2045 2045
2046 /* hold lock to avoid race with probe/release */ 2046 /* hold lock to avoid race with probe/release */
2047 if (dev->parent) 2047 if (parent)
2048 device_lock(dev->parent); 2048 device_lock(parent);
2049 device_lock(dev); 2049 device_lock(dev);
2050 2050
2051 /* Don't allow any more runtime suspends */ 2051 /* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
2063 } 2063 }
2064 2064
2065 device_unlock(dev); 2065 device_unlock(dev);
2066 if (dev->parent) 2066 if (parent)
2067 device_unlock(dev->parent); 2067 device_unlock(parent);
2068 2068
2069 put_device(dev); 2069 put_device(dev);
2070 put_device(dev->parent); 2070 put_device(parent);
2071 2071
2072 spin_lock(&devices_kset->list_lock); 2072 spin_lock(&devices_kset->list_lock);
2073 } 2073 }
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
210 } 210 }
211} 211}
212 212
213static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
214{
215 u16 data;
216
217 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
218 data = up ? 0x74 : 0x7C;
219 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
220 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
221 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
222 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
223 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
224 data = up ? 0x75 : 0x7D;
225 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
226 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
227 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
228 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
229 }
230}
231
232/************************************************** 213/**************************************************
233 * Init. 214 * Init.
234 **************************************************/ 215 **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
255 bcma_core_pci_clientmode_init(pc); 236 bcma_core_pci_clientmode_init(pc);
256} 237}
257 238
239void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
240{
241 struct bcma_drv_pci *pc;
242 u16 data;
243
244 if (bus->hosttype != BCMA_HOSTTYPE_PCI)
245 return;
246
247 pc = &bus->drv_pci[0];
248
249 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
250 data = up ? 0x74 : 0x7C;
251 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
252 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
253 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
254 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
255 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
256 data = up ? 0x75 : 0x7D;
257 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
258 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
259 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
260 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
261 }
262}
263EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
264
258int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 265int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
259 bool enable) 266 bool enable)
260{ 267{
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
310 317
311 pc = &bus->drv_pci[0]; 318 pc = &bus->drv_pci[0];
312 319
313 bcma_core_pci_power_save(pc, true);
314
315 bcma_core_pci_extend_L1timer(pc, true); 320 bcma_core_pci_extend_L1timer(pc, true);
316} 321}
317EXPORT_SYMBOL_GPL(bcma_core_pci_up); 322EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
326 pc = &bus->drv_pci[0]; 331 pc = &bus->drv_pci[0];
327 332
328 bcma_core_pci_extend_L1timer(pc, false); 333 bcma_core_pci_extend_L1timer(pc, false);
329
330 bcma_core_pci_power_save(pc, false);
331} 334}
332EXPORT_SYMBOL_GPL(bcma_core_pci_down); 335EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1189 int err; 1189 int err;
1190 u32 cp; 1190 u32 cp;
1191 1191
1192 memset(&arg64, 0, sizeof(arg64));
1192 err = 0; 1193 err = 0;
1193 err |= 1194 err |=
1194 copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 1195 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
1193 ida_pci_info_struct pciinfo; 1193 ida_pci_info_struct pciinfo;
1194 1194
1195 if (!arg) return -EINVAL; 1195 if (!arg) return -EINVAL;
1196 memset(&pciinfo, 0, sizeof(pciinfo));
1196 pciinfo.bus = host->pci_dev->bus->number; 1197 pciinfo.bus = host->pci_dev->bus->number;
1197 pciinfo.dev_fn = host->pci_dev->devfn; 1198 pciinfo.dev_fn = host->pci_dev->devfn;
1198 pciinfo.board_id = host->board_id; 1199 pciinfo.board_id = host->board_id;
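Both of the hunks above zero an on-stack structure before filling it and copying it to user space; without the memset, padding and any untouched fields would leak uninitialized kernel stack bytes. A minimal sketch of the pattern (the struct and values are hypothetical):

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	struct demo_info {
		u32 bus;
		u32 devfn;
		u32 board_id;
	};

	static long fill_demo_info(void __user *arg)
	{
		struct demo_info info;

		memset(&info, 0, sizeof(info));	/* no stale stack bytes reach userspace */
		info.bus = 1;
		info.devfn = 0;
		info.board_id = 0x1234;

		return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
	}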
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
85 { USB_DEVICE(0x04CA, 0x3008) }, 85 { USB_DEVICE(0x04CA, 0x3008) },
86 { USB_DEVICE(0x13d3, 0x3362) }, 86 { USB_DEVICE(0x13d3, 0x3362) },
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) },
88 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
89 { USB_DEVICE(0x0489, 0xe057) }, 90 { USB_DEVICE(0x0489, 0xe057) },
90 { USB_DEVICE(0x13d3, 0x3393) }, 91 { USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
126 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 127 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
127 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 128 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
128 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0b05, 0x17b5) }, 104 { USB_DEVICE(0x0b05, 0x17b5) },
105 { USB_DEVICE(0x0b05, 0x17cb) },
105 { USB_DEVICE(0x04ca, 0x2003) }, 106 { USB_DEVICE(0x04ca, 0x2003) },
106 { USB_DEVICE(0x0489, 0xe042) }, 107 { USB_DEVICE(0x0489, 0xe042) },
107 { USB_DEVICE(0x413c, 0x8197) }, 108 { USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
112 /*Broadcom devices with vendor specific id */ 113 /*Broadcom devices with vendor specific id */
113 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 114 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
114 115
116 /* Belkin F8065bf - Broadcom based */
117 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
118
115 { } /* Terminating entry */ 119 { } /* Terminating entry */
116}; 120};
117 121
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
148 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
149 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 153 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
150 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
151 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
152 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 19ab6ff53d59..2394e9753ef5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
700 phys_addr_t sdramwins_phys_base, 700 phys_addr_t sdramwins_phys_base,
701 size_t sdramwins_size) 701 size_t sdramwins_size)
702{ 702{
703 struct device_node *np;
703 int win; 704 int win;
704 705
705 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); 706 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
712 return -ENOMEM; 713 return -ENOMEM;
713 } 714 }
714 715
715 if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric")) 716 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
717 if (np) {
716 mbus->hw_io_coherency = 1; 718 mbus->hw_io_coherency = 1;
719 of_node_put(np);
720 }
717 721
718 for (win = 0; win < mbus->soc->num_wins; win++) 722 for (win = 0; win < mbus->soc->num_wins; win++)
719 mvebu_mbus_disable_window(mbus, win); 723 mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
861 int ret; 865 int ret;
862 866
863 /* 867 /*
864 * These are optional, so we clear them and they'll 868 * These are optional, so we make sure that resource_size(x) will
865 * be zero if they are missing from the DT. 869 * return 0.
866 */ 870 */
867 memset(mem, 0, sizeof(struct resource)); 871 memset(mem, 0, sizeof(struct resource));
872 mem->end = -1;
868 memset(io, 0, sizeof(struct resource)); 873 memset(io, 0, sizeof(struct resource));
874 io->end = -1;
869 875
870 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); 876 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
871 if (!ret) { 877 if (!ret) {
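The added mem->end = -1 / io->end = -1 lines go with the updated comment: resource_size() is end - start + 1, so a zeroed resource with end set to -1 reports a size of 0, and an absent aperture is treated as empty rather than one byte long. Roughly:

	#include <linux/ioport.h>

	/* resource_size() is end - start + 1, so: */
	struct resource empty = { .start = 0, .end = -1 };	/* resource_size(&empty) == 0 */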
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7737b5bd26af..7a744d391756 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -640,7 +640,7 @@ struct timer_rand_state {
640 */ 640 */
641void add_device_randomness(const void *buf, unsigned int size) 641void add_device_randomness(const void *buf, unsigned int size)
642{ 642{
643 unsigned long time = get_cycles() ^ jiffies; 643 unsigned long time = random_get_entropy() ^ jiffies;
644 644
645 mix_pool_bytes(&input_pool, buf, size, NULL); 645 mix_pool_bytes(&input_pool, buf, size, NULL);
646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); 646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
677 goto out; 677 goto out;
678 678
679 sample.jiffies = jiffies; 679 sample.jiffies = jiffies;
680 sample.cycles = get_cycles(); 680 sample.cycles = random_get_entropy();
681 sample.num = num; 681 sample.num = num;
682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); 682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
683 683
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); 744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
745 struct pt_regs *regs = get_irq_regs(); 745 struct pt_regs *regs = get_irq_regs();
746 unsigned long now = jiffies; 746 unsigned long now = jiffies;
747 __u32 input[4], cycles = get_cycles(); 747 __u32 input[4], cycles = random_get_entropy();
748 748
749 input[0] = cycles ^ jiffies; 749 input[0] = cycles ^ jiffies;
750 input[1] = irq; 750 input[1] = irq;
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
1459 1459
1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1461 1461
1462static int __init random_int_secret_init(void) 1462int random_int_secret_init(void)
1463{ 1463{
1464 get_random_bytes(random_int_secret, sizeof(random_int_secret)); 1464 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1465 return 0; 1465 return 0;
1466} 1466}
1467late_initcall(random_int_secret_init);
1468 1467
1469/* 1468/*
1470 * Get a random word for internal kernel use only. Similar to urandom but 1469 * Get a random word for internal kernel use only. Similar to urandom but
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
1483 1482
1484 hash = get_cpu_var(get_random_int_hash); 1483 hash = get_cpu_var(get_random_int_hash);
1485 1484
1486 hash[0] += current->pid + jiffies + get_cycles(); 1485 hash[0] += current->pid + jiffies + random_get_entropy();
1487 md5_transform(hash, random_int_secret); 1486 md5_transform(hash, random_int_secret);
1488 ret = hash[0]; 1487 ret = hash[0];
1489 put_cpu_var(get_random_int_hash); 1488 put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..06189e55b4e5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
142 return length; 142 return length;
143} 143}
144 144
145ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
146 char *buf)
147{
148 struct tpm_chip *chip = dev_get_drvdata(dev);
149 struct tpm_private *priv = TPM_VPRIV(chip);
150 u8 locality = priv->shr->locality;
151
152 return sprintf(buf, "%d\n", locality);
153}
154
155ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
156 const char *buf, size_t len)
157{
158 struct tpm_chip *chip = dev_get_drvdata(dev);
159 struct tpm_private *priv = TPM_VPRIV(chip);
160 u8 val;
161
162 int rv = kstrtou8(buf, 0, &val);
163 if (rv)
164 return rv;
165
166 priv->shr->locality = val;
167
168 return len;
169}
170
171static const struct file_operations vtpm_ops = { 145static const struct file_operations vtpm_ops = {
172 .owner = THIS_MODULE, 146 .owner = THIS_MODULE,
173 .llseek = no_llseek, 147 .llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
188static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 162static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
189static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 163static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
190static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 164static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
191static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
192 tpm_store_locality);
193 165
194static struct attribute *vtpm_attrs[] = { 166static struct attribute *vtpm_attrs[] = {
195 &dev_attr_pubek.attr, 167 &dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
202 &dev_attr_cancel.attr, 174 &dev_attr_cancel.attr,
203 &dev_attr_durations.attr, 175 &dev_attr_durations.attr,
204 &dev_attr_timeouts.attr, 176 &dev_attr_timeouts.attr,
205 &dev_attr_locality.attr,
206 NULL, 177 NULL,
207}; 178};
208 179
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
210 .attrs = vtpm_attrs, 181 .attrs = vtpm_attrs,
211}; 182};
212 183
213#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
214
215static const struct tpm_vendor_specific tpm_vtpm = { 184static const struct tpm_vendor_specific tpm_vtpm = {
216 .status = vtpm_status, 185 .status = vtpm_status,
217 .recv = vtpm_recv, 186 .recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
224 .miscdev = { 193 .miscdev = {
225 .fops = &vtpm_ops, 194 .fops = &vtpm_ops,
226 }, 195 },
227 .duration = {
228 TPM_LONG_TIMEOUT,
229 TPM_LONG_TIMEOUT,
230 TPM_LONG_TIMEOUT,
231 },
232}; 196};
233 197
234static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) 198static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
26 26
27config ARMADA_370_XP_TIMER 27config ARMADA_370_XP_TIMER
28 bool 28 bool
29 select CLKSRC_OF
29 30
30config ORION_TIMER 31config ORION_TIMER
31 select CLKSRC_OF 32 select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
30 clocksource_of_init_fn init_func; 30 clocksource_of_init_fn init_func;
31 31
32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
33 if (!of_device_is_available(np))
34 continue;
35
33 init_func = match->data; 36 init_func = match->data;
34 init_func(np); 37 init_func(np);
35 } 38 }
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
301 ced->name = dev_name(&p->pdev->dev); 301 ced->name = dev_name(&p->pdev->dev);
302 ced->features = CLOCK_EVT_FEAT_ONESHOT; 302 ced->features = CLOCK_EVT_FEAT_ONESHOT;
303 ced->rating = 200; 303 ced->rating = 200;
304 ced->cpumask = cpumask_of(0); 304 ced->cpumask = cpu_possible_mask;
305 ced->set_next_event = em_sti_clock_event_next; 305 ced->set_next_event = em_sti_clock_event_next;
306 ced->set_mode = em_sti_clock_event_mode; 306 ced->set_mode = em_sti_clock_event_mode;
307 307
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
428 evt->irq); 428 evt->irq);
429 return -EIO; 429 return -EIO;
430 } 430 }
431 irq_set_affinity(evt->irq, cpumask_of(cpu));
432 } else { 431 } else {
433 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); 432 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
434 } 433 }
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
449 unsigned long action, void *hcpu) 448 unsigned long action, void *hcpu)
450{ 449{
451 struct mct_clock_event_device *mevt; 450 struct mct_clock_event_device *mevt;
451 unsigned int cpu;
452 452
453 /* 453 /*
454 * Grab cpu pointer in each case to avoid spurious 454 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
459 mevt = this_cpu_ptr(&percpu_mct_tick); 459 mevt = this_cpu_ptr(&percpu_mct_tick);
460 exynos4_local_timer_setup(&mevt->evt); 460 exynos4_local_timer_setup(&mevt->evt);
461 break; 461 break;
462 case CPU_ONLINE:
463 cpu = (unsigned long)hcpu;
464 if (mct_int_type == MCT_INT_SPI)
465 irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
466 cpumask_of(cpu));
467 break;
462 case CPU_DYING: 468 case CPU_DYING:
463 mevt = this_cpu_ptr(&percpu_mct_tick); 469 mevt = this_cpu_ptr(&percpu_mct_tick);
464 exynos4_local_timer_stop(&mevt->evt); 470 exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
500 &percpu_mct_tick); 506 &percpu_mct_tick);
501 WARN(err, "MCT: can't request IRQ %d (%d)\n", 507 WARN(err, "MCT: can't request IRQ %d (%d)\n",
502 mct_irqs[MCT_L0_IRQ], err); 508 mct_irqs[MCT_L0_IRQ], err);
509 } else {
510 irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
503 } 511 }
504 512
505 err = register_cpu_notifier(&exynos4_mct_cpu_nb); 513 err = register_cpu_notifier(&exynos4_mct_cpu_nb);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..d2c3253e015e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver())
991 return 0;
992
989 if (acpi_disabled) 993 if (acpi_disabled)
990 return 0; 994 return 0;
991 995
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 78c49d8e0f4a..c522a95c0e16 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -229,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
229 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 229 if (of_property_read_u32(np, "clock-latency", &transition_latency))
230 transition_latency = CPUFREQ_ETERNAL; 230 transition_latency = CPUFREQ_ETERNAL;
231 231
232 if (cpu_reg) { 232 if (!IS_ERR(cpu_reg)) {
233 struct opp *opp; 233 struct opp *opp;
234 unsigned long min_uV, max_uV; 234 unsigned long min_uV, max_uV;
235 int i; 235 int i;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 89b3c52cd5c3..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
1460{ 1460{
1461 unsigned int ret_freq = 0; 1461 unsigned int ret_freq = 0;
1462 1462
1463 if (cpufreq_disabled() || !cpufreq_driver)
1464 return -ENOENT;
1465
1463 if (!down_read_trylock(&cpufreq_rwsem)) 1466 if (!down_read_trylock(&cpufreq_rwsem))
1464 return 0; 1467 return 0;
1465 1468
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
458err_put_node: 458err_put_node:
459 of_node_put(np); 459 of_node_put(np);
460 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); 460 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
461 return ret; 461 return ret;
462} 462}
463 463
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9733f29ed148..32b3479a2405 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 394 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 395
396 cpu->pstate.current_pstate = pstate; 396 cpu->pstate.current_pstate = pstate;
397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 397 if (limits.no_turbo)
398 wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
399 else
400 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
398 401
399} 402}
400 403
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 19e364fa5955..3f418166ce02 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
113 unsigned int target_freq, unsigned int relation) 113 unsigned int target_freq, unsigned int relation)
114{ 114{
115 struct cpufreq_freqs freqs; 115 struct cpufreq_freqs freqs;
116 unsigned long newfreq; 116 long newfreq;
117 struct clk *srcclk; 117 struct clk *srcclk;
118 int index, ret, mult = 1; 118 int index, ret, mult = 1;
119 119
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
201 select TI_PRIV_EDMA
201 default n 202 default n
202 help 203 help
203 Enable support for the TI EDMA controller. This DMA 204 Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..3519111c566b 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -306,6 +306,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
306 EDMA_SLOT_ANY); 306 EDMA_SLOT_ANY);
307 if (echan->slot[i] < 0) { 307 if (echan->slot[i] < 0) {
308 dev_err(dev, "Failed to allocate slot\n"); 308 dev_err(dev, "Failed to allocate slot\n");
309 kfree(edesc);
309 return NULL; 310 return NULL;
310 } 311 }
311 } 312 }
@@ -749,6 +750,6 @@ static void __exit edma_exit(void)
749} 750}
750module_exit(edma_exit); 751module_exit(edma_exit);
751 752
752MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 753MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
753MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 754MODULE_DESCRIPTION("TI EDMA DMA engine driver");
754MODULE_LICENSE("GPL v2"); 755MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags;
440 441
441 spin_lock(&imxdma->lock); 442 spin_lock_irqsave(&imxdma->lock, flags);
442 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
443 spin_unlock(&imxdma->lock); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
444 goto out; 445 goto out;
445 } 446 }
446 447
447 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
448 struct imxdma_desc, 449 struct imxdma_desc,
449 node); 450 node);
450 spin_unlock(&imxdma->lock); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
451 452
452 if (desc->sg) { 453 if (desc->sg) {
453 u32 tmp; 454 u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
519{ 520{
520 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
521 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 unsigned long flags;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 spin_lock_irqsave(&imxdma->lock, flags);
531 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
532 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
533 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
537 slot = i; 536 slot = i;
538 break; 537 break;
539 } 538 }
540 if (slot < 0) { 539 if (slot < 0)
541 spin_unlock_irqrestore(&imxdma->lock, flags);
542 return -EBUSY; 540 return -EBUSY;
543 }
544 541
545 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
546 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
549 546
550 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
551 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
552 spin_unlock_irqrestore(&imxdma->lock, flags);
553 549
554 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
555 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
625 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
626 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
627 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags;
628 625
629 spin_lock(&imxdma->lock); 626 spin_lock_irqsave(&imxdma->lock, flags);
630 627
631 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
632 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
633 goto out; 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return;
634 } 632 }
635 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
636 634
637 if (desc->desc.callback)
638 desc->desc.callback(desc->desc.callback_param);
639
640 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
641 * and dont mark the descriptor as complete. 636 * and dont mark the descriptor as complete.
642 * Only in non-cyclic cases it would be marked as complete 637 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
663 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
664 } 659 }
665out: 660out:
666 spin_unlock(&imxdma->lock); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662
663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param);
665
667} 666}
668 667
669static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
883 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
884 883
885 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
886 sizeof(struct scatterlist), GFP_KERNEL); 885 sizeof(struct scatterlist), GFP_ATOMIC);
887 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
888 return NULL; 887 return NULL;
889 888
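The switch from GFP_KERNEL to GFP_ATOMIC in the cyclic-prepare path suggests the function can be reached from a context that must not sleep; GFP_ATOMIC allocations never block, at the cost of failing more readily. A minimal sketch:

	#include <linux/slab.h>
	#include <linux/scatterlist.h>

	static struct scatterlist *alloc_sg_atomic(unsigned int periods)
	{
		/* GFP_ATOMIC does not sleep, so this is safe in atomic context,
		 * but the NULL case must still be handled by the caller. */
		return kcalloc(periods + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	}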
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 45a520281ce1..ebad84591a6e 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
93 void __iomem *base; 93 void __iomem *base;
94 const struct hpb_dmae_slave_config *cfg; 94 const struct hpb_dmae_slave_config *cfg;
95 char dev_id[16]; /* unique name per DMAC of channel */ 95 char dev_id[16]; /* unique name per DMAC of channel */
96 dma_addr_t slave_addr;
96}; 97};
97 98
98struct hpb_dmae_device { 99struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
432 hpb_chan->xfer_mode = XFER_DOUBLE; 433 hpb_chan->xfer_mode = XFER_DOUBLE;
433 } else { 434 } else {
434 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); 435 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
435 shdma_free_irq(&hpb_chan->shdma_chan);
436 return -EINVAL; 436 return -EINVAL;
437 } 437 }
438 438
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
446 return 0; 446 return 0;
447} 447}
448 448
449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) 449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
450 dma_addr_t slave_addr, bool try)
450{ 451{
451 struct hpb_dmae_chan *chan = to_chan(schan); 452 struct hpb_dmae_chan *chan = to_chan(schan);
452 const struct hpb_dmae_slave_config *sc = 453 const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
457 if (try) 458 if (try)
458 return 0; 459 return 0;
459 chan->cfg = sc; 460 chan->cfg = sc;
461 chan->slave_addr = slave_addr ? : sc->addr;
460 return hpb_dmae_alloc_chan_resources(chan, sc); 462 return hpb_dmae_alloc_chan_resources(chan, sc);
461} 463}
462 464
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
468{ 470{
469 struct hpb_dmae_chan *chan = to_chan(schan); 471 struct hpb_dmae_chan *chan = to_chan(schan);
470 472
471 return chan->cfg->addr; 473 return chan->slave_addr;
472} 474}
473 475
474static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) 476static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
614 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { 616 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
615 BUG_ON(!schan); 617 BUG_ON(!schan);
616 618
617 shdma_free_irq(schan);
618 shdma_chan_remove(schan); 619 shdma_chan_remove(schan);
619 } 620 }
620 dma_dev->chancnt = 0; 621 dma_dev->chancnt = 0;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ff43552d472..89675f862308 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@ struct gpio_bank {
63 struct gpio_chip chip; 63 struct gpio_chip chip;
64 struct clk *dbck; 64 struct clk *dbck;
65 u32 mod_usage; 65 u32 mod_usage;
66 u32 irq_usage;
66 u32 dbck_enable_mask; 67 u32 dbck_enable_mask;
67 bool dbck_enabled; 68 bool dbck_enabled;
68 struct device *dev; 69 struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
86#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
87#define GPIO_MOD_CTRL_BIT BIT(0) 88#define GPIO_MOD_CTRL_BIT BIT(0)
88 89
90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
91#define LINE_USED(line, offset) (line & (1 << offset))
92
89static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
90{ 94{
91 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
420 return 0; 424 return 0;
421} 425}
422 426
427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
428{
429 if (bank->regs->pinctrl) {
430 void __iomem *reg = bank->base + bank->regs->pinctrl;
431
432 /* Claim the pin for MPU */
433 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
434 }
435
436 if (bank->regs->ctrl && !BANK_USED(bank)) {
437 void __iomem *reg = bank->base + bank->regs->ctrl;
438 u32 ctrl;
439
440 ctrl = __raw_readl(reg);
441 /* Module is enabled, clocks are not gated */
442 ctrl &= ~GPIO_MOD_CTRL_BIT;
443 __raw_writel(ctrl, reg);
444 bank->context.ctrl = ctrl;
445 }
446}
447
448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
449{
450 void __iomem *base = bank->base;
451
452 if (bank->regs->wkup_en &&
453 !LINE_USED(bank->mod_usage, offset) &&
454 !LINE_USED(bank->irq_usage, offset)) {
455 /* Disable wake-up during idle for dynamic tick */
456 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
457 bank->context.wake_en =
458 __raw_readl(bank->base + bank->regs->wkup_en);
459 }
460
461 if (bank->regs->ctrl && !BANK_USED(bank)) {
462 void __iomem *reg = bank->base + bank->regs->ctrl;
463 u32 ctrl;
464
465 ctrl = __raw_readl(reg);
466 /* Module is disabled, clocks are gated */
467 ctrl |= GPIO_MOD_CTRL_BIT;
468 __raw_writel(ctrl, reg);
469 bank->context.ctrl = ctrl;
470 }
471}
472
473static int gpio_is_input(struct gpio_bank *bank, int mask)
474{
475 void __iomem *reg = bank->base + bank->regs->direction;
476
477 return __raw_readl(reg) & mask;
478}
479
423static int gpio_irq_type(struct irq_data *d, unsigned type) 480static int gpio_irq_type(struct irq_data *d, unsigned type)
424{ 481{
425 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 482 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
426 unsigned gpio = 0; 483 unsigned gpio = 0;
427 int retval; 484 int retval;
428 unsigned long flags; 485 unsigned long flags;
486 unsigned offset;
429 487
430 if (WARN_ON(!bank->mod_usage)) 488 if (!BANK_USED(bank))
431 return -EINVAL; 489 pm_runtime_get_sync(bank->dev);
432 490
433#ifdef CONFIG_ARCH_OMAP1 491#ifdef CONFIG_ARCH_OMAP1
434 if (d->irq > IH_MPUIO_BASE) 492 if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
446 return -EINVAL; 504 return -EINVAL;
447 505
448 spin_lock_irqsave(&bank->lock, flags); 506 spin_lock_irqsave(&bank->lock, flags);
449 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 507 offset = GPIO_INDEX(bank, gpio);
508 retval = _set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) {
510 _enable_gpio_module(bank, offset);
511 _set_gpio_direction(bank, offset, 1);
512 } else if (!gpio_is_input(bank, 1 << offset)) {
513 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL;
515 }
516
517 bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
450 spin_unlock_irqrestore(&bank->lock, flags); 518 spin_unlock_irqrestore(&bank->lock, flags);
451 519
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
603 * If this is the first gpio_request for the bank, 671 * If this is the first gpio_request for the bank,
604 * enable the bank module. 672 * enable the bank module.
605 */ 673 */
606 if (!bank->mod_usage) 674 if (!BANK_USED(bank))
607 pm_runtime_get_sync(bank->dev); 675 pm_runtime_get_sync(bank->dev);
608 676
609 spin_lock_irqsave(&bank->lock, flags); 677 spin_lock_irqsave(&bank->lock, flags);
610 /* Set trigger to none. You need to enable the desired trigger with 678 /* Set trigger to none. You need to enable the desired trigger with
611 * request_irq() or set_irq_type(). 679 * request_irq() or set_irq_type(). Only do this if the IRQ line has
680 * not already been requested.
612 */ 681 */
613 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 682 if (!LINE_USED(bank->irq_usage, offset)) {
614 683 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
615 if (bank->regs->pinctrl) { 684 _enable_gpio_module(bank, offset);
616 void __iomem *reg = bank->base + bank->regs->pinctrl;
617
618 /* Claim the pin for MPU */
619 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
620 }
621
622 if (bank->regs->ctrl && !bank->mod_usage) {
623 void __iomem *reg = bank->base + bank->regs->ctrl;
624 u32 ctrl;
625
626 ctrl = __raw_readl(reg);
627 /* Module is enabled, clocks are not gated */
628 ctrl &= ~GPIO_MOD_CTRL_BIT;
629 __raw_writel(ctrl, reg);
630 bank->context.ctrl = ctrl;
631 } 685 }
632
633 bank->mod_usage |= 1 << offset; 686 bank->mod_usage |= 1 << offset;
634
635 spin_unlock_irqrestore(&bank->lock, flags); 687 spin_unlock_irqrestore(&bank->lock, flags);
636 688
637 return 0; 689 return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
640static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
641{ 693{
642 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 694 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
643 void __iomem *base = bank->base;
644 unsigned long flags; 695 unsigned long flags;
645 696
646 spin_lock_irqsave(&bank->lock, flags); 697 spin_lock_irqsave(&bank->lock, flags);
647
648 if (bank->regs->wkup_en) {
649 /* Disable wake-up during idle for dynamic tick */
650 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
651 bank->context.wake_en =
652 __raw_readl(bank->base + bank->regs->wkup_en);
653 }
654
655 bank->mod_usage &= ~(1 << offset); 698 bank->mod_usage &= ~(1 << offset);
656 699 _disable_gpio_module(bank, offset);
657 if (bank->regs->ctrl && !bank->mod_usage) {
658 void __iomem *reg = bank->base + bank->regs->ctrl;
659 u32 ctrl;
660
661 ctrl = __raw_readl(reg);
662 /* Module is disabled, clocks are gated */
663 ctrl |= GPIO_MOD_CTRL_BIT;
664 __raw_writel(ctrl, reg);
665 bank->context.ctrl = ctrl;
666 }
667
668 _reset_gpio(bank, bank->chip.base + offset); 700 _reset_gpio(bank, bank->chip.base + offset);
669 spin_unlock_irqrestore(&bank->lock, flags); 701 spin_unlock_irqrestore(&bank->lock, flags);
670 702
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
672 * If this is the last gpio to be freed in the bank, 704 * If this is the last gpio to be freed in the bank,
673 * disable the bank module. 705 * disable the bank module.
674 */ 706 */
675 if (!bank->mod_usage) 707 if (!BANK_USED(bank))
676 pm_runtime_put(bank->dev); 708 pm_runtime_put(bank->dev);
677} 709}
678 710
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
762 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 794 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
763 unsigned int gpio = irq_to_gpio(bank, d->hwirq); 795 unsigned int gpio = irq_to_gpio(bank, d->hwirq);
764 unsigned long flags; 796 unsigned long flags;
797 unsigned offset = GPIO_INDEX(bank, gpio);
765 798
766 spin_lock_irqsave(&bank->lock, flags); 799 spin_lock_irqsave(&bank->lock, flags);
800 bank->irq_usage &= ~(1 << offset);
801 _disable_gpio_module(bank, offset);
767 _reset_gpio(bank, gpio); 802 _reset_gpio(bank, gpio);
768 spin_unlock_irqrestore(&bank->lock, flags); 803 spin_unlock_irqrestore(&bank->lock, flags);
804
805 /*
806 * If this is the last IRQ to be freed in the bank,
807 * disable the bank module.
808 */
809 if (!BANK_USED(bank))
810 pm_runtime_put(bank->dev);
769} 811}
770 812
771static void gpio_ack_irq(struct irq_data *d) 813static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
897 return 0; 939 return 0;
898} 940}
899 941
900static int gpio_is_input(struct gpio_bank *bank, int mask)
901{
902 void __iomem *reg = bank->base + bank->regs->direction;
903
904 return __raw_readl(reg) & mask;
905}
906
907static int gpio_get(struct gpio_chip *chip, unsigned offset) 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
908{ 943{
909 struct gpio_bank *bank; 944 struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
922{ 957{
923 struct gpio_bank *bank; 958 struct gpio_bank *bank;
924 unsigned long flags; 959 unsigned long flags;
960 int retval = 0;
925 961
926 bank = container_of(chip, struct gpio_bank, chip); 962 bank = container_of(chip, struct gpio_bank, chip);
927 spin_lock_irqsave(&bank->lock, flags); 963 spin_lock_irqsave(&bank->lock, flags);
964
965 if (LINE_USED(bank->irq_usage, offset)) {
966 retval = -EINVAL;
967 goto exit;
968 }
969
928 bank->set_dataout(bank, offset, value); 970 bank->set_dataout(bank, offset, value);
929 _set_gpio_direction(bank, offset, 0); 971 _set_gpio_direction(bank, offset, 0);
972
973exit:
930 spin_unlock_irqrestore(&bank->lock, flags); 974 spin_unlock_irqrestore(&bank->lock, flags);
931 return 0; 975 return retval;
932} 976}
933 977
934static int gpio_debounce(struct gpio_chip *chip, unsigned offset, 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
1400 struct gpio_bank *bank; 1444 struct gpio_bank *bank;
1401 1445
1402 list_for_each_entry(bank, &omap_gpio_list, node) { 1446 list_for_each_entry(bank, &omap_gpio_list, node) {
1403 if (!bank->mod_usage || !bank->loses_context) 1447 if (!BANK_USED(bank) || !bank->loses_context)
1404 continue; 1448 continue;
1405 1449
1406 bank->power_mode = pwr_mode; 1450 bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
1414 struct gpio_bank *bank; 1458 struct gpio_bank *bank;
1415 1459
1416 list_for_each_entry(bank, &omap_gpio_list, node) { 1460 list_for_each_entry(bank, &omap_gpio_list, node) {
1417 if (!bank->mod_usage || !bank->loses_context) 1461 if (!BANK_USED(bank) || !bank->loses_context)
1418 continue; 1462 continue;
1419 1463
1420 pm_runtime_get_sync(bank->dev); 1464 pm_runtime_get_sync(bank->dev);
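The gpio-omap changes add a second usage bitmap so lines requested as interrupts are tracked separately from gpiolib requests; the bank's module enable and runtime PM are then tied to BANK_USED(), and LINE_USED() guards the per-line wake and direction handling. A standalone sketch of the bookkeeping with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct bank {
        uint32_t mod_usage;     /* lines requested through gpiolib */
        uint32_t irq_usage;     /* lines wired up as interrupt sources */
};

#define BANK_USED(b)        ((b)->mod_usage || (b)->irq_usage)
#define LINE_USED(map, off) ((map) & (1u << (off)))

static void claim_irq(struct bank *b, unsigned int off)
{
        if (!BANK_USED(b))
                printf("first user: runtime-resume the bank\n");
        b->irq_usage |= 1u << off;
}

static void release_irq(struct bank *b, unsigned int off)
{
        b->irq_usage &= ~(1u << off);
        if (!BANK_USED(b))
                printf("last user gone: runtime-suspend the bank\n");
}

int main(void)
{
        struct bank b = { 0, 0 };

        claim_irq(&b, 3);
        printf("line 3 used as irq: %d\n", !!LINE_USED(b.irq_usage, 3));
        release_irq(&b, 3);
        return 0;
}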
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e3745eb07570..6038966ab045 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
293 if (pdata) { 293 if (pdata) {
294 p->config = *pdata; 294 p->config = *pdata;
295 } else if (IS_ENABLED(CONFIG_OF) && np) { 295 } else if (IS_ENABLED(CONFIG_OF) && np) {
296 ret = of_parse_phandle_with_args(np, "gpio-ranges", 296 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
297 "#gpio-range-cells", 0, &args); 297 &args);
298 p->config.number_of_pins = ret == 0 && args.args_count == 3 298 p->config.number_of_pins = ret == 0 ? args.args[2]
299 ? args.args[2]
300 : RCAR_MAX_GPIO_PER_BANK; 299 : RCAR_MAX_GPIO_PER_BANK;
301 p->config.gpio_base = -1; 300 p->config.gpio_base = -1;
302 } 301 }
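Since the gpio-ranges property always carries three argument cells, the parser above switches to the fixed-cell variant instead of reading a "#gpio-range-cells" property. A hedged kernel-style sketch, assuming a struct device_node *np is already available and a caller-chosen fallback count:

#include <linux/of.h>

/* Return the pin count from the third cell of "gpio-ranges"
 * (<&pinctrl gpio_offset pin_offset npins>), or 'fallback'. */
static unsigned int gpio_range_pin_count(struct device_node *np,
                                         unsigned int fallback)
{
        struct of_phandle_args args;
        int ret;

        ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
        if (ret)
                return fallback;

        of_node_put(args.np);   /* drop the reference taken by the parser */
        return args.args[2];
}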
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1688ff500513..830f7501cb4d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
2925 /* Speaker Allocation Data Block */ 2925 /* Speaker Allocation Data Block */
2926 if (dbl == 3) { 2926 if (dbl == 3) {
2927 *sadb = kmalloc(dbl, GFP_KERNEL); 2927 *sadb = kmalloc(dbl, GFP_KERNEL);
2928 if (!*sadb)
2929 return -ENOMEM;
2928 memcpy(*sadb, &db[1], dbl); 2930 memcpy(*sadb, &db[1], dbl);
2929 count = dbl; 2931 count = dbl;
2930 break; 2932 break;
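The EDID fix above is the usual allocate-check-copy pattern: kmalloc() may fail, so the memcpy() must not run on a NULL destination. A standalone sketch of the same shape:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Copy 'len' payload bytes into a freshly allocated buffer the caller owns.
 * Returns 0 or a negative errno-style code. */
static int copy_block(unsigned char **out, const unsigned char *src, size_t len)
{
        *out = malloc(len);
        if (!*out)
                return -ENOMEM;         /* never memcpy() into NULL */

        memcpy(*out, src, len);
        return 0;
}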
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6f6cc7fc133..3d13ca6e257f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,14 +416,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
416 return; 416 return;
417 417
418 /* 418 /*
419 * fbdev->blank can be called from irq context in case of a panic.
420 * Since we already have our own special panic handler which will
421 * restore the fbdev console mode completely, just bail out early.
422 */
423 if (oops_in_progress)
424 return;
425
426 /*
427 * For each CRTC in this fb, turn the connectors on/off. 419 * For each CRTC in this fb, turn the connectors on/off.
428 */ 420 */
429 drm_modeset_lock_all(dev); 421 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 92babac362ec..2db731f00930 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
204 if (IS_ERR(pages)) 204 if (IS_ERR(pages))
205 return PTR_ERR(pages); 205 return PTR_ERR(pages);
206 206
207 gt->npage = gt->gem.size / PAGE_SIZE;
207 gt->pages = pages; 208 gt->pages = pages;
208 209
209 return 0; 210 return 0;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); 707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
708 break; 708 break;
709 case DRM_MODE_DPMS_OFF: 709 case DRM_MODE_DPMS_OFF:
710 /* disable audio and video ports */ 710 /* disable video ports */
711 reg_write(encoder, REG_ENA_AP, 0x00);
712 reg_write(encoder, REG_ENA_VP_0, 0x00); 711 reg_write(encoder, REG_ENA_VP_0, 0x00);
713 reg_write(encoder, REG_ENA_VP_1, 0x00); 712 reg_write(encoder, REG_ENA_VP_1, 0x00);
714 reg_write(encoder, REG_ENA_VP_2, 0x00); 713 reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c27a21034a5e..d5c784d48671 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
1290 * then we do not take part in VGA arbitration and the 1290 * then we do not take part in VGA arbitration and the
1291 * vga_client_register() fails with -ENODEV. 1291 * vga_client_register() fails with -ENODEV.
1292 */ 1292 */
1293 if (!HAS_PCH_SPLIT(dev)) { 1293 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1294 ret = vga_client_register(dev->pdev, dev, NULL, 1294 if (ret && ret != -ENODEV)
1295 i915_vga_set_decode); 1295 goto out;
1296 if (ret && ret != -ENODEV)
1297 goto out;
1298 }
1299 1296
1300 intel_register_dsm_handler(); 1297 intel_register_dsm_handler();
1301 1298
@@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1351 */ 1348 */
1352 intel_fbdev_initial_config(dev); 1349 intel_fbdev_initial_config(dev);
1353 1350
1354 /*
1355 * Must do this after fbcon init so that
1356 * vgacon_save_screen() works during the handover.
1357 */
1358 i915_disable_vga_mem(dev);
1359
1360 /* Only enable hotplug handling once the fbdev is fully set up. */ 1351 /* Only enable hotplug handling once the fbdev is fully set up. */
1361 dev_priv->enable_hotplug_processing = true; 1352 dev_priv->enable_hotplug_processing = true;
1362 1353
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df9253d890ee..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4800 4800
4801 if (!mutex_trylock(&dev->struct_mutex)) { 4801 if (!mutex_trylock(&dev->struct_mutex)) {
4802 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4802 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4803 return SHRINK_STOP; 4803 return 0;
4804 4804
4805 if (dev_priv->mm.shrinker_no_lock_stealing) 4805 if (dev_priv->mm.shrinker_no_lock_stealing)
4806 return SHRINK_STOP; 4806 return 0;
4807 4807
4808 unlock = false; 4808 unlock = false;
4809 } 4809 }
@@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4901 4901
4902 if (!mutex_trylock(&dev->struct_mutex)) { 4902 if (!mutex_trylock(&dev->struct_mutex)) {
4903 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4903 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4904 return 0; 4904 return SHRINK_STOP;
4905 4905
4906 if (dev_priv->mm.shrinker_no_lock_stealing) 4906 if (dev_priv->mm.shrinker_no_lock_stealing)
4907 return 0; 4907 return SHRINK_STOP;
4908 4908
4909 unlock = false; 4909 unlock = false;
4910 } 4910 }
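The two i915 shrinker hunks swap the sentinels into their intended places: count_objects() reports 0 when it cannot take the lock (nothing countable right now), while scan_objects() reports SHRINK_STOP so the core stops pushing that shrinker. A kernel-style sketch of the split, with placeholder locking helpers:

#include <linux/shrinker.h>

static bool demo_trylock(void) { return true; }  /* placeholder lock */
static void demo_unlock(void)  { }

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long count = 0;

        if (!demo_trylock())
                return 0;               /* can't count, report nothing */
        /* ... walk the lists and add up reclaimable objects ... */
        demo_unlock();
        return count;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long freed = 0;

        if (!demo_trylock())
                return SHRINK_STOP;     /* can't make progress, stop scanning */
        /* ... free up to sc->nr_to_scan objects, counting them in 'freed' ... */
        demo_unlock();
        return freed;
}

static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects  = demo_scan,
        .seeks         = DEFAULT_SEEKS,
};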
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
143 143
144 /* Seek the first printf which is hits start position */ 144 /* Seek the first printf which is hits start position */
145 if (e->pos < e->start) { 145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args); 146 va_list tmp;
147 if (!__i915_error_seek(e, len)) 147
148 va_copy(tmp, args);
149 if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
148 return; 150 return;
149 } 151 }
150 152
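vsnprintf() consumes the va_list it is given, so measuring the length from args and then formatting from the same args reads an already-exhausted list; the fix above measures from a va_copy instead. A standalone sketch of the measure-then-format pattern:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *vstrdup_printf(const char *fmt, va_list args)
{
        va_list tmp;
        char *buf;
        int len;

        va_copy(tmp, args);                     /* keep 'args' usable */
        len = vsnprintf(NULL, 0, fmt, tmp);     /* measure only */
        va_end(tmp);
        if (len < 0)
                return NULL;

        buf = malloc(len + 1);
        if (buf)
                vsnprintf(buf, len + 1, fmt, args);
        return buf;
}

static char *strdup_printf(const char *fmt, ...)
{
        va_list args;
        char *s;

        va_start(args, fmt);
        s = vstrdup_printf(fmt, args);
        va_end(args);
        return s;
}

int main(void)
{
        char *s = strdup_printf("%d + %d = %d", 2, 2, 4);

        puts(s ? s : "alloc failed");
        free(s);
        return 0;
}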
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c159e1a6810f..38f96f65d87a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3881,6 +3881,9 @@
3881#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 3881#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
3882#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 3882#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
3883 3883
3884#define HSW_SCRATCH1 0xb038
3885#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
3886
3884#define HSW_FUSE_STRAP 0x42014 3887#define HSW_FUSE_STRAP 0x42014
3885#define HSW_CDCLK_LIMIT (1 << 24) 3888#define HSW_CDCLK_LIMIT (1 << 24)
3886 3889
@@ -4728,6 +4731,9 @@
4728#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 4731#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4729#define DOP_CLOCK_GATING_DISABLE (1<<0) 4732#define DOP_CLOCK_GATING_DISABLE (1<<0)
4730 4733
4734#define HSW_ROW_CHICKEN3 0xe49c
4735#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
4736
4731#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 4737#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
4732#define INTEL_AUDIO_DEVCL 0x808629FB 4738#define INTEL_AUDIO_DEVCL 0x808629FB
4733#define INTEL_AUDIO_DEVBLC 0x80862801 4739#define INTEL_AUDIO_DEVBLC 0x80862801
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d8a1d98693e7..581fb4b2f766 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
3941 * consider. */ 3941 * consider. */
3942void intel_connector_dpms(struct drm_connector *connector, int mode) 3942void intel_connector_dpms(struct drm_connector *connector, int mode)
3943{ 3943{
3944 struct intel_encoder *encoder = intel_attached_encoder(connector);
3945
3946 /* All the simple cases only support two dpms states. */ 3944 /* All the simple cases only support two dpms states. */
3947 if (mode != DRM_MODE_DPMS_ON) 3945 if (mode != DRM_MODE_DPMS_ON)
3948 mode = DRM_MODE_DPMS_OFF; 3946 mode = DRM_MODE_DPMS_OFF;
@@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
3953 connector->dpms = mode; 3951 connector->dpms = mode;
3954 3952
3955 /* Only need to change hw state when actually enabled */ 3953 /* Only need to change hw state when actually enabled */
3956 if (encoder->base.crtc) 3954 if (connector->encoder)
3957 intel_encoder_dpms(encoder, mode); 3955 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3958 else
3959 WARN_ON(encoder->connectors_active != false);
3960 3956
3961 intel_modeset_check_state(connector->dev); 3957 intel_modeset_check_state(connector->dev);
3962} 3958}
@@ -4775,6 +4771,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4775 4771
4776 pipeconf = 0; 4772 pipeconf = 0;
4777 4773
4774 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4775 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4776 pipeconf |= PIPECONF_ENABLE;
4777
4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4779 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4779 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4780 * core speed. 4780 * core speed.
@@ -10045,33 +10045,6 @@ static void i915_disable_vga(struct drm_device *dev)
10045 POSTING_READ(vga_reg); 10045 POSTING_READ(vga_reg);
10046} 10046}
10047 10047
10048static void i915_enable_vga_mem(struct drm_device *dev)
10049{
10050 /* Enable VGA memory on Intel HD */
10051 if (HAS_PCH_SPLIT(dev)) {
10052 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10053 outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10054 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10055 VGA_RSRC_LEGACY_MEM |
10056 VGA_RSRC_NORMAL_IO |
10057 VGA_RSRC_NORMAL_MEM);
10058 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10059 }
10060}
10061
10062void i915_disable_vga_mem(struct drm_device *dev)
10063{
10064 /* Disable VGA memory on Intel HD */
10065 if (HAS_PCH_SPLIT(dev)) {
10066 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10067 outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10068 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10069 VGA_RSRC_NORMAL_IO |
10070 VGA_RSRC_NORMAL_MEM);
10071 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10072 }
10073}
10074
10075void intel_modeset_init_hw(struct drm_device *dev) 10048void intel_modeset_init_hw(struct drm_device *dev)
10076{ 10049{
10077 intel_init_power_well(dev); 10050 intel_init_power_well(dev);
@@ -10350,7 +10323,6 @@ void i915_redisable_vga(struct drm_device *dev)
10350 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10323 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10351 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10324 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10352 i915_disable_vga(dev); 10325 i915_disable_vga(dev);
10353 i915_disable_vga_mem(dev);
10354 } 10326 }
10355} 10327}
10356 10328
@@ -10564,8 +10536,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
10564 10536
10565 intel_disable_fbc(dev); 10537 intel_disable_fbc(dev);
10566 10538
10567 i915_enable_vga_mem(dev);
10568
10569 intel_disable_gt_powersave(dev); 10539 intel_disable_gt_powersave(dev);
10570 10540
10571 ironlake_teardown_rc6(dev); 10541 ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..2c555f91bfae 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 588 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 589 return -EREMOTEIO;
590 case AUX_NATIVE_REPLY_DEFER: 590 case AUX_NATIVE_REPLY_DEFER:
591 udelay(100); 591 /*
592 * For now, just give more slack to branch devices. We
593 * could check the DPCD for I2C bit rate capabilities,
594 * and if available, adjust the interval. We could also
595 * be more careful with DP-to-Legacy adapters where a
596 * long legacy cable may force very low I2C bit rates.
597 */
598 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
599 DP_DWN_STRM_PORT_PRESENT)
600 usleep_range(500, 600);
601 else
602 usleep_range(300, 400);
592 continue; 603 continue;
593 default: 604 default:
594 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
@@ -1456,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1456 1467
1457 /* Avoid continuous PSR exit by masking memup and hpd */ 1468 /* Avoid continuous PSR exit by masking memup and hpd */
1458 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1469 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
1459 EDP_PSR_DEBUG_MASK_HPD); 1470 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1460 1471
1461 intel_dp->psr_setup_done = true; 1472 intel_dp->psr_setup_done = true;
1462} 1473}
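On a native DEFER the sink wants the transaction retried later, and branch devices or DP-to-legacy adapters may need a longer pause than a direct sink, hence the switch from udelay(100) to a usleep_range() whose window depends on DP_DWN_STRM_PORT_PRESENT. A rough userspace sketch of that retry shape; the delays mirror the patch, everything else is a placeholder:

#include <stdbool.h>
#include <unistd.h>

enum aux_reply { AUX_ACK, AUX_NACK, AUX_DEFER };

static enum aux_reply send_aux(void);   /* placeholder transaction */

static int aux_with_retry(bool downstream_port_present)
{
        int tries;

        for (tries = 0; tries < 7; tries++) {
                switch (send_aux()) {
                case AUX_ACK:
                        return 0;
                case AUX_NACK:
                        return -1;
                case AUX_DEFER:
                        /* give branch devices more slack than direct sinks */
                        usleep(downstream_port_present ? 500 : 300);
                        continue;
                }
        }
        return -1;
}

static enum aux_reply send_aux(void) { return AUX_ACK; }        /* stub */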
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 28cae80495e2..9b7b68fd5d47 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -793,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
793extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 793extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
794extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 794extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
795extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 795extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
796extern void i915_disable_vga_mem(struct drm_device *dev);
797 796
798#endif /* __INTEL_DRV_H__ */ 797#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd176b7296c1..f4c5e95b2d6f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
3864 dev_priv->rps.rpe_delay), 3864 dev_priv->rps.rpe_delay),
3865 dev_priv->rps.rpe_delay); 3865 dev_priv->rps.rpe_delay);
3866 3866
3867 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3868
3869 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3867 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3870 3868
3871 gen6_enable_rps_interrupts(dev); 3869 gen6_enable_rps_interrupts(dev);
@@ -4955,6 +4953,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4955 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 4953 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4956 GEN7_WA_L3_CHICKEN_MODE); 4954 GEN7_WA_L3_CHICKEN_MODE);
4957 4955
4956 /* L3 caching of data atomics doesn't work -- disable it. */
4957 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4958 I915_WRITE(HSW_ROW_CHICKEN3,
4959 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
4960
4958 /* This is required by WaCatErrorRejectionIssue:hsw */ 4961 /* This is required by WaCatErrorRejectionIssue:hsw */
4959 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4962 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4960 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4963 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5681,5 +5684,7 @@ void intel_pm_init(struct drm_device *dev)
5681 5684
5682 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5685 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5683 intel_gen6_powersave_work); 5686 intel_gen6_powersave_work);
5687
5688 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
5684} 5689}
5685 5690
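Moving INIT_DELAYED_WORK() for vlv_work out of the RPS enable path and into intel_pm_init() means the work item is initialized exactly once; re-initializing a delayed work that may already be queued or running is not safe. A kernel-style sketch of the split between one-time setup and the runtime arm, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_priv {
        struct delayed_work resume_work;
};

static void demo_resume_fn(struct work_struct *work)
{
        /* ... deferred power-state work ... */
}

/* one-time setup: the only place INIT_DELAYED_WORK() should run */
static void demo_init(struct demo_priv *p)
{
        INIT_DELAYED_WORK(&p->resume_work, demo_resume_fn);
}

/* runtime path: only (re)arm the work, never re-initialize it */
static void demo_enable(struct demo_priv *p)
{
        schedule_delayed_work(&p->resume_work, msecs_to_jiffies(50));
}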
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
919 /* TV has it's own notion of sync and other mode flags, so clear them. */
920 pipe_config->adjusted_mode.flags = 0;
921
922 /*
923 * FIXME: We don't check whether the input mode is actually what we want
924 * or whether userspace is doing something stupid.
925 */
926
919 return true; 927 return true;
920} 928}
921 929
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
19#include "msm_drv.h" 19#include "msm_drv.h"
20#include "mdp4_kms.h" 20#include "mdp4_kms.h"
21 21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); 22static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25 23
26static int mdp4_hw_init(struct msm_kms *kms) 24static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 008d772384c7..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
18#include "msm_drv.h" 18#include "msm_drv.h"
19#include "msm_gpu.h" 19#include "msm_gpu.h"
20 20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev) 21static void msm_fb_output_poll_changed(struct drm_device *dev)
24{ 22{
25 struct msm_drm_private *priv = dev->dev_private; 23 struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
62 int i, ret; 60 int i, ret;
63 61
64 for (i = 0; i < cnt; i++) { 62 for (i = 0; i < cnt; i++) {
63 /* TODO maybe some day msm iommu won't require this hack: */
64 struct device *msm_iommu_get_ctx(const char *ctx_name);
65 struct device *ctx = msm_iommu_get_ctx(names[i]); 65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx) 66 if (!ctx)
67 continue; 67 continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
199 * imx drm driver on iMX5 199 * imx drm driver on iMX5
200 */ 200 */
201 dev_err(dev->dev, "failed to load kms\n"); 201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(priv->kms); 202 ret = PTR_ERR(kms);
203 goto fail; 203 goto fail;
204 } 204 }
205 205
@@ -697,7 +697,7 @@ static struct drm_driver msm_driver = {
697 .gem_vm_ops = &vm_ops, 697 .gem_vm_ops = &vm_ops,
698 .dumb_create = msm_gem_dumb_create, 698 .dumb_create = msm_gem_dumb_create,
699 .dumb_map_offset = msm_gem_dumb_map_offset, 699 .dumb_map_offset = msm_gem_dumb_map_offset,
700 .dumb_destroy = msm_gem_dumb_destroy, 700 .dumb_destroy = drm_gem_dumb_destroy,
701#ifdef CONFIG_DEBUG_FS 701#ifdef CONFIG_DEBUG_FS
702 .debugfs_init = msm_debugfs_init, 702 .debugfs_init = msm_debugfs_init,
703 .debugfs_cleanup = msm_debugfs_cleanup, 703 .debugfs_cleanup = msm_debugfs_cleanup,
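The error-path fix takes PTR_ERR() from the local kms pointer that actually holds the encoded error; priv->kms is never assigned on that path, so reading it would return garbage. The driver also switches to the core drm_gem_dumb_destroy() helper. A standalone sketch of the error-pointer convention with tiny stand-ins for the kernel helpers:

#include <errno.h>
#include <stdio.h>

#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)    ((long)(p))

struct kms  { int dummy; };
struct priv { struct kms *kms; };

static struct kms *kms_init(int fail)
{
        static struct kms one;

        return fail ? (struct kms *)ERR_PTR(-ENODEV) : &one;
}

static long load(struct priv *priv, int fail)
{
        struct kms *kms = kms_init(fail);

        if (IS_ERR(kms))
                return PTR_ERR(kms);    /* read the code from 'kms' itself */

        priv->kms = kms;
        return 0;
}

int main(void)
{
        struct priv p = { 0 };

        printf("ok path: %ld\n", load(&p, 0));
        printf("fail path: %ld\n", load(&p, 1));
        return 0;
}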
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 29eacfa29cfb..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); 319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
320} 320}
321 321
322int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
323 uint32_t handle)
324{
325 /* No special work needed, drop the reference and see what falls out */
326 return drm_gem_handle_delete(file, handle);
327}
328
329int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 322int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
330 uint32_t handle, uint64_t *offset) 323 uint32_t handle, uint64_t *offset)
331{ 324{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 37712a6df923..e290cfa4acee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
113 pmc->use_msi = false; 113 pmc->use_msi = false;
114 break; 114 break;
115 default: 115 default:
116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true); 116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
117 if (pmc->use_msi) { 117 if (pmc->use_msi) {
118 pmc->use_msi = pci_enable_msi(device->pdev) == 0; 118 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
119 if (pmc->use_msi) { 119 if (pmc->use_msi) {
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 05ff315e8e9e..9b6950d9b3c0 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1168 { 25000, 30000, RADEON_SCLK_UP } 1168 { 25000, 30000, RADEON_SCLK_UP }
1169}; 1169};
1170 1170
1171void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1172 u32 *max_clock)
1173{
1174 u32 i, clock = 0;
1175
1176 if ((table == NULL) || (table->count == 0)) {
1177 *max_clock = clock;
1178 return;
1179 }
1180
1181 for (i = 0; i < table->count; i++) {
1182 if (clock < table->entries[i].clk)
1183 clock = table->entries[i].clk;
1184 }
1185 *max_clock = clock;
1186}
1187
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1188void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage) 1189 u32 clock, u16 max_voltage, u16 *voltage)
1173{ 1190{
@@ -1913,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1913 } 1930 }
1914 j++; 1931 j++;
1915 1932
1916 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1933 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1917 return -EINVAL; 1934 return -EINVAL;
1918 1935
1919 tmp = RREG32(MC_PMG_CMD_MRS); 1936 tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1928,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1928 } 1945 }
1929 j++; 1946 j++;
1930 1947
1931 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1948 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1932 return -EINVAL; 1949 return -EINVAL;
1933 break; 1950 break;
1934 case MC_SEQ_RESERVE_M >> 2: 1951 case MC_SEQ_RESERVE_M >> 2:
@@ -1942,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1942 } 1959 }
1943 j++; 1960 j++;
1944 1961
1945 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1962 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1946 return -EINVAL; 1963 return -EINVAL;
1947 break; 1964 break;
1948 default: 1965 default:
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2080 bool disable_mclk_switching; 2097 bool disable_mclk_switching;
2081 u32 mclk, sclk; 2098 u32 mclk, sclk;
2082 u16 vddc, vddci; 2099 u16 vddc, vddci;
2100 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2083 2101
2084 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2102 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2085 btc_dpm_vblank_too_short(rdev)) 2103 btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2121 ps->low.vddci = max_limits->vddci; 2139 ps->low.vddci = max_limits->vddci;
2122 } 2140 }
2123 2141
2142 /* limit clocks to max supported clocks based on voltage dependency tables */
2143 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2144 &max_sclk_vddc);
2145 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2146 &max_mclk_vddci);
2147 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2148 &max_mclk_vddc);
2149
2150 if (max_sclk_vddc) {
2151 if (ps->low.sclk > max_sclk_vddc)
2152 ps->low.sclk = max_sclk_vddc;
2153 if (ps->medium.sclk > max_sclk_vddc)
2154 ps->medium.sclk = max_sclk_vddc;
2155 if (ps->high.sclk > max_sclk_vddc)
2156 ps->high.sclk = max_sclk_vddc;
2157 }
2158 if (max_mclk_vddci) {
2159 if (ps->low.mclk > max_mclk_vddci)
2160 ps->low.mclk = max_mclk_vddci;
2161 if (ps->medium.mclk > max_mclk_vddci)
2162 ps->medium.mclk = max_mclk_vddci;
2163 if (ps->high.mclk > max_mclk_vddci)
2164 ps->high.mclk = max_mclk_vddci;
2165 }
2166 if (max_mclk_vddc) {
2167 if (ps->low.mclk > max_mclk_vddc)
2168 ps->low.mclk = max_mclk_vddc;
2169 if (ps->medium.mclk > max_mclk_vddc)
2170 ps->medium.mclk = max_mclk_vddc;
2171 if (ps->high.mclk > max_mclk_vddc)
2172 ps->high.mclk = max_mclk_vddc;
2173 }
2174
2124 /* XXX validate the min clocks required for display */ 2175 /* XXX validate the min clocks required for display */
2125 2176
2126 if (disable_mclk_switching) { 2177 if (disable_mclk_switching) {
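The new btc_get_max_clock_from_voltage_dependency_table() helper scans a voltage-dependency table for the highest clock that has an entry, and each power-state level is then clamped to that value; a result of 0 means "no table, no limit". A standalone sketch of the scan-and-clamp logic with simplified types:

#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint32_t clk; uint16_t v; };
struct dep_table { unsigned int count; const struct dep_entry *entries; };

/* highest clock present in the table, or 0 if the table is empty/missing */
static uint32_t max_clock_from_table(const struct dep_table *t)
{
        uint32_t max = 0;
        unsigned int i;

        if (!t || !t->count)
                return 0;
        for (i = 0; i < t->count; i++)
                if (t->entries[i].clk > max)
                        max = t->entries[i].clk;
        return max;
}

static uint32_t clamp_clock(uint32_t clk, uint32_t limit)
{
        return (limit && clk > limit) ? limit : clk;
}

int main(void)
{
        static const struct dep_entry e[] = { { 30000, 900 }, { 60000, 1100 } };
        const struct dep_table t = { 2, e };
        uint32_t max = max_clock_from_table(&t);

        printf("max supported sclk: %u\n", max);
        printf("requested 75000 -> %u\n", clamp_clock(75000, max));
        return 0;
}

The same clamp is applied below in the CI (ci_dpm.c) and NI (ni_dpm.c) state-adjust paths.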
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 899627443030..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
146}; 146};
147 147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
150 u32 *max_clock);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 151extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest); 152 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 153extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
712 struct radeon_clock_and_voltage_limits *max_limits; 714 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching; 715 bool disable_mclk_switching;
714 u32 sclk, mclk; 716 u32 sclk, mclk;
717 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
715 int i; 718 int i;
716 719
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 720 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
739 } 742 }
740 } 743 }
741 744
745 /* limit clocks to max supported clocks based on voltage dependency tables */
746 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
747 &max_sclk_vddc);
748 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
749 &max_mclk_vddci);
750 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
751 &max_mclk_vddc);
752
753 for (i = 0; i < ps->performance_level_count; i++) {
754 if (max_sclk_vddc) {
755 if (ps->performance_levels[i].sclk > max_sclk_vddc)
756 ps->performance_levels[i].sclk = max_sclk_vddc;
757 }
758 if (max_mclk_vddci) {
759 if (ps->performance_levels[i].mclk > max_mclk_vddci)
760 ps->performance_levels[i].mclk = max_mclk_vddci;
761 }
762 if (max_mclk_vddc) {
763 if (ps->performance_levels[i].mclk > max_mclk_vddc)
764 ps->performance_levels[i].mclk = max_mclk_vddc;
765 }
766 }
767
742 /* XXX validate the min clocks required for display */ 768 /* XXX validate the min clocks required for display */
743 769
744 if (disable_mclk_switching) { 770 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index adbdb6503b05..b874ccdf52f7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev); 78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev); 79static void cik_init_cg(struct radeon_device *rdev);
80static void cik_fini_pg(struct radeon_device *rdev);
81static void cik_fini_cg(struct radeon_device *rdev);
80static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, 82static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
81 bool enable); 83 bool enable);
82 84
@@ -2845,10 +2847,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2845 rdev->config.cik.tile_config |= (3 << 0); 2847 rdev->config.cik.tile_config |= (3 << 0);
2846 break; 2848 break;
2847 } 2849 }
2848 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 2850 rdev->config.cik.tile_config |=
2849 rdev->config.cik.tile_config |= 1 << 4; 2851 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
2850 else
2851 rdev->config.cik.tile_config |= 0 << 4;
2852 rdev->config.cik.tile_config |= 2852 rdev->config.cik.tile_config |=
2853 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 2853 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
2854 rdev->config.cik.tile_config |= 2854 rdev->config.cik.tile_config |=
@@ -4187,6 +4187,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
4187 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 4187 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4188 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 4188 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
4189 4189
4190 /* disable CG/PG */
4191 cik_fini_pg(rdev);
4192 cik_fini_cg(rdev);
4193
4190 /* stop the rlc */ 4194 /* stop the rlc */
4191 cik_rlc_stop(rdev); 4195 cik_rlc_stop(rdev);
4192 4196
@@ -4456,8 +4460,8 @@ static int cik_mc_init(struct radeon_device *rdev)
4456 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 4460 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4457 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 4461 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4458 /* size in MB on si */ 4462 /* size in MB on si */
4459 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4463 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4460 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4464 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4461 rdev->mc.visible_vram_size = rdev->mc.aper_size; 4465 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4462 si_vram_gtt_location(rdev, &rdev->mc); 4466 si_vram_gtt_location(rdev, &rdev->mc);
4463 radeon_update_bandwidth_info(rdev); 4467 radeon_update_bandwidth_info(rdev);
@@ -4735,12 +4739,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
4735 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; 4739 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4736 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; 4740 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4737 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; 4741 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4738 char *block = (char *)&mc_client; 4742 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
4743 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
4739 4744
4740 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", 4745 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
4741 protections, vmid, addr, 4746 protections, vmid, addr,
4742 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", 4747 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4743 block, mc_id); 4748 block, mc_client, mc_id);
4744} 4749}
4745 4750
4746/** 4751/**
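Two of the cik.c hunks are overflow and aliasing fixes: CONFIG_MEMSIZE reports VRAM in MiB, so the product must be widened with 1024ULL before a 4 GiB board wraps the 32-bit multiply, and the fault client ID is now decoded byte by byte into a terminated string instead of reinterpreting the u32 through a char pointer (which is endian-dependent and unterminated). A standalone sketch of both:

#include <stdint.h>
#include <stdio.h>

/* widen before multiplying so sizes >= 4096 MiB do not wrap in 32 bits */
static uint64_t vram_bytes(uint32_t size_in_mib)
{
        return (uint64_t)size_in_mib * 1024ULL * 1024ULL;
}

/* unpack a packed four-character client tag into a terminated string */
static void decode_client(uint32_t id, char out[5])
{
        out[0] = id >> 24;
        out[1] = (id >> 16) & 0xff;
        out[2] = (id >> 8) & 0xff;
        out[3] = id & 0xff;
        out[4] = '\0';
}

int main(void)
{
        char tag[5];

        printf("4096 MiB = %llu bytes\n",
               (unsigned long long)vram_bytes(4096));
        decode_client(0x43423130, tag); /* made-up example value, "CB10" */
        printf("fault client: '%s'\n", tag);
        return 0;
}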
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 555164e270a7..b5c67a99dda9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3131 rdev->config.evergreen.sx_max_export_size = 256; 3131 rdev->config.evergreen.sx_max_export_size = 256;
3132 rdev->config.evergreen.sx_max_export_pos_size = 64; 3132 rdev->config.evergreen.sx_max_export_pos_size = 64;
3133 rdev->config.evergreen.sx_max_export_smx_size = 192; 3133 rdev->config.evergreen.sx_max_export_smx_size = 192;
3134 rdev->config.evergreen.max_hw_contexts = 8; 3134 rdev->config.evergreen.max_hw_contexts = 4;
3135 rdev->config.evergreen.sq_num_cf_insts = 2; 3135 rdev->config.evergreen.sq_num_cf_insts = 2;
3136 3136
3137 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 3137 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f71ce390aebe..f815c20640bd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -288,8 +288,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
288 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 288 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
289 289
290 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 290 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
291 HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */ 291 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
292 HDMI_ACR_SOURCE); /* select SW CTS value */
293 292
294 evergreen_hdmi_update_ACR(encoder, mode->clock); 293 evergreen_hdmi_update_ACR(encoder, mode->clock);
295 294
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 8768fd6a1e27..4f6d2962767d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1501,7 +1501,7 @@
1501 * 6. COMMAND [29:22] | BYTE_COUNT [20:0] 1501 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1502 */ 1502 */
1503# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1503# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1504 /* 0 - SRC_ADDR 1504 /* 0 - DST_ADDR
1505 * 1 - GDS 1505 * 1 - GDS
1506 */ 1506 */
1507# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1507# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1516,7 +1516,7 @@
1516# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1516# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1517/* COMMAND */ 1517/* COMMAND */
1518# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1518# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1519# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1519# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1520 /* 0 - none 1520 /* 0 - none
1521 * 1 - 8 in 16 1521 * 1 - 8 in 16
1522 * 2 - 8 in 32 1522 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 6c398a456d78..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk, sclk;
789 u16 vddc, vddci; 789 u16 vddc, vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
790 int i; 791 int i;
791 792
792 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 793 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
813 } 814 }
814 } 815 }
815 816
817 /* limit clocks to max supported clocks based on voltage dependency tables */
818 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
819 &max_sclk_vddc);
820 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
821 &max_mclk_vddci);
822 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
823 &max_mclk_vddc);
824
825 for (i = 0; i < ps->performance_level_count; i++) {
826 if (max_sclk_vddc) {
827 if (ps->performance_levels[i].sclk > max_sclk_vddc)
828 ps->performance_levels[i].sclk = max_sclk_vddc;
829 }
830 if (max_mclk_vddci) {
831 if (ps->performance_levels[i].mclk > max_mclk_vddci)
832 ps->performance_levels[i].mclk = max_mclk_vddci;
833 }
834 if (max_mclk_vddc) {
835 if (ps->performance_levels[i].mclk > max_mclk_vddc)
836 ps->performance_levels[i].mclk = max_mclk_vddc;
837 }
838 }
839
816 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
817 841
818 if (disable_mclk_switching) { 842 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 24175717307b..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2933 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2933 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2934 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2934 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2935 seq_printf(m, "%u dwords in ring\n", count); 2935 seq_printf(m, "%u dwords in ring\n", count);
2936 for (j = 0; j <= count; j++) { 2936 if (ring->ready) {
2937 i = (rdp + j) & ring->ptr_mask; 2937 for (j = 0; j <= count; j++) {
2938 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2938 i = (rdp + j) & ring->ptr_mask;
2939 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2940 }
2939 } 2941 }
2940 return 0; 2942 return 0;
2941} 2943}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e65f211a7be0..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
1087 le16_to_cpu(limits->entries[i].usVoltage); 1087 le16_to_cpu(entry->usVoltage);
1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
1090 } 1090 }
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..5b729319f27b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
57static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { 57static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
58 /* 32kHz 44.1kHz 48kHz */ 58 /* 32kHz 44.1kHz 48kHz */
59 /* Clock N CTS N CTS N CTS */ 59 /* Clock N CTS N CTS N CTS */
60 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ 60 { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
61 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ 61 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
62 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ 62 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
63 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ 63 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
64 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ 64 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
65 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ 65 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
66 { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ 66 { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
67 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ 67 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
68 { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ 68 { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
69 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ 69 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
70 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */ 70 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
71}; 71};
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
75 */ 75 */
76static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq) 76static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
77{ 77{
78 if (*CTS == 0) 78 u64 n;
79 *CTS = clock * N / (128 * freq) * 1000; 79 u32 d;
80
81 if (*CTS == 0) {
82 n = (u64)clock * (u64)N * 1000ULL;
83 d = 128 * freq;
84 do_div(n, d);
85 *CTS = n;
86 }
80 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", 87 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
81 N, *CTS, freq); 88 N, *CTS, freq);
82} 89}
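The fallback CTS value is clock * N * 1000 / (128 * freq). The old expression divided before multiplying by 1000, throwing away precision, and doing the full product first needs 64-bit arithmetic, hence the do_div() in the fix. A standalone sketch of the same computation:

#include <stdint.h>
#include <stdio.h>

/* CTS = (clock_kHz * N * 1000) / (128 * freq_Hz), computed in 64 bits */
static uint32_t hdmi_cts(uint32_t clock_khz, uint32_t n, uint32_t freq_hz)
{
        uint64_t num = (uint64_t)clock_khz * n * 1000ULL;

        return (uint32_t)(num / (128ULL * freq_hz));
}

int main(void)
{
        /* 148.5 MHz pixel clock, 48 kHz audio, N = 6144 -> CTS = 148500 */
        printf("CTS = %u\n", hdmi_cts(148500, 6144, 48000));
        return 0;
}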
@@ -257,10 +264,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
257 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 264 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
258 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 265 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
259 */ 266 */
260 if (ASIC_IS_DCE3(rdev)) { 267 if (ASIC_IS_DCE32(rdev)) {
261 /* according to the reg specs, this should DCE3.2 only, but in
262 * practice it seems to cover DCE3.0 as well.
263 */
264 if (dig->dig_encoder == 0) { 268 if (dig->dig_encoder == 0) {
265 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; 269 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
266 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); 270 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +280,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
276 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 280 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
277 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 281 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
278 } 282 }
283 } else if (ASIC_IS_DCE3(rdev)) {
284 /* according to the reg specs, this should DCE3.2 only, but in
285 * practice it seems to cover DCE3.0/3.1 as well.
286 */
287 if (dig->dig_encoder == 0) {
288 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
289 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
290 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
291 } else {
292 WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
293 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
294 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
295 }
279 } else { 296 } else {
280 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ 297 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
281 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | 298 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
282 AUDIO_DTO_MODULE(clock / 10)); 299 AUDIO_DTO_MODULE(clock / 10));
283 } 300 }
@@ -434,8 +451,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
434 } 451 }
435 452
436 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 453 WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
 437 HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */ 454 HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
 438 HDMI0_ACR_SOURCE); /* select SW CTS value */ 455 HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
439 456
440 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 457 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
441 HDMI0_NULL_SEND | /* send null packets when required */ 458 HDMI0_NULL_SEND | /* send null packets when required */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index e673fe26ea84..7b3c7b5932c5 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1523,7 +1523,7 @@
1523 */ 1523 */
1524# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1524# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1525/* COMMAND */ 1525/* COMMAND */
1526# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1526# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1527 /* 0 - none 1527 /* 0 - none
1528 * 1 - 8 in 16 1528 * 1 - 8 in 16
1529 * 2 - 8 in 32 1529 * 2 - 8 in 32
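
The PACKET3_CP_DMA_CMD_SRC_SWAP change is a one-bit fix: the field lives at bit 22, not 23 (the same correction appears again in sid.h further down). With shift-based field macros, an off-by-one shift silently programs a neighbouring bit instead of the intended field. Illustrative sketch only — the names below are made up, not the real packet layout:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical COMMAND word: DIS_WC at bit 21, a 2-bit swap field at [23:22] */
#define CMD_DIS_WC		(1u << 21)
#define CMD_SRC_SWAP(x)		((uint32_t)(x) << 22)	/* corrected shift */
#define CMD_SRC_SWAP_OLD(x)	((uint32_t)(x) << 23)	/* old, off-by-one shift */

int main(void)
{
	/* encoding 2 ("8 in 32") in the swap field */
	printf("fixed: 0x%08x\n", (unsigned)(CMD_DIS_WC | CMD_SRC_SWAP(2)));
	printf("old:   0x%08x\n", (unsigned)(CMD_DIS_WC | CMD_SRC_SWAP_OLD(2)));
	return 0;
}
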
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 5003385a7512..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
1004 .wait_for_vblank = &avivo_wait_for_vblank, 1004 .wait_for_vblank = &avivo_wait_for_vblank,
1005 .set_backlight_level = &atombios_set_backlight_level, 1005 .set_backlight_level = &atombios_set_backlight_level,
1006 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1007 .hdmi_enable = &r600_hdmi_enable,
1008 .hdmi_setmode = &r600_hdmi_setmode,
1007 }, 1009 },
1008 .copy = { 1010 .copy = {
1009 .blit = &r600_copy_cpdma, 1011 .blit = &r600_copy_cpdma,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); 1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
1368 uint16_t data_offset, size; 1368 uint16_t data_offset, size;
1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
1370 struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
1370 uint8_t frev, crev; 1371 uint8_t frev, crev;
1371 int i, num_indices; 1372 int i, num_indices;
1372 1373
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1378 1379
1379 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1380 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1380 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); 1381 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1381 1382 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1383 ((u8 *)&ss_info->asSS_Info[0]);
1382 for (i = 0; i < num_indices; i++) { 1384 for (i = 0; i < num_indices; i++) {
1383 if (ss_info->asSS_Info[i].ucSS_Id == id) { 1385 if (ss_assign->ucSS_Id == id) {
1384 ss->percentage = 1386 ss->percentage =
1385 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); 1387 le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
1386 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; 1388 ss->type = ss_assign->ucSpreadSpectrumType;
1387 ss->step = ss_info->asSS_Info[i].ucSS_Step; 1389 ss->step = ss_assign->ucSS_Step;
1388 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1390 ss->delay = ss_assign->ucSS_Delay;
1389 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1391 ss->range = ss_assign->ucSS_Range;
1390 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1392 ss->refdiv = ss_assign->ucRecommendedRef_Div;
1391 return true; 1393 return true;
1392 } 1394 }
1395 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1396 ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
1393 } 1397 }
1394 } 1398 }
1395 return false; 1399 return false;
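
The rewritten loop in radeon_atombios_get_ppll_ss_info() no longer indexes asSS_Info[i]; it advances a byte pointer by sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT) per iteration, so the stride through the firmware table is explicit instead of whatever the indexed array type implies. A standalone sketch of that walk-by-byte-stride idiom, with a hypothetical packed entry type and helper name:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical packed table entry; the point is that the stride is passed
 * in explicitly rather than derived from array indexing. */
struct entry_v1 {
	uint8_t id;
	uint16_t percentage;
} __attribute__((packed));

static const struct entry_v1 *find_entry(const void *table, size_t stride,
					 int count, uint8_t id)
{
	const uint8_t *p = table;
	int i;

	for (i = 0; i < count; i++, p += stride) {
		const struct entry_v1 *e = (const struct entry_v1 *)p;

		if (e->id == id)
			return e;
	}
	return NULL;
}

int main(void)
{
	struct entry_v1 tbl[3] = { { 1, 100 }, { 2, 250 }, { 3, 300 } };
	const struct entry_v1 *e = find_entry(tbl, sizeof(tbl[0]), 3, 2);

	if (e)
		printf("id=%u percentage=%u\n", e->id, e->percentage);
	return 0;
}
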
@@ -1477,6 +1481,12 @@ union asic_ss_info {
1477 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; 1481 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
1478}; 1482};
1479 1483
1484union asic_ss_assignment {
1485 struct _ATOM_ASIC_SS_ASSIGNMENT v1;
1486 struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
1487 struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
1488};
1489
1480bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 1490bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1481 struct radeon_atom_ss *ss, 1491 struct radeon_atom_ss *ss,
1482 int id, u32 clock) 1492 int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1485 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1495 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1486 uint16_t data_offset, size; 1496 uint16_t data_offset, size;
1487 union asic_ss_info *ss_info; 1497 union asic_ss_info *ss_info;
1498 union asic_ss_assignment *ss_assign;
1488 uint8_t frev, crev; 1499 uint8_t frev, crev;
1489 int i, num_indices; 1500 int i, num_indices;
1490 1501
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1509 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1520 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1510 sizeof(ATOM_ASIC_SS_ASSIGNMENT); 1521 sizeof(ATOM_ASIC_SS_ASSIGNMENT);
1511 1522
1523 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
1512 for (i = 0; i < num_indices; i++) { 1524 for (i = 0; i < num_indices; i++) {
1513 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1525 if ((ss_assign->v1.ucClockIndication == id) &&
1514 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { 1526 (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
1515 ss->percentage = 1527 ss->percentage =
1516 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1528 le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
1517 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1529 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
1518 ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); 1530 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
1519 return true; 1531 return true;
1520 } 1532 }
1533 ss_assign = (union asic_ss_assignment *)
1534 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1521 } 1535 }
1522 break; 1536 break;
1523 case 2: 1537 case 2:
1524 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1538 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1525 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1539 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1540 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
1526 for (i = 0; i < num_indices; i++) { 1541 for (i = 0; i < num_indices; i++) {
1527 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1542 if ((ss_assign->v2.ucClockIndication == id) &&
1528 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { 1543 (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
1529 ss->percentage = 1544 ss->percentage =
1530 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1545 le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
1531 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1546 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
1532 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1547 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
1533 if ((crev == 2) && 1548 if ((crev == 2) &&
1534 ((id == ASIC_INTERNAL_ENGINE_SS) || 1549 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))) 1550 (id == ASIC_INTERNAL_MEMORY_SS)))
1536 ss->rate /= 100; 1551 ss->rate /= 100;
1537 return true; 1552 return true;
1538 } 1553 }
1554 ss_assign = (union asic_ss_assignment *)
1555 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
1539 } 1556 }
1540 break; 1557 break;
1541 case 3: 1558 case 3:
1542 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1559 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1543 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1560 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1561 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
1544 for (i = 0; i < num_indices; i++) { 1562 for (i = 0; i < num_indices; i++) {
1545 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1563 if ((ss_assign->v3.ucClockIndication == id) &&
1546 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { 1564 (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
1547 ss->percentage = 1565 ss->percentage =
1548 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1566 le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
1549 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1567 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
1550 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1568 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
1551 if ((id == ASIC_INTERNAL_ENGINE_SS) || 1569 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1552 (id == ASIC_INTERNAL_MEMORY_SS)) 1570 (id == ASIC_INTERNAL_MEMORY_SS))
1553 ss->rate /= 100; 1571 ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1555 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1573 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1556 return true; 1574 return true;
1557 } 1575 }
1576 ss_assign = (union asic_ss_assignment *)
1577 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
1558 } 1578 }
1559 break; 1579 break;
1560 default: 1580 default:
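
The union asic_ss_assignment introduced above lets one walker cursor be reinterpreted per table revision (v1/v2/v3 selected by crev), while each case still advances by its own entry size, exactly as in the loops that follow. A compact sketch with hypothetical layouts and field names:

#include <stdio.h>
#include <stdint.h>

struct ss_v1 { uint8_t clock_id; uint16_t rate_khz;  } __attribute__((packed));
struct ss_v2 { uint8_t clock_id; uint32_t rate_10hz; } __attribute__((packed));

union ss_assignment {
	struct ss_v1 v1;
	struct ss_v2 v2;
};

/* Same cursor, different interpretation depending on the table revision. */
static unsigned int rate_khz(const union ss_assignment *a, int rev)
{
	return rev == 1 ? a->v1.rate_khz : a->v2.rate_10hz / 100;
}

int main(void)
{
	union ss_assignment a;

	a.v2.clock_id = 3;
	a.v2.rate_10hz = 4000;			/* 4000 * 10 Hz = 40 kHz */
	printf("%u kHz\n", rate_khz(&a, 2));
	return 0;
}
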
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ac6ece61a476..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 85 VRAM, also put everything into VRAM on AGP cards to avoid 85 VRAM, also put everything into VRAM on AGP cards to avoid
86 image corruptions */ 86 image corruptions */
87 if (p->ring == R600_RING_TYPE_UVD_INDEX && 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { 88 p->rdev->family < CHIP_PALM &&
89 /* TODO: is this still needed for NI+ ? */ 89 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
90
90 p->relocs[i].lobj.domain = 91 p->relocs[i].lobj.domain =
91 RADEON_GEM_DOMAIN_VRAM; 92 RADEON_GEM_DOMAIN_VRAM;
92 93
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e29faa73b574..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
1320 return r; 1320 return r;
1321 } 1321 }
1322 if ((radeon_testing & 1)) { 1322 if ((radeon_testing & 1)) {
1323 radeon_test_moves(rdev); 1323 if (rdev->accel_working)
1324 radeon_test_moves(rdev);
1325 else
1326 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1324 } 1327 }
1325 if ((radeon_testing & 2)) { 1328 if ((radeon_testing & 2)) {
1326 radeon_test_syncing(rdev); 1329 if (rdev->accel_working)
1330 radeon_test_syncing(rdev);
1331 else
1332 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1327 } 1333 }
1328 if (radeon_benchmarking) { 1334 if (radeon_benchmarking) {
1329 radeon_benchmark(rdev, radeon_benchmarking); 1335 if (rdev->accel_working)
1336 radeon_benchmark(rdev, radeon_benchmarking);
1337 else
1338 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1330 } 1339 }
1331 return 0; 1340 return 0;
1332} 1341}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 87e1d69e8fdb..4f6b7fc7ad3c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -945,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
945 if (enable) { 945 if (enable) {
946 mutex_lock(&rdev->pm.mutex); 946 mutex_lock(&rdev->pm.mutex);
947 rdev->pm.dpm.uvd_active = true; 947 rdev->pm.dpm.uvd_active = true;
948 /* disable this for now */
949#if 0
948 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 950 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
949 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 951 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
950 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 952 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -954,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
954 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 956 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
955 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 957 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
956 else 958 else
959#endif
957 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 960 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
958 rdev->pm.dpm.state = dpm_state; 961 rdev->pm.dpm.state = dpm_state;
959 mutex_unlock(&rdev->pm.mutex); 962 mutex_unlock(&rdev->pm.mutex);
@@ -1002,7 +1005,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
1002{ 1005{
1003 /* set up the default clocks if the MC ucode is loaded */ 1006 /* set up the default clocks if the MC ucode is loaded */
1004 if ((rdev->family >= CHIP_BARTS) && 1007 if ((rdev->family >= CHIP_BARTS) &&
1005 (rdev->family <= CHIP_HAINAN) && 1008 (rdev->family <= CHIP_CAYMAN) &&
1006 rdev->mc_fw) { 1009 rdev->mc_fw) {
1007 if (rdev->pm.default_vddc) 1010 if (rdev->pm.default_vddc)
1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1011 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1049,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1046 if (ret) { 1049 if (ret) {
1047 DRM_ERROR("radeon: dpm resume failed\n"); 1050 DRM_ERROR("radeon: dpm resume failed\n");
1048 if ((rdev->family >= CHIP_BARTS) && 1051 if ((rdev->family >= CHIP_BARTS) &&
1049 (rdev->family <= CHIP_HAINAN) && 1052 (rdev->family <= CHIP_CAYMAN) &&
1050 rdev->mc_fw) { 1053 rdev->mc_fw) {
1051 if (rdev->pm.default_vddc) 1054 if (rdev->pm.default_vddc)
1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1055 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1100,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1097 radeon_pm_init_profile(rdev); 1100 radeon_pm_init_profile(rdev);
1098 /* set up the default clocks if the MC ucode is loaded */ 1101 /* set up the default clocks if the MC ucode is loaded */
1099 if ((rdev->family >= CHIP_BARTS) && 1102 if ((rdev->family >= CHIP_BARTS) &&
1100 (rdev->family <= CHIP_HAINAN) && 1103 (rdev->family <= CHIP_CAYMAN) &&
1101 rdev->mc_fw) { 1104 rdev->mc_fw) {
1102 if (rdev->pm.default_vddc) 1105 if (rdev->pm.default_vddc)
1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1106 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1186,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1183 if (ret) { 1186 if (ret) {
1184 rdev->pm.dpm_enabled = false; 1187 rdev->pm.dpm_enabled = false;
1185 if ((rdev->family >= CHIP_BARTS) && 1188 if ((rdev->family >= CHIP_BARTS) &&
1186 (rdev->family <= CHIP_HAINAN) && 1189 (rdev->family <= CHIP_CAYMAN) &&
1187 rdev->mc_fw) { 1190 rdev->mc_fw) {
1188 if (rdev->pm.default_vddc) 1191 if (rdev->pm.default_vddc)
1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1192 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
839 * packet that is the root issue 839 * packet that is the root issue
840 */ 840 */
841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
842 for (j = 0; j <= (count + 32); j++) { 842 if (ring->ready) {
843 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); 843 for (j = 0; j <= (count + 32); j++) {
844 i = (i + 1) & ring->ptr_mask; 844 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
845 i = (i + 1) & ring->ptr_mask;
846 }
845 } 847 }
846 return 0; 848 return 0;
847} 849}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f4d6bcee9006..12e8099a0823 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
36 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
37 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
38 uint64_t gtt_addr, vram_addr; 38 uint64_t gtt_addr, vram_addr;
39 unsigned i, n, size; 39 unsigned n, size;
40 int r, ring; 40 int i, r, ring;
41 41
42 switch (flag) { 42 switch (flag) {
43 case RADEON_TEST_COPY_DMA: 43 case RADEON_TEST_COPY_DMA:
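
The radeon_test.c change only moves i between declaration lists, but the effect is that the loop index becomes signed. The usual reason for that (the unwind code itself is outside this hunk, so this is inferred) is a reverse cleanup loop of the form for (--i; i >= 0; --i): with an unsigned index the i >= 0 test never becomes false. A standalone illustration:

#include <stdio.h>

#define N 4

int main(void)
{
	int allocated[N] = { 0 };
	int i;			/* must be signed for the unwind below */
	int failed = 0;

	for (i = 0; i < N; i++) {
		allocated[i] = 1;
		if (i == 2) {	/* simulate a failure part-way through */
			failed = 1;
			break;
		}
	}

	if (failed) {
		/* Unwind in reverse; with "unsigned i" the i >= 0 test would
		 * always hold and the loop would run off the array. */
		for (; i >= 0; i--) {
			allocated[i] = 0;
			printf("released %d\n", i);
		}
	}
	return 0;
}
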
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..4f2e73f79638 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 /* TODO: is this still necessary on NI+ ? */ 479 if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
480 if ((cmd == 0 || cmd == 0x3) &&
481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 480 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 481 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
483 start, end); 482 start, end);
@@ -799,7 +798,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
799 (rdev->pm.dpm.hd != hd)) { 798 (rdev->pm.dpm.hd != hd)) {
800 rdev->pm.dpm.sd = sd; 799 rdev->pm.dpm.sd = sd;
801 rdev->pm.dpm.hd = hd; 800 rdev->pm.dpm.hd = hd;
802 streams_changed = true; 801 /* disable this for now */
802 /*streams_changed = true;*/
803 } 803 }
804 } 804 }
805 805
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c354c1094967..d4652af425b8 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -85,6 +85,9 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
85 uint32_t incr, uint32_t flags); 85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable); 87 bool enable);
88static void si_fini_pg(struct radeon_device *rdev);
89static void si_fini_cg(struct radeon_device *rdev);
90static void si_rlc_stop(struct radeon_device *rdev);
88 91
89static const u32 verde_rlc_save_restore_register_list[] = 92static const u32 verde_rlc_save_restore_register_list[] =
90{ 93{
@@ -3608,6 +3611,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3608 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 3611 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3609 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 3612 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3610 3613
3614 /* disable PG/CG */
3615 si_fini_pg(rdev);
3616 si_fini_cg(rdev);
3617
3618 /* stop the rlc */
3619 si_rlc_stop(rdev);
3620
3611 /* Disable CP parsing/prefetching */ 3621 /* Disable CP parsing/prefetching */
3612 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 3622 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3613 3623
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cfe5d4d28915..2332aa1bf93c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2910 bool disable_sclk_switching = false; 2910 bool disable_sclk_switching = false;
2911 u32 mclk, sclk; 2911 u32 mclk, sclk;
2912 u16 vddc, vddci; 2912 u16 vddc, vddci;
2913 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2913 int i; 2914 int i;
2914 2915
2915 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2916 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2943 } 2944 }
2944 } 2945 }
2945 2946
2947 /* limit clocks to max supported clocks based on voltage dependency tables */
2948 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2949 &max_sclk_vddc);
2950 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2951 &max_mclk_vddci);
2952 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2953 &max_mclk_vddc);
2954
2955 for (i = 0; i < ps->performance_level_count; i++) {
2956 if (max_sclk_vddc) {
2957 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2958 ps->performance_levels[i].sclk = max_sclk_vddc;
2959 }
2960 if (max_mclk_vddci) {
2961 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2962 ps->performance_levels[i].mclk = max_mclk_vddci;
2963 }
2964 if (max_mclk_vddc) {
2965 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2966 ps->performance_levels[i].mclk = max_mclk_vddc;
2967 }
2968 }
2969
2946 /* XXX validate the min clocks required for display */ 2970 /* XXX validate the min clocks required for display */
2947 2971
2948 if (disable_mclk_switching) { 2972 if (disable_mclk_switching) {
@@ -5184,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5184 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 5208 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5185 } 5209 }
5186 j++; 5210 j++;
5187 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5211 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5188 return -EINVAL; 5212 return -EINVAL;
5189 5213
5190 if (!pi->mem_gddr5) { 5214 if (!pi->mem_gddr5) {
@@ -5194,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5194 table->mc_reg_table_entry[k].mc_data[j] = 5218 table->mc_reg_table_entry[k].mc_data[j] =
5195 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 5219 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5196 j++; 5220 j++;
5197 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5221 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5198 return -EINVAL; 5222 return -EINVAL;
5199 } 5223 }
5200 break; 5224 break;
@@ -5207,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5207 (temp_reg & 0xffff0000) | 5231 (temp_reg & 0xffff0000) |
5208 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 5232 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5209 j++; 5233 j++;
5210 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5234 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5211 return -EINVAL; 5235 return -EINVAL;
5212 break; 5236 break;
5213 default: 5237 default:
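
All three > to >= changes in si_set_mc_special_registers() are the same off-by-one fix: after j++ the index is used again to write mc_data[j], so j == SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE must already be rejected, not only values greater than it. A tiny standalone version of the check (array name and size below are made up):

#include <assert.h>
#include <stdint.h>

#define ARRAY_SIZE 16

static int store(uint32_t *data, int j, uint32_t val)
{
	/* valid indices are 0 .. ARRAY_SIZE - 1 */
	if (j >= ARRAY_SIZE)	/* ">" here would let j == ARRAY_SIZE through */
		return -1;
	data[j] = val;
	return 0;
}

int main(void)
{
	uint32_t data[ARRAY_SIZE];

	assert(store(data, ARRAY_SIZE - 1, 1) == 0);	/* last valid slot */
	assert(store(data, ARRAY_SIZE, 2) < 0);		/* one past the end */
	return 0;
}
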
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 52d2ab6b67a0..7e2e0ea66a00 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1553,7 +1553,7 @@
1553 * 6. COMMAND [30:21] | BYTE_COUNT [20:0] 1553 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
1554 */ 1554 */
1555# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1555# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1556 /* 0 - SRC_ADDR 1556 /* 0 - DST_ADDR
1557 * 1 - GDS 1557 * 1 - GDS
1558 */ 1558 */
1559# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1559# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1568,7 +1568,7 @@
1568# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1568# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1569/* COMMAND */ 1569/* COMMAND */
1570# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1570# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1571# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1571# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1572 /* 0 - none 1572 /* 0 - none
1573 * 1 - 8 in 16 1573 * 1 - 8 in 16
1574 * 2 - 8 in 32 1574 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 7f998bf1cc9d..9364129ba292 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1869 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1870 1870
1871 pi->enable_bapm = true; 1871 pi->enable_bapm = false;
1872 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1873 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1874 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC */ 215 /* enable UMC and NC0 */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71b70e3a7a71..c91d547191dd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -241,6 +241,7 @@ config HID_HOLTEK
241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice 241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice
242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / 242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
243 Zalman ZM-GM1 243 Zalman ZM-GM1
244 - SHARKOON DarkGlider Gaming mouse
244 245
245config HOLTEK_FF 246config HOLTEK_FF
246 bool "Holtek On Line Grip force feedback support" 247 bool "Holtek On Line Grip force feedback support"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b8470b1a10fe..5a8c01112a23 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1715,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1715 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, 1715 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
1718 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1718 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, 1719 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
1719 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1720 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1720 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1721 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 7e6db3cf46f9..e696566cde46 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -27,6 +27,7 @@
27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
29 * and Zalman ZM-GM1 29 * and Zalman ZM-GM1
30 * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
30 */ 31 */
31 32
32static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, 33static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
46 } 47 }
47 break; 48 break;
48 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: 49 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
50 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
49 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f 51 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
50 && rdesc[111] == 0xff && rdesc[112] == 0x7f) { 52 && rdesc[111] == 0xff && rdesc[112] == 0x7f) {
51 hid_info(hdev, "Fixing up report descriptor\n"); 53 hid_info(hdev, "Fixing up report descriptor\n");
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
63 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
64 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, 66 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 67 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
68 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
69 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
66 { } 70 { }
67}; 71};
68MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); 72MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e60e8d530697..9cbc7ab07dfa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -450,6 +450,7 @@
450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a 452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
453#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
453 454
454#define USB_VENDOR_ID_IMATION 0x0718 455#define USB_VENDOR_ID_IMATION 0x0718
455#define USB_DEVICE_ID_DISC_STAKKA 0xd000 456#define USB_DEVICE_ID_DISC_STAKKA 0xd000
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 602c188e9d86..6101816a7ddd 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
382} 382}
383#define PROFILE_ATTR(number) \ 383#define PROFILE_ATTR(number) \
384static struct bin_attribute bin_attr_profile##number = { \ 384static struct bin_attribute bin_attr_profile##number = { \
385 .attr = { .name = "profile##number", .mode = 0660 }, \ 385 .attr = { .name = "profile" #number, .mode = 0660 }, \
386 .size = sizeof(struct kone_profile), \ 386 .size = sizeof(struct kone_profile), \
387 .read = kone_sysfs_read_profilex, \ 387 .read = kone_sysfs_read_profilex, \
388 .write = kone_sysfs_write_profilex, \ 388 .write = kone_sysfs_write_profilex, \
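
The PROFILE_ATTR fix here (and the identical ones for koneplus, kovaplus and pyra below) is a preprocessor detail: macro parameters are not expanded inside string literals, so "profile##number" is just those literal characters. The working form stringizes the argument with # and relies on adjacent string literals being concatenated. A compile-and-run illustration:

#include <stdio.h>

/* Broken: the parameter is not substituted inside a string literal */
#define NAME_BROKEN(number)	"profile##number"
/* Fixed: stringize the argument and concatenate adjacent literals */
#define NAME_FIXED(number)	"profile" #number "_settings"

int main(void)
{
	printf("%s\n", NAME_BROKEN(2));	/* prints: profile##number */
	printf("%s\n", NAME_FIXED(2));	/* prints: profile2_settings */
	return 0;
}
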
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 5ddf605b6b89..5e99fcdc71b9 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
229 229
230#define PROFILE_ATTR(number) \ 230#define PROFILE_ATTR(number) \
231static struct bin_attribute bin_attr_profile##number##_settings = { \ 231static struct bin_attribute bin_attr_profile##number##_settings = { \
232 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 232 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
233 .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \ 233 .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
234 .read = koneplus_sysfs_read_profilex_settings, \ 234 .read = koneplus_sysfs_read_profilex_settings, \
235 .private = &profile_numbers[number-1], \ 235 .private = &profile_numbers[number-1], \
236}; \ 236}; \
237static struct bin_attribute bin_attr_profile##number##_buttons = { \ 237static struct bin_attribute bin_attr_profile##number##_buttons = { \
238 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 238 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
239 .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \ 239 .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
240 .read = koneplus_sysfs_read_profilex_buttons, \ 240 .read = koneplus_sysfs_read_profilex_buttons, \
241 .private = &profile_numbers[number-1], \ 241 .private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 515bc03136c0..0c8e1ef0b67d 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
257 257
258#define PROFILE_ATTR(number) \ 258#define PROFILE_ATTR(number) \
259static struct bin_attribute bin_attr_profile##number##_settings = { \ 259static struct bin_attribute bin_attr_profile##number##_settings = { \
260 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 260 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
261 .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \ 261 .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
262 .read = kovaplus_sysfs_read_profilex_settings, \ 262 .read = kovaplus_sysfs_read_profilex_settings, \
263 .private = &profile_numbers[number-1], \ 263 .private = &profile_numbers[number-1], \
264}; \ 264}; \
265static struct bin_attribute bin_attr_profile##number##_buttons = { \ 265static struct bin_attribute bin_attr_profile##number##_buttons = { \
266 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 266 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
267 .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \ 267 .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
268 .read = kovaplus_sysfs_read_profilex_buttons, \ 268 .read = kovaplus_sysfs_read_profilex_buttons, \
269 .private = &profile_numbers[number-1], \ 269 .private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 5a6dbbeee790..1a07e07d99a0 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
225 225
226#define PROFILE_ATTR(number) \ 226#define PROFILE_ATTR(number) \
227static struct bin_attribute bin_attr_profile##number##_settings = { \ 227static struct bin_attribute bin_attr_profile##number##_settings = { \
228 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 228 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
229 .size = PYRA_SIZE_PROFILE_SETTINGS, \ 229 .size = PYRA_SIZE_PROFILE_SETTINGS, \
230 .read = pyra_sysfs_read_profilex_settings, \ 230 .read = pyra_sysfs_read_profilex_settings, \
231 .private = &profile_numbers[number-1], \ 231 .private = &profile_numbers[number-1], \
232}; \ 232}; \
233static struct bin_attribute bin_attr_profile##number##_buttons = { \ 233static struct bin_attribute bin_attr_profile##number##_buttons = { \
234 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 234 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
235 .size = PYRA_SIZE_PROFILE_BUTTONS, \ 235 .size = PYRA_SIZE_PROFILE_BUTTONS, \
236 .read = pyra_sysfs_read_profilex_buttons, \ 236 .read = pyra_sysfs_read_profilex_buttons, \
237 .private = &profile_numbers[number-1], \ 237 .private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 2e7d644dba18..71adf9e60b13 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = {
119 * the rumble motor, this flag shouldn't be set. 119 * the rumble motor, this flag shouldn't be set.
120 */ 120 */
121 121
122/* used by wiimod_rumble and wiipro_rumble */
123static void wiimod_rumble_worker(struct work_struct *work)
124{
125 struct wiimote_data *wdata = container_of(work, struct wiimote_data,
126 rumble_worker);
127
128 spin_lock_irq(&wdata->state.lock);
129 wiiproto_req_rumble(wdata, wdata->state.cache_rumble);
130 spin_unlock_irq(&wdata->state.lock);
131}
132
122static int wiimod_rumble_play(struct input_dev *dev, void *data, 133static int wiimod_rumble_play(struct input_dev *dev, void *data,
123 struct ff_effect *eff) 134 struct ff_effect *eff)
124{ 135{
125 struct wiimote_data *wdata = input_get_drvdata(dev); 136 struct wiimote_data *wdata = input_get_drvdata(dev);
126 __u8 value; 137 __u8 value;
127 unsigned long flags;
128 138
129 /* 139 /*
130 * The wiimote supports only a single rumble motor so if any magnitude 140 * The wiimote supports only a single rumble motor so if any magnitude
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
137 else 147 else
138 value = 0; 148 value = 0;
139 149
140 spin_lock_irqsave(&wdata->state.lock, flags); 150 /* Locking state.lock here might deadlock with input_event() calls.
141 wiiproto_req_rumble(wdata, value); 151 * schedule_work acts as barrier. Merging multiple changes is fine. */
142 spin_unlock_irqrestore(&wdata->state.lock, flags); 152 wdata->state.cache_rumble = value;
153 schedule_work(&wdata->rumble_worker);
143 154
144 return 0; 155 return 0;
145} 156}
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
147static int wiimod_rumble_probe(const struct wiimod_ops *ops, 158static int wiimod_rumble_probe(const struct wiimod_ops *ops,
148 struct wiimote_data *wdata) 159 struct wiimote_data *wdata)
149{ 160{
161 INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
162
150 set_bit(FF_RUMBLE, wdata->input->ffbit); 163 set_bit(FF_RUMBLE, wdata->input->ffbit);
151 if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) 164 if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play))
152 return -ENOMEM; 165 return -ENOMEM;
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops,
159{ 172{
160 unsigned long flags; 173 unsigned long flags;
161 174
175 cancel_work_sync(&wdata->rumble_worker);
176
162 spin_lock_irqsave(&wdata->state.lock, flags); 177 spin_lock_irqsave(&wdata->state.lock, flags);
163 wiiproto_req_rumble(wdata, 0); 178 wiiproto_req_rumble(wdata, 0);
164 spin_unlock_irqrestore(&wdata->state.lock, flags); 179 spin_unlock_irqrestore(&wdata->state.lock, flags);
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
1731{ 1746{
1732 struct wiimote_data *wdata = input_get_drvdata(dev); 1747 struct wiimote_data *wdata = input_get_drvdata(dev);
1733 __u8 value; 1748 __u8 value;
1734 unsigned long flags;
1735 1749
1736 /* 1750 /*
1737 * The wiimote supports only a single rumble motor so if any magnitude 1751 * The wiimote supports only a single rumble motor so if any magnitude
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
1744 else 1758 else
1745 value = 0; 1759 value = 0;
1746 1760
1747 spin_lock_irqsave(&wdata->state.lock, flags); 1761 /* Locking state.lock here might deadlock with input_event() calls.
1748 wiiproto_req_rumble(wdata, value); 1762 * schedule_work acts as barrier. Merging multiple changes is fine. */
1749 spin_unlock_irqrestore(&wdata->state.lock, flags); 1763 wdata->state.cache_rumble = value;
1764 schedule_work(&wdata->rumble_worker);
1750 1765
1751 return 0; 1766 return 0;
1752} 1767}
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
1756{ 1771{
1757 int ret, i; 1772 int ret, i;
1758 1773
1774 INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
1775
1759 wdata->extension.input = input_allocate_device(); 1776 wdata->extension.input = input_allocate_device();
1760 if (!wdata->extension.input) 1777 if (!wdata->extension.input)
1761 return -ENOMEM; 1778 return -ENOMEM;
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
1817 if (!wdata->extension.input) 1834 if (!wdata->extension.input)
1818 return; 1835 return;
1819 1836
1837 input_unregister_device(wdata->extension.input);
1838 wdata->extension.input = NULL;
1839 cancel_work_sync(&wdata->rumble_worker);
1840
1820 spin_lock_irqsave(&wdata->state.lock, flags); 1841 spin_lock_irqsave(&wdata->state.lock, flags);
1821 wiiproto_req_rumble(wdata, 0); 1842 wiiproto_req_rumble(wdata, 0);
1822 spin_unlock_irqrestore(&wdata->state.lock, flags); 1843 spin_unlock_irqrestore(&wdata->state.lock, flags);
1823
1824 input_unregister_device(wdata->extension.input);
1825 wdata->extension.input = NULL;
1826} 1844}
1827 1845
1828static const struct wiimod_ops wiimod_pro = { 1846static const struct wiimod_ops wiimod_pro = {
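
The wiimote change moves the actual rumble request out of the ff_play callback: the handler only stores the requested value in state.cache_rumble and schedules rumble_worker, which takes state.lock itself, avoiding the deadlock with input_event() that the new comment describes. Below is a userspace analogue of that "cache the latest value and kick a worker" pattern, using pthreads rather than the kernel workqueue API, to show why back-to-back requests can safely be merged into one deferred action (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int cached_value;
static int pending;

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		int value;

		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&kick, &lock);
		value = cached_value;	/* only the latest value matters */
		pending = 0;
		pthread_mutex_unlock(&lock);

		printf("send rumble request: %d\n", value);	/* the slow part */
	}
	return NULL;
}

/* Called from the event path; never blocks on the slow work itself. */
static void request_rumble(int value)
{
	pthread_mutex_lock(&lock);
	cached_value = value;
	pending = 1;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&kick);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	request_rumble(1);
	request_rumble(0);
	sleep(1);		/* let the worker drain before exiting */
	return 0;
}
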
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index f1474f372c0b..75db0c400037 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -133,13 +133,15 @@ struct wiimote_state {
133 __u8 *cmd_read_buf; 133 __u8 *cmd_read_buf;
134 __u8 cmd_read_size; 134 __u8 cmd_read_size;
135 135
136 /* calibration data */ 136 /* calibration/cache data */
137 __u16 calib_bboard[4][3]; 137 __u16 calib_bboard[4][3];
138 __u8 cache_rumble;
138}; 139};
139 140
140struct wiimote_data { 141struct wiimote_data {
141 struct hid_device *hdev; 142 struct hid_device *hdev;
142 struct input_dev *input; 143 struct input_dev *input;
144 struct work_struct rumble_worker;
143 struct led_classdev *leds[4]; 145 struct led_classdev *leds[4];
144 struct input_dev *accel; 146 struct input_dev *accel;
145 struct input_dev *ir; 147 struct input_dev *ir;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 8918dd12bb69..6a6dd5cd7833 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on)
308static void drop_ref(struct hidraw *hidraw, int exists_bit) 308static void drop_ref(struct hidraw *hidraw, int exists_bit)
309{ 309{
310 if (exists_bit) { 310 if (exists_bit) {
311 hid_hw_close(hidraw->hid);
312 hidraw->exist = 0; 311 hidraw->exist = 0;
313 if (hidraw->open) 312 if (hidraw->open) {
313 hid_hw_close(hidraw->hid);
314 wake_up_interruptible(&hidraw->wait); 314 wake_up_interruptible(&hidraw->wait);
315 }
315 } else { 316 } else {
316 --hidraw->open; 317 --hidraw->open;
317 } 318 }
318 319 if (!hidraw->open) {
319 if (!hidraw->open && !hidraw->exist) { 320 if (!hidraw->exist) {
320 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 321 device_destroy(hidraw_class,
321 hidraw_table[hidraw->minor] = NULL; 322 MKDEV(hidraw_major, hidraw->minor));
322 kfree(hidraw); 323 hidraw_table[hidraw->minor] = NULL;
324 kfree(hidraw);
325 } else {
326 /* close device for last reader */
327 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
328 hid_hw_close(hidraw->hid);
329 }
323 } 330 }
324} 331}
325 332
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 5bf2fb785844..93b00d76374c 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = {
615 615
616static struct miscdevice uhid_misc = { 616static struct miscdevice uhid_misc = {
617 .fops = &uhid_fops, 617 .fops = &uhid_fops,
618 .minor = MISC_DYNAMIC_MINOR, 618 .minor = UHID_MINOR,
619 .name = UHID_NAME, 619 .name = UHID_NAME,
620}; 620};
621 621
@@ -634,4 +634,5 @@ module_exit(uhid_exit);
634MODULE_LICENSE("GPL"); 634MODULE_LICENSE("GPL");
635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); 635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); 636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
637MODULE_ALIAS_MISCDEV(UHID_MINOR);
637MODULE_ALIAS("devname:" UHID_NAME); 638MODULE_ALIAS("devname:" UHID_NAME);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
195 195
196 do { 196 do {
197 ret = vmbus_negotiate_version(msginfo, version); 197 ret = vmbus_negotiate_version(msginfo, version);
198 if (ret) 198 if (ret == -ETIMEDOUT)
199 goto cleanup; 199 goto cleanup;
200 200
201 if (vmbus_connection.conn_state == CONNECTED) 201 if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
32/* 32/*
33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) 33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
34 */ 34 */
35#define WS2008_SRV_MAJOR 1
36#define WS2008_SRV_MINOR 0
37#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
38
35#define WIN7_SRV_MAJOR 3 39#define WIN7_SRV_MAJOR 3
36#define WIN7_SRV_MINOR 0 40#define WIN7_SRV_MINOR 0
37#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) 41#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
38 42
39#define WIN8_SRV_MAJOR 4 43#define WIN8_SRV_MAJOR 4
40#define WIN8_SRV_MINOR 0 44#define WIN8_SRV_MINOR 0
41#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
42 46
43/* 47/*
44 * Global state maintained for transaction that is being processed. 48 * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
587 591
588 struct icmsg_hdr *icmsghdrp; 592 struct icmsg_hdr *icmsghdrp;
589 struct icmsg_negotiate *negop = NULL; 593 struct icmsg_negotiate *negop = NULL;
594 int util_fw_version;
595 int kvp_srv_version;
590 596
591 if (kvp_transaction.active) { 597 if (kvp_transaction.active) {
592 /* 598 /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
606 612
607 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 613 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
608 /* 614 /*
609 * We start with win8 version and if the host cannot 615 * Based on the host, select appropriate
610 * support that we use the previous version. 616 * framework and service versions we will
617 * negotiate.
611 */ 618 */
612 if (vmbus_prep_negotiate_resp(icmsghdrp, negop, 619 switch (vmbus_proto_version) {
613 recv_buffer, UTIL_FW_MAJOR_MINOR, 620 case (VERSION_WS2008):
614 WIN8_SRV_MAJOR_MINOR)) 621 util_fw_version = UTIL_WS2K8_FW_VERSION;
615 goto done; 622 kvp_srv_version = WS2008_SRV_VERSION;
616 623 break;
624 case (VERSION_WIN7):
625 util_fw_version = UTIL_FW_VERSION;
626 kvp_srv_version = WIN7_SRV_VERSION;
627 break;
628 default:
629 util_fw_version = UTIL_FW_VERSION;
630 kvp_srv_version = WIN8_SRV_VERSION;
631 }
617 vmbus_prep_negotiate_resp(icmsghdrp, negop, 632 vmbus_prep_negotiate_resp(icmsghdrp, negop,
618 recv_buffer, UTIL_FW_MAJOR_MINOR, 633 recv_buffer, util_fw_version,
619 WIN7_SRV_MAJOR_MINOR); 634 kvp_srv_version);
620 635
621 } else { 636 } else {
622 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 637 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
649 return; 664 return;
650 665
651 } 666 }
652done:
653 667
654 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION 668 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
655 | ICMSGHDRFLAG_RESPONSE; 669 | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
26 26
27#define VSS_MAJOR 5 27#define VSS_MAJOR 5
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31
32 32
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
190 190
191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
192 vmbus_prep_negotiate_resp(icmsghdrp, negop, 192 vmbus_prep_negotiate_resp(icmsghdrp, negop,
193 recv_buffer, UTIL_FW_MAJOR_MINOR, 193 recv_buffer, UTIL_FW_VERSION,
194 VSS_MAJOR_MINOR); 194 VSS_VERSION);
195 } else { 195 } else {
196 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 196 vss_msg = (struct hv_vss_msg *)&recv_buffer[
197 sizeof(struct vmbuspipe_hdr) + 197 sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#define SHUTDOWN_MAJOR 3
32#define SHUTDOWN_MINOR 0
33#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
34 31
35#define TIMESYNCH_MAJOR 3 32#define SD_MAJOR 3
36#define TIMESYNCH_MINOR 0 33#define SD_MINOR 0
37#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) 34#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
38 35
39#define HEARTBEAT_MAJOR 3 36#define SD_WS2008_MAJOR 1
40#define HEARTBEAT_MINOR 0 37#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
41#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) 38
39#define TS_MAJOR 3
40#define TS_MINOR 0
41#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
42
43#define TS_WS2008_MAJOR 1
44#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
45
46#define HB_MAJOR 3
47#define HB_MINOR 0
48#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
49
50#define HB_WS2008_MAJOR 1
51#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
52
53static int sd_srv_version;
54static int ts_srv_version;
55static int hb_srv_version;
56static int util_fw_version;
42 57
43static void shutdown_onchannelcallback(void *context); 58static void shutdown_onchannelcallback(void *context);
44static struct hv_util_service util_shutdown = { 59static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
99 114
100 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 115 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
101 vmbus_prep_negotiate_resp(icmsghdrp, negop, 116 vmbus_prep_negotiate_resp(icmsghdrp, negop,
102 shut_txf_buf, UTIL_FW_MAJOR_MINOR, 117 shut_txf_buf, util_fw_version,
103 SHUTDOWN_MAJOR_MINOR); 118 sd_srv_version);
104 } else { 119 } else {
105 shutdown_msg = 120 shutdown_msg =
106 (struct shutdown_msg_data *)&shut_txf_buf[ 121 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
216 struct icmsg_hdr *icmsghdrp; 231 struct icmsg_hdr *icmsghdrp;
217 struct ictimesync_data *timedatap; 232 struct ictimesync_data *timedatap;
218 u8 *time_txf_buf = util_timesynch.recv_buffer; 233 u8 *time_txf_buf = util_timesynch.recv_buffer;
234 struct icmsg_negotiate *negop = NULL;
219 235
220 vmbus_recvpacket(channel, time_txf_buf, 236 vmbus_recvpacket(channel, time_txf_buf,
221 PAGE_SIZE, &recvlen, &requestid); 237 PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
225 sizeof(struct vmbuspipe_hdr)]; 241 sizeof(struct vmbuspipe_hdr)];
226 242
227 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 243 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
228 vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, 244 vmbus_prep_negotiate_resp(icmsghdrp, negop,
229 UTIL_FW_MAJOR_MINOR, 245 time_txf_buf,
230 TIMESYNCH_MAJOR_MINOR); 246 util_fw_version,
247 ts_srv_version);
231 } else { 248 } else {
232 timedatap = (struct ictimesync_data *)&time_txf_buf[ 249 timedatap = (struct ictimesync_data *)&time_txf_buf[
233 sizeof(struct vmbuspipe_hdr) + 250 sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
257 struct icmsg_hdr *icmsghdrp; 274 struct icmsg_hdr *icmsghdrp;
258 struct heartbeat_msg_data *heartbeat_msg; 275 struct heartbeat_msg_data *heartbeat_msg;
259 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 276 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
277 struct icmsg_negotiate *negop = NULL;
260 278
261 vmbus_recvpacket(channel, hbeat_txf_buf, 279 vmbus_recvpacket(channel, hbeat_txf_buf,
262 PAGE_SIZE, &recvlen, &requestid); 280 PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
266 sizeof(struct vmbuspipe_hdr)]; 284 sizeof(struct vmbuspipe_hdr)];
267 285
268 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 286 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
269 vmbus_prep_negotiate_resp(icmsghdrp, NULL, 287 vmbus_prep_negotiate_resp(icmsghdrp, negop,
270 hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, 288 hbeat_txf_buf, util_fw_version,
271 HEARTBEAT_MAJOR_MINOR); 289 hb_srv_version);
272 } else { 290 } else {
273 heartbeat_msg = 291 heartbeat_msg =
274 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 292 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
321 goto error; 339 goto error;
322 340
323 hv_set_drvdata(dev, srv); 341 hv_set_drvdata(dev, srv);
342 /*
343 * Based on the host; initialize the framework and
344 * service version numbers we will negotiate.
345 */
346 switch (vmbus_proto_version) {
347 case (VERSION_WS2008):
348 util_fw_version = UTIL_WS2K8_FW_VERSION;
349 sd_srv_version = SD_WS2008_VERSION;
350 ts_srv_version = TS_WS2008_VERSION;
351 hb_srv_version = HB_WS2008_VERSION;
352 break;
353
354 default:
355 util_fw_version = UTIL_FW_VERSION;
356 sd_srv_version = SD_VERSION;
357 ts_srv_version = TS_VERSION;
358 hb_srv_version = HB_VERSION;
359 }
360
324 return 0; 361 return 0;
325 362
326error: 363error:
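
The *_VERSION macros introduced across hv_kvp.c, hv_snapshot.c and hv_util.c all use the same encoding: major number in the upper 16 bits, minor in the lower 16, packed into the single word handed to vmbus_prep_negotiate_resp(). A trivial sketch of packing and unpacking that word (the macro name here is made up):

#include <stdio.h>
#include <stdint.h>

/* major.minor packed into one 32-bit negotiation word */
#define MAKE_SRV_VERSION(maj, min)	(((uint32_t)(maj) << 16) | (uint16_t)(min))

int main(void)
{
	uint32_t v = MAKE_SRV_VERSION(3, 0);

	printf("version word 0x%08x -> %u.%u\n",
	       (unsigned)v, (unsigned)(v >> 16), (unsigned)(v & 0xffff));
	return 0;
}
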
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..3288f13d2d87 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -230,6 +230,7 @@ static int send_argument(const char *key)
230 230
231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) 231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
232{ 232{
233 u8 status, data = 0;
233 int i; 234 int i;
234 235
235 if (send_command(cmd) || send_argument(key)) { 236 if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
237 return -EIO; 238 return -EIO;
238 } 239 }
239 240
241 /* This has no effect on newer (2012) SMCs */
240 if (send_byte(len, APPLESMC_DATA_PORT)) { 242 if (send_byte(len, APPLESMC_DATA_PORT)) {
241 pr_warn("%.4s: read len fail\n", key); 243 pr_warn("%.4s: read len fail\n", key);
242 return -EIO; 244 return -EIO;
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
250 buffer[i] = inb(APPLESMC_DATA_PORT); 252 buffer[i] = inb(APPLESMC_DATA_PORT);
251 } 253 }
252 254
255 /* Read the data port until bit0 is cleared */
256 for (i = 0; i < 16; i++) {
257 udelay(APPLESMC_MIN_WAIT);
258 status = inb(APPLESMC_CMD_PORT);
259 if (!(status & 0x01))
260 break;
261 data = inb(APPLESMC_DATA_PORT);
262 }
263 if (i)
264 pr_warn("flushed %d bytes, last value is: %d\n", i, data);
265
253 return 0; 266 return 0;
254} 267}
255 268
@@ -525,16 +538,25 @@ static int applesmc_init_smcreg_try(void)
525{ 538{
526 struct applesmc_registers *s = &smcreg; 539 struct applesmc_registers *s = &smcreg;
527 bool left_light_sensor, right_light_sensor; 540 bool left_light_sensor, right_light_sensor;
541 unsigned int count;
528 u8 tmp[1]; 542 u8 tmp[1];
529 int ret; 543 int ret;
530 544
531 if (s->init_complete) 545 if (s->init_complete)
532 return 0; 546 return 0;
533 547
534 ret = read_register_count(&s->key_count); 548 ret = read_register_count(&count);
535 if (ret) 549 if (ret)
536 return ret; 550 return ret;
537 551
552 if (s->cache && s->key_count != count) {
553 pr_warn("key count changed from %d to %d\n",
554 s->key_count, count);
555 kfree(s->cache);
556 s->cache = NULL;
557 }
558 s->key_count = count;
559
538 if (!s->cache) 560 if (!s->cache)
539 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); 561 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
540 if (!s->cache) 562 if (!s->cache)
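
The applesmc hunk adds a bounded drain loop: after the expected bytes have been read, the data port is polled until status bit 0 clears (at most 16 times), so stale bytes cannot poison the next command; a changed key count also invalidates the cache. The sketch below mimics the drain loop against a simulated device instead of inb() on the real SMC ports:

#include <stdio.h>
#include <stdint.h>

/* Simulated device: a few stale bytes left over from a previous transfer. */
static uint8_t leftover[] = { 0xaa, 0xbb, 0xcc };
static unsigned int pos;

static uint8_t read_status(void)
{
	return pos < sizeof(leftover) ? 0x01 : 0x00;	/* bit 0 = data pending */
}

static uint8_t read_data(void)
{
	return leftover[pos++];
}

/* Drain pending bytes until status bit 0 clears, bounded to 16 iterations so
 * a stuck status bit cannot hang the caller (mirrors the hunk above). */
static int flush_data_port(void)
{
	int flushed = 0;
	int i;

	for (i = 0; i < 16; i++) {
		if (!(read_status() & 0x01))
			break;
		(void)read_data();
		flushed++;
	}
	return flushed;
}

int main(void)
{
	printf("flushed %d stale bytes\n", flush_data_port());
	return 0;
}
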
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
98 98
99#define DW_IC_ERR_TX_ABRT 0x1 99#define DW_IC_ERR_TX_ABRT 0x1
100 100
101#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
102
101/* 103/*
102 * status codes 104 * status codes
103 */ 105 */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
388static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 390static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
389{ 391{
390 struct i2c_msg *msgs = dev->msgs; 392 struct i2c_msg *msgs = dev->msgs;
391 u32 ic_con; 393 u32 ic_con, ic_tar = 0;
392 394
393 /* Disable the adapter */ 395 /* Disable the adapter */
394 __i2c_dw_enable(dev, false); 396 __i2c_dw_enable(dev, false);
395 397
396 /* set the slave (target) address */
397 dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
398
399 /* if the slave address is ten bit address, enable 10BITADDR */ 398 /* if the slave address is ten bit address, enable 10BITADDR */
400 ic_con = dw_readl(dev, DW_IC_CON); 399 ic_con = dw_readl(dev, DW_IC_CON);
401 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 400 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
402 ic_con |= DW_IC_CON_10BITADDR_MASTER; 401 ic_con |= DW_IC_CON_10BITADDR_MASTER;
403 else 402 /*
403 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
404 * mode has to be enabled via bit 12 of the IC_TAR register.
405 * We always set it, as I2C_DYNAMIC_TAR_UPDATE can't be
406 * detected from the registers.
407 */
408 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
409 } else {
404 ic_con &= ~DW_IC_CON_10BITADDR_MASTER; 410 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
411 }
412
405 dw_writel(dev, ic_con, DW_IC_CON); 413 dw_writel(dev, ic_con, DW_IC_CON);
406 414
415 /*
416 * Set the slave (target) address and enable 10-bit addressing mode
417 * if applicable.
418 */
419 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
420
407 /* Enable the adapter */ 421 /* Enable the adapter */
408 __i2c_dw_enable(dev, true); 422 __i2c_dw_enable(dev, true);
409 423
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4c1b60539a25..0aa01136f8d9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
270MODULE_ALIAS("platform:i2c_designware"); 270MODULE_ALIAS("platform:i2c_designware");
271 271
272static struct platform_driver dw_i2c_driver = { 272static struct platform_driver dw_i2c_driver = {
273 .remove = dw_i2c_remove, 273 .probe = dw_i2c_probe,
274 .remove = dw_i2c_remove,
274 .driver = { 275 .driver = {
275 .name = "i2c_designware", 276 .name = "i2c_designware",
276 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = {
282 283
283static int __init dw_i2c_init_driver(void) 284static int __init dw_i2c_init_driver(void)
284{ 285{
285 return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); 286 return platform_driver_register(&dw_i2c_driver);
286} 287}
287subsys_initcall(dw_i2c_init_driver); 288subsys_initcall(dw_i2c_init_driver);
288 289
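
Note on the conversion above (repeated below for i2c-imx, i2c-mxs and i2c-stu300): platform_driver_probe() binds a driver exactly once, at registration time, so a probe that returns -EPROBE_DEFER can never be retried; once .probe lives in the platform_driver structure and platform_driver_register() is used, the core can re-run probe later, which is also why the __init/__exit annotations on probe/remove are dropped. A minimal sketch of the resulting skeleton, using a hypothetical "foo" driver name:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Deliberately not __init: with deferred probing this may run long after
 * init memory has been freed. */
static int foo_probe(struct platform_device *pdev)
{
        /* acquire resources; may return -EPROBE_DEFER if a dependency
         * has not been registered yet */
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name  = "foo",
                .owner = THIS_MODULE,
        },
};

static int __init foo_init(void)
{
        /* was: platform_driver_probe(&foo_driver, foo_probe) */
        return platform_driver_register(&foo_driver);
}
subsys_initcall(foo_init);

static void __exit foo_exit(void)
{
        platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
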
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ccf46656bdad..1d7efa3169cd 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
365 clk_disable_unprepare(i2c_imx->clk); 365 clk_disable_unprepare(i2c_imx->clk);
366} 366}
367 367
368static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, 368static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
369 unsigned int rate) 369 unsigned int rate)
370{ 370{
371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; 371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = {
589 .functionality = i2c_imx_func, 589 .functionality = i2c_imx_func,
590}; 590};
591 591
592static int __init i2c_imx_probe(struct platform_device *pdev) 592static int i2c_imx_probe(struct platform_device *pdev)
593{ 593{
594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids, 594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
595 &pdev->dev); 595 &pdev->dev);
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
697 return 0; /* Return OK */ 697 return 0; /* Return OK */
698} 698}
699 699
700static int __exit i2c_imx_remove(struct platform_device *pdev) 700static int i2c_imx_remove(struct platform_device *pdev)
701{ 701{
702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); 702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
703 703
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
715} 715}
716 716
717static struct platform_driver i2c_imx_driver = { 717static struct platform_driver i2c_imx_driver = {
718 .remove = __exit_p(i2c_imx_remove), 718 .probe = i2c_imx_probe,
719 .remove = i2c_imx_remove,
719 .driver = { 720 .driver = {
720 .name = DRIVER_NAME, 721 .name = DRIVER_NAME,
721 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = {
726 727
727static int __init i2c_adap_imx_init(void) 728static int __init i2c_adap_imx_init(void)
728{ 729{
729 return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); 730 return platform_driver_register(&i2c_imx_driver);
730} 731}
731subsys_initcall(i2c_adap_imx_init); 732subsys_initcall(i2c_adap_imx_init);
732 733
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
393 393
394 desc = &priv->hw[priv->head]; 394 desc = &priv->hw[priv->head];
395 395
396 /* Initialize the DMA buffer */
397 memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
398
396 /* Initialize the descriptor */ 399 /* Initialize the descriptor */
397 memset(desc, 0, sizeof(struct ismt_desc)); 400 memset(desc, 0, sizeof(struct ismt_desc));
398 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); 401 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | 234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; 235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
236 236
237 writel_relaxed(data_reg_lo, 237 writel(data_reg_lo,
238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); 238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
239 writel_relaxed(data_reg_hi, 239 writel(data_reg_hi,
240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); 240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
241 241
242 } else { 242 } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); 697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
698 698
699#ifdef CONFIG_OF 699#ifdef CONFIG_OF
700#ifdef CONFIG_HAVE_CLK
700static int 701static int
701mv64xxx_calc_freq(const int tclk, const int n, const int m) 702mv64xxx_calc_freq(const int tclk, const int n, const int m)
702{ 703{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
726 return false; 727 return false;
727 return true; 728 return true;
728} 729}
730#endif /* CONFIG_HAVE_CLK */
729 731
730static int 732static int
731mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, 733mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
732 struct device *dev) 734 struct device *dev)
733{ 735{
734 const struct of_device_id *device;
735 struct device_node *np = dev->of_node;
736 u32 bus_freq, tclk;
737 int rc = 0;
738
739 /* CLK is mandatory when using DT to describe the i2c bus. We 736 /* CLK is mandatory when using DT to describe the i2c bus. We
740 * need to know tclk in order to calculate bus clock 737 * need to know tclk in order to calculate bus clock
741 * factors. 738 * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
744 /* Have OF but no CLK */ 741 /* Have OF but no CLK */
745 return -ENODEV; 742 return -ENODEV;
746#else 743#else
744 const struct of_device_id *device;
745 struct device_node *np = dev->of_node;
746 u32 bus_freq, tclk;
747 int rc = 0;
748
747 if (IS_ERR(drv_data->clk)) { 749 if (IS_ERR(drv_data->clk)) {
748 rc = -ENODEV; 750 rc = -ENODEV;
749 goto out; 751 goto out;
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index f4a01675fa71..b7c857774708 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -780,12 +780,13 @@ static struct platform_driver mxs_i2c_driver = {
780 .owner = THIS_MODULE, 780 .owner = THIS_MODULE,
781 .of_match_table = mxs_i2c_dt_ids, 781 .of_match_table = mxs_i2c_dt_ids,
782 }, 782 },
783 .probe = mxs_i2c_probe,
783 .remove = mxs_i2c_remove, 784 .remove = mxs_i2c_remove,
784}; 785};
785 786
786static int __init mxs_i2c_init(void) 787static int __init mxs_i2c_init(void)
787{ 788{
788 return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe); 789 return platform_driver_register(&mxs_i2c_driver);
789} 790}
790subsys_initcall(mxs_i2c_init); 791subsys_initcall(mxs_i2c_init);
791 792
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6d8308d5dc4e..9967a6f9c2ff 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
939 /* 939 /*
940 * ProDB0017052: Clear ARDY bit twice 940 * ProDB0017052: Clear ARDY bit twice
941 */ 941 */
942 if (stat & OMAP_I2C_STAT_ARDY)
943 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
944
942 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 945 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
943 OMAP_I2C_STAT_AL)) { 946 OMAP_I2C_STAT_AL)) {
944 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY | 947 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1178 1178
1179 i2c_del_adapter(&i2c->adap); 1179 i2c_del_adapter(&i2c->adap);
1180 1180
1181 clk_disable_unprepare(i2c->clk);
1182
1183 if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) 1181 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1184 s3c24xx_i2c_dt_gpio_free(i2c); 1182 s3c24xx_i2c_dt_gpio_free(i2c);
1185 1183
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index f8f6f2e552db..04a17b9b38bb 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = {
859 .functionality = stu300_func, 859 .functionality = stu300_func,
860}; 860};
861 861
862static int __init 862static int stu300_probe(struct platform_device *pdev)
863stu300_probe(struct platform_device *pdev)
864{ 863{
865 struct stu300_dev *dev; 864 struct stu300_dev *dev;
866 struct i2c_adapter *adap; 865 struct i2c_adapter *adap;
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
966#define STU300_I2C_PM NULL 965#define STU300_I2C_PM NULL
967#endif 966#endif
968 967
969static int __exit 968static int stu300_remove(struct platform_device *pdev)
970stu300_remove(struct platform_device *pdev)
971{ 969{
972 struct stu300_dev *dev = platform_get_drvdata(pdev); 970 struct stu300_dev *dev = platform_get_drvdata(pdev);
973 971
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = {
989 .pm = STU300_I2C_PM, 987 .pm = STU300_I2C_PM,
990 .of_match_table = stu300_dt_match, 988 .of_match_table = stu300_dt_match,
991 }, 989 },
992 .remove = __exit_p(stu300_remove), 990 .probe = stu300_probe,
991 .remove = stu300_remove,
993 992
994}; 993};
995 994
996static int __init stu300_init(void) 995static int __init stu300_init(void)
997{ 996{
998 return platform_driver_probe(&stu300_i2c_driver, stu300_probe); 997 return platform_driver_register(&stu300_i2c_driver);
999} 998}
1000 999
1001static void __exit stu300_exit(void) 1000static void __exit stu300_exit(void)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 29d3f045a2bf..3be58f89ac77 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1134,6 +1134,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
1134 acpi_handle handle; 1134 acpi_handle handle;
1135 acpi_status status; 1135 acpi_status status;
1136 1136
1137 if (!adap->dev.parent)
1138 return;
1139
1137 handle = ACPI_HANDLE(adap->dev.parent); 1140 handle = ACPI_HANDLE(adap->dev.parent);
1138 if (!handle) 1141 if (!handle)
1139 return; 1142 return;
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 74b41ae690f3..928656e241dd 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
200 arb->parent = of_find_i2c_adapter_by_node(parent_np); 200 arb->parent = of_find_i2c_adapter_by_node(parent_np);
201 if (!arb->parent) { 201 if (!arb->parent) {
202 dev_err(dev, "Cannot find parent bus\n"); 202 dev_err(dev, "Cannot find parent bus\n");
203 return -EINVAL; 203 return -EPROBE_DEFER;
204 } 204 }
205 205
206 /* Actually add the mux adapter */ 206 /* Actually add the mux adapter */
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5d4a99ba743e..a764da777f08 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
66 struct device_node *adapter_np, *child; 66 struct device_node *adapter_np, *child;
67 struct i2c_adapter *adapter; 67 struct i2c_adapter *adapter;
68 unsigned *values, *gpios; 68 unsigned *values, *gpios;
69 int i = 0; 69 int i = 0, ret;
70 70
71 if (!np) 71 if (!np)
72 return -ENODEV; 72 return -ENODEV;
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
79 adapter = of_find_i2c_adapter_by_node(adapter_np); 79 adapter = of_find_i2c_adapter_by_node(adapter_np);
80 if (!adapter) { 80 if (!adapter) {
81 dev_err(&pdev->dev, "Cannot find parent bus\n"); 81 dev_err(&pdev->dev, "Cannot find parent bus\n");
82 return -ENODEV; 82 return -EPROBE_DEFER;
83 } 83 }
84 mux->data.parent = i2c_adapter_id(adapter); 84 mux->data.parent = i2c_adapter_id(adapter);
85 put_device(&adapter->dev); 85 put_device(&adapter->dev);
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
116 return -ENOMEM; 116 return -ENOMEM;
117 } 117 }
118 118
119 for (i = 0; i < mux->data.n_gpios; i++) 119 for (i = 0; i < mux->data.n_gpios; i++) {
120 gpios[i] = of_get_named_gpio(np, "mux-gpios", i); 120 ret = of_get_named_gpio(np, "mux-gpios", i);
121 if (ret < 0)
122 return ret;
123 gpios[i] = ret;
124 }
121 125
122 mux->data.gpios = gpios; 126 mux->data.gpios = gpios;
123 127
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
177 if (!parent) { 181 if (!parent) {
178 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 182 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
179 mux->data.parent); 183 mux->data.parent);
180 return -ENODEV; 184 return -EPROBE_DEFER;
181 } 185 }
182 186
183 mux->parent = parent; 187 mux->parent = parent;
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 69a91732ae65..68a37157377d 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
113 adapter = of_find_i2c_adapter_by_node(adapter_np); 113 adapter = of_find_i2c_adapter_by_node(adapter_np);
114 if (!adapter) { 114 if (!adapter) {
115 dev_err(mux->dev, "Cannot find parent bus\n"); 115 dev_err(mux->dev, "Cannot find parent bus\n");
116 return -ENODEV; 116 return -EPROBE_DEFER;
117 } 117 }
118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter); 118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
119 put_device(&adapter->dev); 119 put_device(&adapter->dev);
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
211 if (!mux->parent) { 211 if (!mux->parent) {
212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
213 mux->pdata->parent_bus_num); 213 mux->pdata->parent_bus_num);
214 ret = -ENODEV; 214 ret = -EPROBE_DEFER;
215 goto err; 215 goto err;
216 } 216 }
217 217
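
The three mux/arbitrator drivers above now return -EPROBE_DEFER instead of a hard error when the parent adapter named in the device tree has not been registered yet, so the driver core retries the probe after more devices appear. A hedged sketch of that lookup, with a hypothetical helper name:

#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_attach_parent(struct platform_device *pdev,
                             struct device_node *parent_np,
                             struct i2c_adapter **out)
{
        struct i2c_adapter *parent;

        parent = of_find_i2c_adapter_by_node(parent_np);
        if (!parent) {
                /* The parent may simply probe later; ask the core to
                 * retry us instead of failing permanently. */
                dev_dbg(&pdev->dev, "parent bus not ready, deferring\n");
                return -EPROBE_DEFER;
        }

        *out = parent;  /* caller keeps the reference until teardown */
        return 0;
}
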
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d0a79a4bce1c..ba6f6a91dfff 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
185 185
186 iio_device_unregister(indio_dev); 186 iio_device_unregister(indio_dev);
187 187
188 if (!IS_ERR(reg)) { 188 if (!IS_ERR(reg))
189 regulator_disable(reg); 189 regulator_disable(reg);
190 regulator_put(reg);
191 }
192 190
193 return 0; 191 return 0;
194} 192}
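
Dropping regulator_put() from ad8366_remove() is only safe because the reference is device-managed; presumably probe obtains it with devm_regulator_get(), so the core releases it automatically and an explicit put would be a double release. A sketch of that managed pattern, with a hypothetical "vcc" supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int foo_setup_supply(struct device *dev)
{
        struct regulator *reg;
        int ret;

        /* reference is tied to the device's lifetime; no regulator_put()
         * is needed in the remove path */
        reg = devm_regulator_get(dev, "vcc");
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        ret = regulator_enable(reg);
        if (ret)
                return ret;

        /* remove() only has to call regulator_disable() */
        return 0;
}
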
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 8e84cd522e49..f95c6979efd8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -852,7 +852,6 @@ static void iio_dev_release(struct device *device)
852 iio_device_unregister_trigger_consumer(indio_dev); 852 iio_device_unregister_trigger_consumer(indio_dev);
853 iio_device_unregister_eventset(indio_dev); 853 iio_device_unregister_eventset(indio_dev);
854 iio_device_unregister_sysfs(indio_dev); 854 iio_device_unregister_sysfs(indio_dev);
855 iio_device_unregister_debugfs(indio_dev);
856 855
857 ida_simple_remove(&iio_ida, indio_dev->id); 856 ida_simple_remove(&iio_ida, indio_dev->id);
858 kfree(indio_dev); 857 kfree(indio_dev);
@@ -1087,6 +1086,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
1087 1086
1088 if (indio_dev->chrdev.dev) 1087 if (indio_dev->chrdev.dev)
1089 cdev_del(&indio_dev->chrdev); 1088 cdev_del(&indio_dev->chrdev);
1089 iio_device_unregister_debugfs(indio_dev);
1090 1090
1091 iio_disable_all_buffers(indio_dev); 1091 iio_disable_all_buffers(indio_dev);
1092 1092
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e8d2849cc81d..cab3bc7494a2 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -29,9 +29,9 @@
29#define ST_MAGN_NUMBER_DATA_CHANNELS 3 29#define ST_MAGN_NUMBER_DATA_CHANNELS 3
30 30
31/* DEFAULT VALUE FOR SENSORS */ 31/* DEFAULT VALUE FOR SENSORS */
32#define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 32#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
33#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 33#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
34#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 34#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
35 35
36/* FULLSCALE */ 36/* FULLSCALE */
37#define ST_MAGN_FS_AVL_1300MG 1300 37#define ST_MAGN_FS_AVL_1300MG 1300
@@ -117,16 +117,16 @@
117static const struct iio_chan_spec st_magn_16bit_channels[] = { 117static const struct iio_chan_spec st_magn_16bit_channels[] = {
118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, 120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
121 ST_MAGN_DEFAULT_OUT_X_L_ADDR), 121 ST_MAGN_DEFAULT_OUT_X_H_ADDR),
122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, 124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
125 ST_MAGN_DEFAULT_OUT_Y_L_ADDR), 125 ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, 128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
129 ST_MAGN_DEFAULT_OUT_Z_L_ADDR), 129 ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
130 IIO_CHAN_SOFT_TIMESTAMP(3) 130 IIO_CHAN_SOFT_TIMESTAMP(3)
131}; 131};
132 132
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 653ac6bfc57a..6c923c7039a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1588 int resp_data_len; 1588 int resp_data_len;
1589 int resp_len; 1589 int resp_len;
1590 1590
1591 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; 1591 resp_data_len = 4;
1592 resp_len = sizeof(*srp_rsp) + resp_data_len; 1592 resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 1593
1594 srp_rsp = ioctx->ioctx.buf; 1594 srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1600 + atomic_xchg(&ch->req_lim_delta, 0)); 1600 + atomic_xchg(&ch->req_lim_delta, 0));
1601 srp_rsp->tag = tag; 1601 srp_rsp->tag = tag;
1602 1602
1603 if (rsp_code != SRP_TSK_MGMT_SUCCESS) { 1603 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1604 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1604 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1605 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); 1605 srp_rsp->data[3] = rsp_code;
1606 srp_rsp->data[3] = rsp_code;
1607 }
1608 1606
1609 return resp_len; 1607 return resp_len;
1610} 1608}
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
2358 transport_deregister_session(se_sess); 2356 transport_deregister_session(se_sess);
2359 ch->sess = NULL; 2357 ch->sess = NULL;
2360 2358
2359 ib_destroy_cm_id(ch->cm_id);
2360
2361 srpt_destroy_ch_ib(ch); 2361 srpt_destroy_ch_ib(ch);
2362 2362
2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
2368 list_del(&ch->list); 2368 list_del(&ch->list);
2369 spin_unlock_irq(&sdev->spinlock); 2369 spin_unlock_irq(&sdev->spinlock);
2370 2370
2371 ib_destroy_cm_id(ch->cm_id);
2372
2373 if (ch->release_done) 2371 if (ch->release_done)
2374 complete(ch->release_done); 2372 complete(ch->release_done);
2375 2373
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fe302e33f72e..c880ebaf1553 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -52,7 +52,7 @@ config AMD_IOMMU
52 select PCI_PRI 52 select PCI_PRI
53 select PCI_PASID 53 select PCI_PASID
54 select IOMMU_API 54 select IOMMU_API
55 depends on X86_64 && PCI && ACPI && X86_IO_APIC 55 depends on X86_64 && PCI && ACPI
56 ---help--- 56 ---help---
57 With this option you can enable support for AMD IOMMU hardware in 57 With this option you can enable support for AMD IOMMU hardware in
58 your system. An IOMMU is a hardware component which provides 58 your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..181c9ba929cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
377 u32 cbar; 377 u32 cbar;
378 pgd_t *pgd; 378 pgd_t *pgd;
379}; 379};
380#define INVALID_IRPTNDX 0xff
380 381
381#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 382#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
382#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 383#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
840 if (IS_ERR_VALUE(ret)) { 841 if (IS_ERR_VALUE(ret)) {
841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 842 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
842 root_cfg->irptndx, irq); 843 root_cfg->irptndx, irq);
843 root_cfg->irptndx = -1; 844 root_cfg->irptndx = INVALID_IRPTNDX;
844 goto out_free_context; 845 goto out_free_context;
845 } 846 }
846 847
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
869 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); 870 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
870 arm_smmu_tlb_inv_context(root_cfg); 871 arm_smmu_tlb_inv_context(root_cfg);
871 872
872 if (root_cfg->irptndx != -1) { 873 if (root_cfg->irptndx != INVALID_IRPTNDX) {
873 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 874 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
874 free_irq(irq, domain); 875 free_irq(irq, domain);
875 } 876 }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1857 goto out_put_parent; 1858 goto out_put_parent;
1858 } 1859 }
1859 1860
1860 arm_smmu_device_reset(smmu);
1861
1862 for (i = 0; i < smmu->num_global_irqs; ++i) { 1861 for (i = 0; i < smmu->num_global_irqs; ++i) {
1863 err = request_irq(smmu->irqs[i], 1862 err = request_irq(smmu->irqs[i],
1864 arm_smmu_global_fault, 1863 arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1876 spin_lock(&arm_smmu_devices_lock); 1875 spin_lock(&arm_smmu_devices_lock);
1877 list_add(&smmu->list, &arm_smmu_devices); 1876 list_add(&smmu->list, &arm_smmu_devices);
1878 spin_unlock(&arm_smmu_devices_lock); 1877 spin_unlock(&arm_smmu_devices_lock);
1878
1879 arm_smmu_device_reset(smmu);
1879 return 0; 1880 return 0;
1880 1881
1881out_free_irqs: 1882out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
1966 return ret; 1967 return ret;
1967 1968
1968 /* Oh, for a proper bus abstraction */ 1969 /* Oh, for a proper bus abstraction */
1969 if (!iommu_present(&platform_bus_type)); 1970 if (!iommu_present(&platform_bus_type))
1970 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1971 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1971 1972
1972 if (!iommu_present(&amba_bustype)); 1973 if (!iommu_present(&amba_bustype))
1973 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1974 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1974 1975
1975 return 0; 1976 return 0;
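
The INVALID_IRPTNDX change above replaces comparisons against -1 on a field narrower than int (irptndx is a u8 in this driver), where "field != -1" can never be false: the stored value is 0xff, which promotes to 255. A standalone sketch of the pitfall:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t irptndx = -1;           /* stored as 0xff */

        if (irptndx != -1)              /* 255 != -1: always true */
                printf("comparing a u8 against -1 never matches\n");

        if (irptndx == 0xff)            /* explicit sentinel works */
                printf("comparing against 0xff (INVALID_IRPTNDX) does\n");

        return 0;
}
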
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
498 */ 498 */
499 atomic_t has_dirty; 499 atomic_t has_dirty;
500 500
501 struct ratelimit writeback_rate; 501 struct bch_ratelimit writeback_rate;
502 struct delayed_work writeback_rate_update; 502 struct delayed_work writeback_rate_update;
503 503
504 /* 504 /*
@@ -507,10 +507,9 @@ struct cached_dev {
507 */ 507 */
508 sector_t last_read; 508 sector_t last_read;
509 509
510 /* Number of writeback bios in flight */ 510 /* Limit number of writeback bios in flight */
511 atomic_t in_flight; 511 struct semaphore in_flight;
512 struct closure_with_timer writeback; 512 struct closure_with_timer writeback;
513 struct closure_waitlist writeback_wait;
514 513
515 struct keybuf writeback_keys; 514 struct keybuf writeback_keys;
516 515
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
926 926
927/* Mergesort */ 927/* Mergesort */
928 928
929static void sort_key_next(struct btree_iter *iter,
930 struct btree_iter_set *i)
931{
932 i->k = bkey_next(i->k);
933
934 if (i->k == i->end)
935 *i = iter->data[--iter->used];
936}
937
929static void btree_sort_fixup(struct btree_iter *iter) 938static void btree_sort_fixup(struct btree_iter *iter)
930{ 939{
931 while (iter->used > 1) { 940 while (iter->used > 1) {
932 struct btree_iter_set *top = iter->data, *i = top + 1; 941 struct btree_iter_set *top = iter->data, *i = top + 1;
933 struct bkey *k;
934 942
935 if (iter->used > 2 && 943 if (iter->used > 2 &&
936 btree_iter_cmp(i[0], i[1])) 944 btree_iter_cmp(i[0], i[1]))
937 i++; 945 i++;
938 946
939 for (k = i->k; 947 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
940 k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
941 k = bkey_next(k))
942 if (top->k > i->k)
943 __bch_cut_front(top->k, k);
944 else if (KEY_SIZE(k))
945 bch_cut_back(&START_KEY(k), top->k);
946
947 if (top->k < i->k || k == i->k)
948 break; 948 break;
949 949
950 heap_sift(iter, i - top, btree_iter_cmp); 950 if (!KEY_SIZE(i->k)) {
951 sort_key_next(iter, i);
952 heap_sift(iter, i - top, btree_iter_cmp);
953 continue;
954 }
955
956 if (top->k > i->k) {
957 if (bkey_cmp(top->k, i->k) >= 0)
958 sort_key_next(iter, i);
959 else
960 bch_cut_front(top->k, i->k);
961
962 heap_sift(iter, i - top, btree_iter_cmp);
963 } else {
964 /* can't happen because of comparison func */
965 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
966 bch_cut_back(&START_KEY(i->k), top->k);
967 }
951 } 968 }
952} 969}
953 970
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
255 255
256 return; 256 return;
257err: 257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu", 258 bch_cache_set_error(b->c, "io error reading bucket %zu",
259 PTR_BUCKET_NR(b->c, &b->key, 0)); 259 PTR_BUCKET_NR(b->c, &b->key, 0));
260} 260}
261 261
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
612 return SHRINK_STOP; 612 return SHRINK_STOP;
613 613
614 /* Return -1 if we can't do anything right now */ 614 /* Return -1 if we can't do anything right now */
615 if (sc->gfp_mask & __GFP_WAIT) 615 if (sc->gfp_mask & __GFP_IO)
616 mutex_lock(&c->bucket_lock); 616 mutex_lock(&c->bucket_lock);
617 else if (!mutex_trylock(&c->bucket_lock)) 617 else if (!mutex_trylock(&c->bucket_lock))
618 return -1; 618 return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155 155
156 /* Read journal buckets ordered by golden ratio hash to quickly 156 /*
157 * Read journal buckets ordered by golden ratio hash to quickly
157 * find a sequence of buckets with valid journal entries 158 * find a sequence of buckets with valid journal entries
158 */ 159 */
159 for (i = 0; i < ca->sb.njournal_buckets; i++) { 160 for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
166 goto bsearch; 167 goto bsearch;
167 } 168 }
168 169
169 /* If that fails, check all the buckets we haven't checked 170 /*
171 * If that fails, check all the buckets we haven't checked
170 * already 172 * already
171 */ 173 */
172 pr_debug("falling back to linear search"); 174 pr_debug("falling back to linear search");
173 175
174 for (l = 0; l < ca->sb.njournal_buckets; l++) { 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
175 if (test_bit(l, bitmap)) 177 l < ca->sb.njournal_buckets;
176 continue; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
177
178 if (read_bucket(l)) 179 if (read_bucket(l))
179 goto bsearch; 180 goto bsearch;
180 } 181
182 if (list_empty(list))
183 continue;
181bsearch: 184bsearch:
182 /* Binary search */ 185 /* Binary search */
183 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
197 r = m; 200 r = m;
198 } 201 }
199 202
200 /* Read buckets in reverse order until we stop finding more 203 /*
204 * Read buckets in reverse order until we stop finding more
201 * journal entries 205 * journal entries
202 */ 206 */
203 pr_debug("finishing up"); 207 pr_debug("finishing up: m %u njournal_buckets %u",
208 m, ca->sb.njournal_buckets);
204 l = m; 209 l = m;
205 210
206 while (1) { 211 while (1) {
@@ -228,9 +233,10 @@ bsearch:
228 } 233 }
229 } 234 }
230 235
231 c->journal.seq = list_entry(list->prev, 236 if (!list_empty(list))
232 struct journal_replay, 237 c->journal.seq = list_entry(list->prev,
233 list)->j.seq; 238 struct journal_replay,
239 list)->j.seq;
234 240
235 return 0; 241 return 0;
236#undef read_bucket 242#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
428 return; 434 return;
429 } 435 }
430 436
431 switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { 437 switch (atomic_read(&ja->discard_in_flight)) {
432 case DISCARD_IN_FLIGHT: 438 case DISCARD_IN_FLIGHT:
433 return; 439 return;
434 440
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
689 if (cl) 695 if (cl)
690 BUG_ON(!closure_wait(&w->wait, cl)); 696 BUG_ON(!closure_wait(&w->wait, cl));
691 697
698 closure_flush(&c->journal.io);
692 __journal_try_write(c, true); 699 __journal_try_write(c, true);
693 } 700 }
694} 701}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..b6a74bcbb08f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -996,17 +996,19 @@ static void request_write(struct cached_dev *dc, struct search *s)
996 closure_bio_submit(bio, cl, s->d); 996 closure_bio_submit(bio, cl, s->d);
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 s->op.cache_bio = bio;
999 1000
1000 if (s->op.flush_journal) { 1001 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1002 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 1003 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
1003 dc->disk.bio_split); 1004 dc->disk.bio_split);
1004 1005
1005 bio->bi_size = 0; 1006 flush->bi_rw = WRITE_FLUSH;
1006 bio->bi_vcnt = 0; 1007 flush->bi_bdev = bio->bi_bdev;
1007 closure_bio_submit(bio, cl, s->d); 1008 flush->bi_end_io = request_endio;
1008 } else { 1009 flush->bi_private = cl;
1009 s->op.cache_bio = bio; 1010
1011 closure_bio_submit(flush, cl, s->d);
1010 } 1012 }
1011 } 1013 }
1012out: 1014out:
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
223 } 223 }
224 224
225 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */ 226 if (size > SB_LABEL_SIZE)
227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
228 bch_write_bdev_super(dc, NULL); 233 bch_write_bdev_super(dc, NULL);
229 if (dc->disk.c) { 234 if (dc->disk.c) {
230 memcpy(dc->disk.c->uuids[dc->disk.id].label, 235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
190 stats->last = now ?: 1; 190 stats->last = now ?: 1;
191} 191}
192 192
193unsigned bch_next_delay(struct ratelimit *d, uint64_t done) 193/**
194 * bch_next_delay() - increment @d by the amount of work done, and return how
195 * long to delay until the next time to do some work.
196 *
197 * @d - the struct bch_ratelimit to update
198 * @done - the amount of work done, in arbitrary units
199 *
200 * Returns the amount of time to delay by, in jiffies
201 */
202uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
194{ 203{
195 uint64_t now = local_clock(); 204 uint64_t now = local_clock();
196 205
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
450 (ewma) >> factor; \ 450 (ewma) >> factor; \
451}) 451})
452 452
453struct ratelimit { 453struct bch_ratelimit {
454 /* Next time we want to do some work, in nanoseconds */
454 uint64_t next; 455 uint64_t next;
456
457 /*
458 * Rate at which we want to do work, in units per nanosecond
459 * The units here correspond to the units passed to bch_next_delay()
460 */
455 unsigned rate; 461 unsigned rate;
456}; 462};
457 463
458static inline void ratelimit_reset(struct ratelimit *d) 464static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
459{ 465{
460 d->next = local_clock(); 466 d->next = local_clock();
461} 467}
462 468
463unsigned bch_next_delay(struct ratelimit *d, uint64_t done); 469uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
464 470
465#define __DIV_SAFE(n, d, zero) \ 471#define __DIV_SAFE(n, d, zero) \
466({ \ 472({ \
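
For context, the renamed struct bch_ratelimit schedules work against a monotonic clock: each completed unit of work pushes the next-allowed time forward, and bch_next_delay() reports how long the caller should wait. A generic sketch of that idea (not the exact bcache arithmetic; units, names and the overflow handling are illustrative only, and rate must be non-zero):

#include <stdint.h>

struct ratelimit_sketch {
        uint64_t next;          /* next time work may proceed, in ns */
        unsigned rate;          /* allowed units of work per second */
};

/* Returns how many nanoseconds the caller should wait before continuing. */
static uint64_t next_delay_ns(struct ratelimit_sketch *d, uint64_t done,
                              uint64_t now_ns)
{
        if (d->next < now_ns)   /* don't bank unused budget forever */
                d->next = now_ns;

        /* each unit of work costs 1/rate seconds of budget */
        d->next += done * 1000000000ULL / d->rate;

        return d->next > now_ns ? d->next - now_ns : 0;
}
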
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
94 94
95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
96{ 96{
97 uint64_t ret;
98
97 if (atomic_read(&dc->disk.detaching) || 99 if (atomic_read(&dc->disk.detaching) ||
98 !dc->writeback_percent) 100 !dc->writeback_percent)
99 return 0; 101 return 0;
100 102
101 return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 103 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
104
105 return min_t(uint64_t, ret, HZ);
102} 106}
103 107
104/* Background writeback */ 108/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
208 212
209 up_write(&dc->writeback_lock); 213 up_write(&dc->writeback_lock);
210 214
211 ratelimit_reset(&dc->writeback_rate); 215 bch_ratelimit_reset(&dc->writeback_rate);
212 216
213 /* Punt to workqueue only so we don't recurse and blow the stack */ 217 /* Punt to workqueue only so we don't recurse and blow the stack */
214 continue_at(cl, read_dirty, dirty_wq); 218 continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
318 } 322 }
319 323
320 bch_keybuf_del(&dc->writeback_keys, w); 324 bch_keybuf_del(&dc->writeback_keys, w);
321 atomic_dec_bug(&dc->in_flight); 325 up(&dc->in_flight);
322
323 closure_wake_up(&dc->writeback_wait);
324 326
325 closure_return_with_destructor(cl, dirty_io_destructor); 327 closure_return_with_destructor(cl, dirty_io_destructor);
326} 328}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
349 351
350 closure_bio_submit(&io->bio, cl, &io->dc->disk); 352 closure_bio_submit(&io->bio, cl, &io->dc->disk);
351 353
352 continue_at(cl, write_dirty_finish, dirty_wq); 354 continue_at(cl, write_dirty_finish, system_wq);
353} 355}
354 356
355static void read_dirty_endio(struct bio *bio, int error) 357static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
369 371
370 closure_bio_submit(&io->bio, cl, &io->dc->disk); 372 closure_bio_submit(&io->bio, cl, &io->dc->disk);
371 373
372 continue_at(cl, write_dirty, dirty_wq); 374 continue_at(cl, write_dirty, system_wq);
373} 375}
374 376
375static void read_dirty(struct closure *cl) 377static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
394 396
395 if (delay > 0 && 397 if (delay > 0 &&
396 (KEY_START(&w->key) != dc->last_read || 398 (KEY_START(&w->key) != dc->last_read ||
397 jiffies_to_msecs(delay) > 50)) { 399 jiffies_to_msecs(delay) > 50))
398 w->private = NULL; 400 delay = schedule_timeout_uninterruptible(delay);
399
400 closure_delay(&dc->writeback, delay);
401 continue_at(cl, read_dirty, dirty_wq);
402 }
403 401
404 dc->last_read = KEY_OFFSET(&w->key); 402 dc->last_read = KEY_OFFSET(&w->key);
405 403
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
424 422
425 trace_bcache_writeback(&w->key); 423 trace_bcache_writeback(&w->key);
426 424
427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 425 down(&dc->in_flight);
426 closure_call(&io->cl, read_dirty_submit, NULL, cl);
428 427
429 delay = writeback_delay(dc, KEY_SIZE(&w->key)); 428 delay = writeback_delay(dc, KEY_SIZE(&w->key));
430
431 atomic_inc(&dc->in_flight);
432
433 if (!closure_wait_event(&dc->writeback_wait, cl,
434 atomic_read(&dc->in_flight) < 64))
435 continue_at(cl, read_dirty, dirty_wq);
436 } 429 }
437 430
438 if (0) { 431 if (0) {
@@ -442,7 +435,11 @@ err:
442 bch_keybuf_del(&dc->writeback_keys, w); 435 bch_keybuf_del(&dc->writeback_keys, w);
443 } 436 }
444 437
445 refill_dirty(cl); 438 /*
439 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
440 * freed) before refilling again
441 */
442 continue_at(cl, refill_dirty, dirty_wq);
446} 443}
447 444
448/* Init */ 445/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
484 481
485void bch_cached_dev_writeback_init(struct cached_dev *dc) 482void bch_cached_dev_writeback_init(struct cached_dev *dc)
486{ 483{
484 sema_init(&dc->in_flight, 64);
487 closure_init_unlocked(&dc->writeback); 485 closure_init_unlocked(&dc->writeback);
488 init_rwsem(&dc->writeback_lock); 486 init_rwsem(&dc->writeback_lock);
489 487
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
513 511
514int __init bch_writeback_init(void) 512int __init bch_writeback_init(void)
515{ 513{
516 dirty_wq = create_singlethread_workqueue("bcache_writeback"); 514 dirty_wq = create_workqueue("bcache_writeback");
517 if (!dirty_wq) 515 if (!dirty_wq)
518 return -ENOMEM; 516 return -ENOMEM;
519 517
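
The writeback path above swaps an atomic in-flight counter plus a closure wait list for a counting semaphore initialised to 64: submitters block in down() once the limit is reached, and each completion frees a slot with up(). A minimal sketch of the pattern (names are illustrative):

#include <linux/semaphore.h>

static struct semaphore in_flight;

static void writer_init(void)
{
        sema_init(&in_flight, 64);      /* at most 64 IOs outstanding */
}

static void submit_one(void)
{
        down(&in_flight);               /* sleeps once 64 IOs are in flight */
        /* ... issue the IO; its completion handler calls up() below */
}

static void on_complete(void)
{
        up(&in_flight);                 /* frees a slot, wakes a waiter */
}
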
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
24 22
25struct dm_io_client { 23struct dm_io_client {
26 mempool_t *pool; 24 mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
50struct dm_io_client *dm_io_client_create(void) 48struct dm_io_client *dm_io_client_create(void)
51{ 49{
52 struct dm_io_client *client; 50 struct dm_io_client *client;
51 unsigned min_ios = dm_get_reserved_bio_based_ios();
53 52
54 client = kmalloc(sizeof(*client), GFP_KERNEL); 53 client = kmalloc(sizeof(*client), GFP_KERNEL);
55 if (!client) 54 if (!client)
56 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
57 56
58 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
59 if (!client->pool) 58 if (!client->pool)
60 goto bad; 59 goto bad;
61 60
62 client->bios = bioset_create(MIN_BIOS, 0); 61 client->bios = bioset_create(min_ios, 0);
63 if (!client->bios) 62 if (!client->bios)
64 goto bad; 63 goto bad;
65 64
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm.h"
10#include "dm-path-selector.h" 11#include "dm-path-selector.h"
11#include "dm-uevent.h" 12#include "dm-uevent.h"
12 13
@@ -116,8 +117,6 @@ struct dm_mpath_io {
116 117
117typedef int (*action_fn) (struct pgpath *pgpath); 118typedef int (*action_fn) (struct pgpath *pgpath);
118 119
119#define MIN_IOS 256 /* Mempool size */
120
121static struct kmem_cache *_mpio_cache; 120static struct kmem_cache *_mpio_cache;
122 121
123static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
190static struct multipath *alloc_multipath(struct dm_target *ti) 189static struct multipath *alloc_multipath(struct dm_target *ti)
191{ 190{
192 struct multipath *m; 191 struct multipath *m;
192 unsigned min_ios = dm_get_reserved_rq_based_ios();
193 193
194 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
195 if (m) { 195 if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
202 INIT_WORK(&m->trigger_event, trigger_event); 202 INIT_WORK(&m->trigger_event, trigger_event);
203 init_waitqueue_head(&m->pg_init_wait); 203 init_waitqueue_head(&m->pg_init_wait);
204 mutex_init(&m->work_mutex); 204 mutex_init(&m->work_mutex);
205 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 205 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
206 if (!m->mpio_pool) { 206 if (!m->mpio_pool) {
207 kfree(m); 207 kfree(m);
208 return NULL; 208 return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
1268 case -EREMOTEIO: 1268 case -EREMOTEIO:
1269 case -EILSEQ: 1269 case -EILSEQ:
1270 case -ENODATA: 1270 case -ENODATA:
1271 case -ENOSPC:
1271 return 1; 1272 return 1;
1272 } 1273 }
1273 1274
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
1298 if (!error && !clone->errors) 1299 if (!error && !clone->errors)
1299 return 0; /* I/O complete */ 1300 return 0; /* I/O complete */
1300 1301
1301 if (noretry_error(error)) 1302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
1302 return error; 1311 return error;
1312 }
1303 1313
1304 if (mpio->pgpath) 1314 if (mpio->pgpath)
1305 fail_path(mpio->pgpath); 1315 fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..4caa8e6d59d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_work(&req.work); 259 flush_workqueue(ps->metadata_wq);
260 260
261 return req.result; 261 return req.result;
262} 262}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
725 */ 725 */
726static int init_hash_tables(struct dm_snapshot *s) 726static int init_hash_tables(struct dm_snapshot *s)
727{ 727{
728 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 728 sector_t hash_size, cow_dev_size, max_buckets;
729 729
730 /* 730 /*
731 * Calculate based on the size of the original volume or 731 * Calculate based on the size of the original volume or
732 * the COW volume... 732 * the COW volume...
733 */ 733 */
734 cow_dev_size = get_dev_size(s->cow->bdev); 734 cow_dev_size = get_dev_size(s->cow->bdev);
735 origin_dev_size = get_dev_size(s->origin->bdev);
736 max_buckets = calc_max_buckets(); 735 max_buckets = calc_max_buckets();
737 736
738 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 737 hash_size = cow_dev_size >> s->store->chunk_shift;
739 hash_size = min(hash_size, max_buckets); 738 hash_size = min(hash_size, max_buckets);
740 739
741 if (hash_size < 64) 740 if (hash_size < 64)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
451 struct dm_stat_percpu *p; 451 struct dm_stat_percpu *p;
452 452
453 /* 453 /*
454 * For strict correctness we should use local_irq_disable/enable 454 * For strict correctness we should use local_irq_save/restore
455 * instead of preempt_disable/enable. 455 * instead of preempt_disable/enable.
456 * 456 *
457 * This is racy if the driver finishes bios from non-interrupt 457 * preempt_disable/enable is racy if the driver finishes bios
458 * context as well as from interrupt context or from more different 458 * from non-interrupt context as well as from interrupt context
459 * interrupts. 459 * or from more different interrupts.
460 * 460 *
461 * However, the race only results in not counting some events, 461 * On 64-bit architectures the race only results in not counting some
462 * so it is acceptable. 462 * events, so it is acceptable. On 32-bit architectures the race could
463 * cause the counter to go off by 2^32, so we need to do proper locking
464 * there.
463 * 465 *
464 * part_stat_lock()/part_stat_unlock() have this race too. 466 * part_stat_lock()/part_stat_unlock() have this race too.
465 */ 467 */
468#if BITS_PER_LONG == 32
469 unsigned long flags;
470 local_irq_save(flags);
471#else
466 preempt_disable(); 472 preempt_disable();
473#endif
467 p = &s->stat_percpu[smp_processor_id()][entry]; 474 p = &s->stat_percpu[smp_processor_id()][entry];
468 475
469 if (!end) { 476 if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
478 p->ticks[idx] += duration; 485 p->ticks[idx] += duration;
479 } 486 }
480 487
488#if BITS_PER_LONG == 32
489 local_irq_restore(flags);
490#else
481 preempt_enable(); 491 preempt_enable();
492#endif
482} 493}
483 494
484static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, 495static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
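
The BITS_PER_LONG == 32 branch above exists because a 64-bit per-CPU counter is updated as two 32-bit halves on those architectures; an interrupt that also updates the counter can fire between the halves and skew it by 2^32, so interrupts are disabled around the update, while 64-bit kernels only need to stay on the local CPU. A sketch of the guard as a hypothetical helper:

#include <linux/bitops.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>

static inline void guarded_add64(unsigned long long *counter,
                                 unsigned long long val)
{
#if BITS_PER_LONG == 32
        unsigned long flags;

        /* The add is two 32-bit halves here; an interrupt updating the
         * same counter in between could skew it by 2^32. */
        local_irq_save(flags);
        *counter += val;
        local_irq_restore(flags);
#else
        /* A 64-bit store cannot be torn; disabling preemption keeps us on
         * the local CPU's counter, and the remaining race with interrupts
         * at worst drops a few events. */
        preempt_disable();
        *counter += val;
        preempt_enable();
#endif
}
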
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2095 * them down to the data device. The thin device's discard 2095 * them down to the data device. The thin device's discard
2096 * processing will cause mappings to be removed from the btree. 2096 * processing will cause mappings to be removed from the btree.
2097 */ 2097 */
2098 ti->discard_zeroes_data_unsupported = true;
2098 if (pf.discard_enabled && pf.discard_passdown) { 2099 if (pf.discard_enabled && pf.discard_passdown) {
2099 ti->num_discard_bios = 1; 2100 ti->num_discard_bios = 1;
2100 2101
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2104 * thin devices' discard limits consistent). 2105 * thin devices' discard limits consistent).
2105 */ 2106 */
2106 ti->discards_supported = true; 2107 ti->discards_supported = true;
2107 ti->discard_zeroes_data_unsupported = true;
2108 } 2108 }
2109 ti->private = pt; 2109 ti->private = pt;
2110 2110
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2689 * They get transferred to the live pool in bind_control_target() 2689 * They get transferred to the live pool in bind_control_target()
2690 * called from pool_preresume(). 2690 * called from pool_preresume().
2691 */ 2691 */
2692 if (!pt->adjusted_pf.discard_enabled) 2692 if (!pt->adjusted_pf.discard_enabled) {
2693 /*
2694 * Must explicitly disallow stacking discard limits, otherwise the
2695 * block layer will stack them if the pool's data device has support.
2696 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2697 * user to see that, so make sure to set all discard limits to 0.
2698 */
2699 limits->discard_granularity = 0;
2693 return; 2700 return;
2701 }
2694 2702
2695 disable_passdown_if_not_supported(pt); 2703 disable_passdown_if_not_supported(pt);
2696 2704
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2826 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2834 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2827 2835
2828 /* In case the pool supports discards, pass them on. */ 2836 /* In case the pool supports discards, pass them on. */
2837 ti->discard_zeroes_data_unsupported = true;
2829 if (tc->pool->pf.discard_enabled) { 2838 if (tc->pool->pf.discard_enabled) {
2830 ti->discards_supported = true; 2839 ti->discards_supported = true;
2831 ti->num_discard_bios = 1; 2840 ti->num_discard_bios = 1;
2832 ti->discard_zeroes_data_unsupported = true;
2833 /* Discard bios must be split on a block boundary */ 2841 /* Discard bios must be split on a block boundary */
2834 ti->split_discard_bios = true; 2842 ti->split_discard_bios = true;
2835 } 2843 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
211 struct bio_set *bs; 211 struct bio_set *bs;
212}; 212};
213 213
214#define MIN_IOS 256 214#define RESERVED_BIO_BASED_IOS 16
215#define RESERVED_REQUEST_BASED_IOS 256
216#define RESERVED_MAX_IOS 1024
215static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
216static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
217 219
220/*
221 * Bio-based DM's mempools' reserved IOs set by the user.
222 */
223static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
224
225/*
226 * Request-based DM's mempools' reserved IOs set by the user.
227 */
228static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
229
230static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
231 unsigned def, unsigned max)
232{
233 unsigned ios = ACCESS_ONCE(*reserved_ios);
234 unsigned modified_ios = 0;
235
236 if (!ios)
237 modified_ios = def;
238 else if (ios > max)
239 modified_ios = max;
240
241 if (modified_ios) {
242 (void)cmpxchg(reserved_ios, ios, modified_ios);
243 ios = modified_ios;
244 }
245
246 return ios;
247}
248
249unsigned dm_get_reserved_bio_based_ios(void)
250{
251 return __dm_get_reserved_ios(&reserved_bio_based_ios,
252 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
253}
254EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
255
256unsigned dm_get_reserved_rq_based_ios(void)
257{
258 return __dm_get_reserved_ios(&reserved_rq_based_ios,
259 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
260}
261EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
262
218static int __init local_init(void) 263static int __init local_init(void)
219{ 264{
220 int r = -ENOMEM; 265 int r = -ENOMEM;
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278} 2323}
2279 2324
2280/* 2325/*
2326 * The queue_limits are only valid as long as you have a reference
2327 * count on 'md'.
2328 */
2329struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2330{
2331 BUG_ON(!atomic_read(&md->holders));
2332 return &md->queue->limits;
2333}
2334EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2335
2336/*
2281 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2337 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2282 */ 2338 */
2283static int dm_init_request_based_queue(struct mapped_device *md) 2339static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
2862 2918
2863 if (type == DM_TYPE_BIO_BASED) { 2919 if (type == DM_TYPE_BIO_BASED) {
2864 cachep = _io_cache; 2920 cachep = _io_cache;
2865 pool_size = 16; 2921 pool_size = dm_get_reserved_bio_based_ios();
2866 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2922 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2867 } else if (type == DM_TYPE_REQUEST_BASED) { 2923 } else if (type == DM_TYPE_REQUEST_BASED) {
2868 cachep = _rq_tio_cache; 2924 cachep = _rq_tio_cache;
2869 pool_size = MIN_IOS; 2925 pool_size = dm_get_reserved_rq_based_ios();
2870 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2871 /* per_bio_data_size is not used. See __bind_mempools(). */ 2927 /* per_bio_data_size is not used. See __bind_mempools(). */
2872 WARN_ON(per_bio_data_size != 0); 2928 WARN_ON(per_bio_data_size != 0);
2873 } else 2929 } else
2874 goto out; 2930 goto out;
2875 2931
2876 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); 2932 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2877 if (!pools->io_pool) 2933 if (!pools->io_pool)
2878 goto out; 2934 goto out;
2879 2935
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
2924 2980
2925module_param(major, uint, 0); 2981module_param(major, uint, 0);
2926MODULE_PARM_DESC(major, "The major number of the device mapper"); 2982MODULE_PARM_DESC(major, "The major number of the device mapper");
2983
2984module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2985MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2986
2987module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2988MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2989
2927MODULE_DESCRIPTION(DM_NAME " driver"); 2990MODULE_DESCRIPTION(DM_NAME " driver");
2928MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2991MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2929MODULE_LICENSE("GPL"); 2992MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
184/* 184/*
185 * Helpers that are used by DM core 185 * Helpers that are used by DM core
186 */ 186 */
187unsigned dm_get_reserved_bio_based_ios(void);
188unsigned dm_get_reserved_rq_based_ios(void);
189
187static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) 190static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
188{ 191{
189 return !maxlen || strlen(result) + 1 >= maxlen; 192 return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
57 dev->iamthif_ioctl = false; 57 dev->iamthif_ioctl = false;
58 dev->iamthif_state = MEI_IAMTHIF_IDLE; 58 dev->iamthif_state = MEI_IAMTHIF_IDLE;
59 dev->iamthif_timer = 0; 59 dev->iamthif_timer = 0;
60 dev->iamthif_stall_timer = 0;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
297 297
298 if (cl->reading_state != MEI_READ_COMPLETE && 298 if (cl->reading_state != MEI_READ_COMPLETE &&
299 !waitqueue_active(&cl->rx_wait)) { 299 !waitqueue_active(&cl->rx_wait)) {
300
300 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
301 302
302 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
303 (MEI_READ_COMPLETE == cl->reading_state))) { 304 cl->reading_state == MEI_READ_COMPLETE ||
305 mei_cl_is_transitioning(cl))) {
306
304 if (signal_pending(current)) 307 if (signal_pending(current))
305 return -EINTR; 308 return -EINTR;
306 return -ERESTARTSYS; 309 return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
90 cl->dev->dev_state == MEI_DEV_ENABLED && 90 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 91 cl->state == MEI_FILE_CONNECTED);
92} 92}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{
95 return (MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state);
98}
93 99
94bool mei_cl_is_other_connecting(struct mei_cl *cl); 100bool mei_cl_is_other_connecting(struct mei_cl *cl);
95int mei_cl_disconnect(struct mei_cl *cl); 101int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
37 37
38 dev->me_clients_num = 0;
39 dev->me_client_presentation_num = 0;
40 dev->me_client_index = 0;
41
38 /* count how many ME clients we have */ 42 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) 43 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++; 44 dev->me_clients_num++;
41 45
42 if (dev->me_clients_num <= 0) 46 if (dev->me_clients_num == 0)
43 return; 47 return;
44 48
45 kfree(dev->me_clients); 49 kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
221 struct hbm_props_request *prop_req; 225 struct hbm_props_request *prop_req;
222 const size_t len = sizeof(struct hbm_props_request); 226 const size_t len = sizeof(struct hbm_props_request);
223 unsigned long next_client_index; 227 unsigned long next_client_index;
224 u8 client_num; 228 unsigned long client_num;
225 229
226 230
227 client_num = dev->me_client_presentation_num; 231 client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
677 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 681 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
678 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 682 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
679 dev->init_clients_timer = 0; 683 dev->init_clients_timer = 0;
680 dev->me_client_presentation_num = 0;
681 dev->me_client_index = 0;
682 mei_hbm_me_cl_allocate(dev); 684 mei_hbm_me_cl_allocate(dev);
683 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 685 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
684 686
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
176 } 176 }
177 177
178 /* we're already in reset, cancel the init timer */
179 dev->init_clients_timer = 0;
180
178 dev->me_clients_num = 0; 181 dev->me_clients_num = 0;
179 dev->rd_msg_hdr = 0; 182 dev->rd_msg_hdr = 0;
180 dev->wd_pending = false; 183 dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
249 mutex_unlock(&dev->device_lock); 249 mutex_unlock(&dev->device_lock);
250 250
251 if (wait_event_interruptible(cl->rx_wait, 251 if (wait_event_interruptible(cl->rx_wait,
252 (MEI_READ_COMPLETE == cl->reading_state || 252 MEI_READ_COMPLETE == cl->reading_state ||
253 MEI_FILE_INITIALIZING == cl->state || 253 mei_cl_is_transitioning(cl))) {
254 MEI_FILE_DISCONNECTED == cl->state || 254
255 MEI_FILE_DISCONNECTING == cl->state))) {
256 if (signal_pending(current)) 255 if (signal_pending(current))
257 return -EINTR; 256 return -EINTR;
258 return -ERESTARTSYS; 257 return -ERESTARTSYS;
259 } 258 }
260 259
261 mutex_lock(&dev->device_lock); 260 mutex_lock(&dev->device_lock);
262 if (MEI_FILE_INITIALIZING == cl->state || 261 if (mei_cl_is_transitioning(cl)) {
263 MEI_FILE_DISCONNECTED == cl->state ||
264 MEI_FILE_DISCONNECTING == cl->state) {
265 rets = -EBUSY; 262 rets = -EBUSY;
266 goto out; 263 goto out;
267 } 264 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
399 u8 me_clients_num; 399 unsigned long me_clients_num;
400 u8 me_client_presentation_num; 400 unsigned long me_client_presentation_num;
401 u8 me_client_index; 401 unsigned long me_client_index;
402 402
403 struct mei_cl wd_cl; 403 struct mei_cl wd_cl;
404 enum mei_wd_states wd_state; 404 enum mei_wd_states wd_state;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 87ed3fb5149a..f344659dceac 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
113}; 113};
114 114
115static const struct of_device_id sh_mobile_sdhi_of_match[] = { 115static const struct of_device_id sh_mobile_sdhi_of_match[] = {
116 { .compatible = "renesas,shmobile-sdhi" }, 116 { .compatible = "renesas,sdhi-shmobile" },
117 { .compatible = "renesas,sh7372-sdhi" }, 117 { .compatible = "renesas,sdhi-sh7372" },
118 { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 118 { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
119 { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 119 { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
120 { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 120 { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
121 { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 121 { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
122 { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 122 { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
123 { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 123 { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
124 {}, 124 {},
125}; 125};
126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 26b14f9fcac6..6bc9618af094 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash)
168 */ 168 */
169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) 169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
170{ 170{
171 int status;
172 bool need_wren = false;
173
171 switch (JEDEC_MFR(jedec_id)) { 174 switch (JEDEC_MFR(jedec_id)) {
172 case CFI_MFR_MACRONIX:
173 case CFI_MFR_ST: /* Micron, actually */ 175 case CFI_MFR_ST: /* Micron, actually */
176 /* Some Micron need WREN command; all will accept it */
177 need_wren = true;
178 case CFI_MFR_MACRONIX:
174 case 0xEF /* winbond */: 179 case 0xEF /* winbond */:
180 if (need_wren)
181 write_enable(flash);
182
175 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 183 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
176 return spi_write(flash->spi, flash->command, 1); 184 status = spi_write(flash->spi, flash->command, 1);
185
186 if (need_wren)
187 write_disable(flash);
188
189 return status;
177 default: 190 default:
178 /* Spansion style */ 191 /* Spansion style */
179 flash->command[0] = OPCODE_BRWR; 192 flash->command[0] = OPCODE_BRWR;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7ed4841327f2..d340b2f198c6 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2869 2869
2870 len = le16_to_cpu(p->ext_param_page_length) * 16; 2870 len = le16_to_cpu(p->ext_param_page_length) * 16;
2871 ep = kmalloc(len, GFP_KERNEL); 2871 ep = kmalloc(len, GFP_KERNEL);
2872 if (!ep) { 2872 if (!ep)
2873 ret = -ENOMEM; 2873 return -ENOMEM;
2874 goto ext_out;
2875 }
2876 2874
2877 /* Send our own NAND_CMD_PARAM. */ 2875 /* Send our own NAND_CMD_PARAM. */
2878 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2876 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
@@ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2920 } 2918 }
2921 2919
2922 pr_info("ONFI extended param page detected.\n"); 2920 pr_info("ONFI extended param page detected.\n");
2923 return 0; 2921 ret = 0;
2924 2922
2925ext_out: 2923ext_out:
2926 kfree(ep); 2924 kfree(ep);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55bbb8b8200c..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1724 struct bonding *bond = netdev_priv(bond_dev); 1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent; 1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr; 1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1727 netdev_features_t old_features = bond_dev->features; 1728 netdev_features_t old_features = bond_dev->features;
1728 1729
1729 /* slave is not a slave or master is not master of this slave */ 1730 /* slave is not a slave or master is not master of this slave */
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev,
1855 * bond_change_active_slave(..., NULL) 1856 * bond_change_active_slave(..., NULL)
1856 */ 1857 */
1857 if (!USES_PRIMARY(bond->params.mode)) { 1858 if (!USES_PRIMARY(bond->params.mode)) {
1858 /* unset promiscuity level from slave */ 1859 /* unset promiscuity level from slave
1859 if (bond_dev->flags & IFF_PROMISC) 1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
1863 * when this slave was attached, so we cache at the start of the
1864 * function and use it here. Same goes for ALLMULTI below
1865 */
1866 if (old_flags & IFF_PROMISC)
1860 dev_set_promiscuity(slave_dev, -1); 1867 dev_set_promiscuity(slave_dev, -1);
1861 1868
1862 /* unset allmulti level from slave */ 1869 /* unset allmulti level from slave */
1863 if (bond_dev->flags & IFF_ALLMULTI) 1870 if (old_flags & IFF_ALLMULTI)
1864 dev_set_allmulti(slave_dev, -1); 1871 dev_set_allmulti(slave_dev, -1);
1865 1872
1866 bond_hw_addr_flush(bond_dev, slave_dev); 1873 bond_hw_addr_flush(bond_dev, slave_dev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..3f21142138b7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
702{ 702{
703 struct flexcan_priv *priv = netdev_priv(dev); 703 struct flexcan_priv *priv = netdev_priv(dev);
704 struct flexcan_regs __iomem *regs = priv->base; 704 struct flexcan_regs __iomem *regs = priv->base;
705 unsigned int i;
706 int err; 705 int err;
707 u32 reg_mcr, reg_ctrl; 706 u32 reg_mcr, reg_ctrl;
708 707
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev)
772 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 771 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
773 flexcan_write(reg_ctrl, &regs->ctrl); 772 flexcan_write(reg_ctrl, &regs->ctrl);
774 773
775 for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
776 flexcan_write(0, &regs->cantxfg[i].can_ctrl);
777 flexcan_write(0, &regs->cantxfg[i].can_id);
778 flexcan_write(0, &regs->cantxfg[i].data[0]);
779 flexcan_write(0, &regs->cantxfg[i].data[1]);
780
781 /* put MB into rx queue */
782 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
783 &regs->cantxfg[i].can_ctrl);
784 }
785
786 /* acceptance mask/acceptance code (accept everything) */ 774 /* acceptance mask/acceptance code (accept everything) */
787 flexcan_write(0x0, &regs->rxgmask); 775 flexcan_write(0x0, &regs->rxgmask);
788 flexcan_write(0x0, &regs->rx14mask); 776 flexcan_write(0x0, &regs->rx14mask);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76/* maximum rx buffer len: extended CAN frame with timestamp */ 76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) 77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78 78
79#define SLC_CMD_LEN 1
80#define SLC_SFF_ID_LEN 3
81#define SLC_EFF_ID_LEN 8
82
79struct slcan { 83struct slcan {
80 int magic; 84 int magic;
81 85
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
142{ 146{
143 struct sk_buff *skb; 147 struct sk_buff *skb;
144 struct can_frame cf; 148 struct can_frame cf;
145 int i, dlc_pos, tmp; 149 int i, tmp;
146 unsigned long ultmp; 150 u32 tmpid;
147 char cmd = sl->rbuff[0]; 151 char *cmd = sl->rbuff;
148 152
149 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) 153 cf.can_id = 0;
154
155 switch (*cmd) {
156 case 'r':
157 cf.can_id = CAN_RTR_FLAG;
158 /* fallthrough */
159 case 't':
160 /* store dlc ASCII value and terminate SFF CAN ID string */
161 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
162 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
163 /* point to payload data behind the dlc */
164 cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
165 break;
166 case 'R':
167 cf.can_id = CAN_RTR_FLAG;
168 /* fallthrough */
169 case 'T':
170 cf.can_id |= CAN_EFF_FLAG;
171 /* store dlc ASCII value and terminate EFF CAN ID string */
172 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
173 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
174 /* point to payload data behind the dlc */
175 cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
176 break;
177 default:
150 return; 178 return;
179 }
151 180
152 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ 181 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
153 dlc_pos = 4; /* dlc position tiiid */
154 else
155 dlc_pos = 9; /* dlc position Tiiiiiiiid */
156
157 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
158 return; 182 return;
159 183
160 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ 184 cf.can_id |= tmpid;
161 185
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 186 /* get can_dlc from sanitized ASCII value */
163 187 if (cf.can_dlc >= '0' && cf.can_dlc < '9')
164 if (kstrtoul(sl->rbuff+1, 16, &ultmp)) 188 cf.can_dlc -= '0';
189 else
165 return; 190 return;
166 191
167 cf.can_id = ultmp;
168
169 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
170 cf.can_id |= CAN_EFF_FLAG;
171
172 if ((cmd | 0x20) == 'r') /* RTR frame */
173 cf.can_id |= CAN_RTR_FLAG;
174
175 *(u64 *) (&cf.data) = 0; /* clear payload */ 192 *(u64 *) (&cf.data) = 0; /* clear payload */
176 193
177 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { 194 /* RTR frames may have a dlc > 0 but they never have any data bytes */
178 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 195 if (!(cf.can_id & CAN_RTR_FLAG)) {
179 if (tmp < 0) 196 for (i = 0; i < cf.can_dlc; i++) {
180 return; 197 tmp = hex_to_bin(*cmd++);
181 cf.data[i] = (tmp << 4); 198 if (tmp < 0)
182 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 199 return;
183 if (tmp < 0) 200 cf.data[i] = (tmp << 4);
184 return; 201 tmp = hex_to_bin(*cmd++);
185 cf.data[i] |= tmp; 202 if (tmp < 0)
203 return;
204 cf.data[i] |= tmp;
205 }
186 } 206 }
187 207
188 skb = dev_alloc_skb(sizeof(struct can_frame) + 208 skb = dev_alloc_skb(sizeof(struct can_frame) +
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
209/* parse tty input stream */ 229/* parse tty input stream */
210static void slcan_unesc(struct slcan *sl, unsigned char s) 230static void slcan_unesc(struct slcan *sl, unsigned char s)
211{ 231{
212
213 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ 232 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
214 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 233 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
215 (sl->rcount > 4)) { 234 (sl->rcount > 4)) {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
236/* Encapsulate one can_frame and stuff into a TTY queue. */ 255/* Encapsulate one can_frame and stuff into a TTY queue. */
237static void slc_encaps(struct slcan *sl, struct can_frame *cf) 256static void slc_encaps(struct slcan *sl, struct can_frame *cf)
238{ 257{
239 int actual, idx, i; 258 int actual, i;
240 char cmd; 259 unsigned char *pos;
260 unsigned char *endpos;
261 canid_t id = cf->can_id;
262
263 pos = sl->xbuff;
241 264
242 if (cf->can_id & CAN_RTR_FLAG) 265 if (cf->can_id & CAN_RTR_FLAG)
243 cmd = 'R'; /* becomes 'r' in standard frame format */ 266 *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
244 else 267 else
245 cmd = 'T'; /* becomes 't' in standard frame format */ 268 *pos = 'T'; /* becomes 't' in standard frame format (SSF) */
246 269
247 if (cf->can_id & CAN_EFF_FLAG) 270 /* determine number of chars for the CAN-identifier */
248 sprintf(sl->xbuff, "%c%08X%d", cmd, 271 if (cf->can_id & CAN_EFF_FLAG) {
249 cf->can_id & CAN_EFF_MASK, cf->can_dlc); 272 id &= CAN_EFF_MASK;
250 else 273 endpos = pos + SLC_EFF_ID_LEN;
251 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, 274 } else {
252 cf->can_id & CAN_SFF_MASK, cf->can_dlc); 275 *pos |= 0x20; /* convert R/T to lower case for SFF */
276 id &= CAN_SFF_MASK;
277 endpos = pos + SLC_SFF_ID_LEN;
278 }
253 279
254 idx = strlen(sl->xbuff); 280 /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
281 pos++;
282 while (endpos >= pos) {
283 *endpos-- = hex_asc_upper[id & 0xf];
284 id >>= 4;
285 }
286
287 pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
255 288
256 for (i = 0; i < cf->can_dlc; i++) 289 *pos++ = cf->can_dlc + '0';
257 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); 290
291 /* RTR frames may have a dlc > 0 but they never have any data bytes */
292 if (!(cf->can_id & CAN_RTR_FLAG)) {
293 for (i = 0; i < cf->can_dlc; i++)
294 pos = hex_byte_pack_upper(pos, cf->data[i]);
295 }
258 296
259 strcat(sl->xbuff, "\r"); /* add terminating character */ 297 *pos++ = '\r';
260 298
261 /* Order of next two lines is *very* important. 299 /* Order of next two lines is *very* important.
262 * When we are sending a little amount of data, 300 * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
267 * 14 Oct 1994 Dmitry Gorodchanin. 305 * 14 Oct 1994 Dmitry Gorodchanin.
268 */ 306 */
269 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); 307 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
270 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); 308 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
271 sl->xleft = strlen(sl->xbuff) - actual; 309 sl->xleft = (pos - sl->xbuff) - actual;
272 sl->xhead = sl->xbuff + actual; 310 sl->xhead = sl->xbuff + actual;
273 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
274} 312}
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
286 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 324 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
287 return; 325 return;
288 326
327 spin_lock(&sl->lock);
289 if (sl->xleft <= 0) { 328 if (sl->xleft <= 0) {
290 /* Now serial buffer is almost free & we can start 329 /* Now serial buffer is almost free & we can start
291 * transmission of another packet */ 330 * transmission of another packet */
292 sl->dev->stats.tx_packets++; 331 sl->dev->stats.tx_packets++;
293 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 332 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
333 spin_unlock(&sl->lock);
294 netif_wake_queue(sl->dev); 334 netif_wake_queue(sl->dev);
295 return; 335 return;
296 } 336 }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
298 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 338 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
299 sl->xleft -= actual; 339 sl->xleft -= actual;
300 sl->xhead += actual; 340 sl->xhead += actual;
341 spin_unlock(&sl->lock);
301} 342}
302 343
303/* Send a can_frame to a TTY queue. */ 344/* Send a can_frame to a TTY queue. */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
463 if (i < PCAN_USB_MAX_TX_URBS) { 463 if (i < PCAN_USB_MAX_TX_URBS) {
464 if (i == 0) { 464 if (i == 0) {
465 netdev_err(netdev, "couldn't setup any tx URB\n"); 465 netdev_err(netdev, "couldn't setup any tx URB\n");
466 return err; 466 goto err_tx;
467 } 467 }
468 468
469 netdev_warn(netdev, "tx performance may be slow\n"); 469 netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
472 if (dev->adapter->dev_start) { 472 if (dev->adapter->dev_start) {
473 err = dev->adapter->dev_start(dev); 473 err = dev->adapter->dev_start(dev);
474 if (err) 474 if (err)
475 goto failed; 475 goto err_adapter;
476 } 476 }
477 477
478 dev->state |= PCAN_USB_STATE_STARTED; 478 dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
481 if (dev->adapter->dev_set_bus) { 481 if (dev->adapter->dev_set_bus) {
482 err = dev->adapter->dev_set_bus(dev, 1); 482 err = dev->adapter->dev_set_bus(dev, 1);
483 if (err) 483 if (err)
484 goto failed; 484 goto err_adapter;
485 } 485 }
486 486
487 dev->can.state = CAN_STATE_ERROR_ACTIVE; 487 dev->can.state = CAN_STATE_ERROR_ACTIVE;
488 488
489 return 0; 489 return 0;
490 490
491failed: 491err_adapter:
492 if (err == -ENODEV) 492 if (err == -ENODEV)
493 netif_device_detach(dev->netdev); 493 netif_device_detach(dev->netdev);
494 494
495 netdev_warn(netdev, "couldn't submit control: %d\n", err); 495 netdev_warn(netdev, "couldn't submit control: %d\n", err);
496 496
497 for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
498 usb_free_urb(dev->tx_contexts[i].urb);
499 dev->tx_contexts[i].urb = NULL;
500 }
501err_tx:
502 usb_kill_anchored_urbs(&dev->rx_submitted);
503
497 return err; 504 return err;
498} 505}
499 506
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 61726af1de6e..e66beff2704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2481,8 +2481,7 @@ load_error_cnic2:
2481load_error_cnic1: 2481load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp); 2482 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */ 2483 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0); 2484 if (bnx2x_set_real_num_queues(bp, 0))
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2485 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0: 2486load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n"); 2487 BNX2X_ERR("CNIC-related load failed\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
175#define EDC_MODE_LINEAR 0x0022 175#define EDC_MODE_LINEAR 0x0022
176#define EDC_MODE_LIMITING 0x0044 176#define EDC_MODE_LIMITING 0x0044
177#define EDC_MODE_PASSIVE_DAC 0x0055 177#define EDC_MODE_PASSIVE_DAC 0x0055
178#define EDC_MODE_ACTIVE_DAC 0x0066
178 179
179/* ETS defines*/ 180/* ETS defines*/
180#define DCBX_INVALID_COS (0xFF) 181#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 bnx2x_update_link_attr(params, vars->link_attr_sync);
3685} 3686}
3686 3687
3688static void bnx2x_disable_kr2(struct link_params *params,
3689 struct link_vars *vars,
3690 struct bnx2x_phy *phy)
3691{
3692 struct bnx2x *bp = params->bp;
3693 int i;
3694 static struct bnx2x_reg_set reg_set[] = {
3695 /* Step 1 - Program the TX/RX alignment markers */
3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
3705 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
3710 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
3711 };
3712 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
3713
3714 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3715 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3716 reg_set[i].val);
3717 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3718 bnx2x_update_link_attr(params, vars->link_attr_sync);
3719
3720 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3721}
3722
3687static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3688 struct link_params *params) 3724 struct link_params *params)
3689{ 3725{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3715 struct link_params *params, 3751 struct link_params *params,
3716 struct link_vars *vars) { 3752 struct link_vars *vars) {
3717 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 u16 lane, i, cl72_ctrl, an_adv = 0;
3718 u16 ucode_ver;
3719 struct bnx2x *bp = params->bp; 3754 struct bnx2x *bp = params->bp;
3720 static struct bnx2x_reg_set reg_set[] = { 3755 static struct bnx2x_reg_set reg_set[] = {
3721 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3806 3841
3807 /* Advertise pause */ 3842 /* Advertise pause */
3808 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 bnx2x_ext_phy_set_pause(params, phy, vars);
3809 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3810 */
3811 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3812 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3813 if (ucode_ver < 0xd108) {
3814 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3815 ucode_ver);
3816 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3817 }
3818 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3819 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3820 3847
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3838 bnx2x_set_aer_mmd(params, phy); 3865 bnx2x_set_aer_mmd(params, phy);
3839 3866
3840 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3868 } else {
3869 bnx2x_disable_kr2(params, vars, phy);
3841 } 3870 }
3842 3871
3843 /* Enable Autoneg: only on the main lane */ 3872 /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4347 struct bnx2x *bp = params->bp; 4376 struct bnx2x *bp = params->bp;
4348 u32 serdes_net_if; 4377 u32 serdes_net_if;
4349 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
4350 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4351 4379
4352 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
4353 4381
4354 if (!vars->turn_to_run_wc_rt) 4382 if (!vars->turn_to_run_wc_rt)
4355 return; 4383 return;
4356 4384
4357 /* Return if there is no link partner */
4358 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4359 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4360 return;
4361 }
4362
4363 if (vars->rx_tx_asic_rst) { 4385 if (vars->rx_tx_asic_rst) {
4386 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4364 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 serdes_net_if = (REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region, dev_info. 4388 offsetof(struct shmem_region, dev_info.
4366 port_hw_config[params->port].default_cfg)) & 4389 port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4375 /*10G KR*/ 4398 /*10G KR*/
4376 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
4377 4400
4378 DP(NETIF_MSG_LINK,
4379 "gp_status1 0x%x\n", gp_status1);
4380
4381 if (lnkup_kr || lnkup) { 4401 if (lnkup_kr || lnkup) {
4382 vars->rx_tx_asic_rst = 0; 4402 vars->rx_tx_asic_rst = 0;
4383 DP(NETIF_MSG_LINK,
4384 "link up, rx_tx_asic_rst 0x%x\n",
4385 vars->rx_tx_asic_rst);
4386 } else { 4403 } else {
4387 /* Reset the lane to see if link comes up.*/ 4404 /* Reset the lane to see if link comes up.*/
4388 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4507 * enabled transmitter to avoid current leakage in case 4524 * enabled transmitter to avoid current leakage in case
4508 * no module is connected 4525 * no module is connected
4509 */ 4526 */
4510 if (bnx2x_is_sfp_module_plugged(phy, params)) 4527 if ((params->loopback_mode == LOOPBACK_NONE) ||
4511 bnx2x_sfp_module_detection(phy, params); 4528 (params->loopback_mode == LOOPBACK_EXT)) {
4512 else 4529 if (bnx2x_is_sfp_module_plugged(phy, params))
4513 bnx2x_sfp_e3_set_transmitter(params, phy, 1); 4530 bnx2x_sfp_module_detection(phy, params);
4531 else
4532 bnx2x_sfp_e3_set_transmitter(params,
4533 phy, 1);
4534 }
4514 4535
4515 bnx2x_warpcore_config_sfi(phy, params); 4536 bnx2x_warpcore_config_sfi(phy, params);
4516 break; 4537 break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5757 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5758 duplex); 5779 duplex);
5759 5780
5781 /* In case of KR link down, start up the recovering procedure */
5782 if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
5783 (!(phy->flags & FLAGS_WC_DUAL_MODE)))
5784 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
5785
5760 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5786 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5761 vars->duplex, vars->flow_ctrl, vars->link_status); 5787 vars->duplex, vars->flow_ctrl, vars->link_status);
5762 return rc; 5788 return rc;
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
6507 params->phy[INT_PHY].config_init(phy, params, vars); 6533 params->phy[INT_PHY].config_init(phy, params, vars);
6508 } 6534 }
6509 6535
6536 /* Re-read this value in case it was changed inside config_init due to
6537 * limitations of optic module
6538 */
6539 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6540
6510 /* Init external phy*/ 6541 /* Init external phy*/
6511 if (non_ext_phy) { 6542 if (non_ext_phy) {
6512 if (params->phy[INT_PHY].supported & 6543 if (params->phy[INT_PHY].supported &
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8080 if (copper_module_type & 8111 if (copper_module_type &
8081 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8112 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
8082 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8113 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
8083 check_limiting_mode = 1; 8114 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8115 *edc_mode = EDC_MODE_ACTIVE_DAC;
8116 else
8117 check_limiting_mode = 1;
8084 } else if (copper_module_type & 8118 } else if (copper_module_type &
8085 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8119 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8086 DP(NETIF_MSG_LINK, 8120 DP(NETIF_MSG_LINK,
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8555 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8589 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8556 break; 8590 break;
8557 case EDC_MODE_PASSIVE_DAC: 8591 case EDC_MODE_PASSIVE_DAC:
8592 case EDC_MODE_ACTIVE_DAC:
8558 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8593 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
8559 break; 8594 break;
8560 default: 8595 default:
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9730 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9765 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9731 an_1000_val); 9766 an_1000_val);
9732 9767
9733 /* set 100 speed advertisement */ 9768 /* Set 10/100 speed advertisement */
9734 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 9769 if (phy->req_line_speed == SPEED_AUTO_NEG) {
9735 (phy->speed_cap_mask & 9770 if (phy->speed_cap_mask &
9736 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 9771 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
9737 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { 9772 /* Enable autoneg and restart autoneg for legacy speeds
9738 an_10_100_val |= (1<<7); 9773 */
9739 /* Enable autoneg and restart autoneg for legacy speeds */ 9774 autoneg_val |= (1<<9 | 1<<12);
9740 autoneg_val |= (1<<9 | 1<<12);
9741
9742 if (phy->req_duplex == DUPLEX_FULL)
9743 an_10_100_val |= (1<<8); 9775 an_10_100_val |= (1<<8);
9744 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 9776 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
9745 } 9777 }
9746 /* set 10 speed advertisement */ 9778
9747 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9779 if (phy->speed_cap_mask &
9748 (phy->speed_cap_mask & 9780 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
9749 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 9781 /* Enable autoneg and restart autoneg for legacy speeds
9750 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && 9782 */
9751 (phy->supported & 9783 autoneg_val |= (1<<9 | 1<<12);
9752 (SUPPORTED_10baseT_Half | 9784 an_10_100_val |= (1<<7);
9753 SUPPORTED_10baseT_Full)))) { 9785 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
9754 an_10_100_val |= (1<<5); 9786 }
9755 autoneg_val |= (1<<9 | 1<<12); 9787
9756 if (phy->req_duplex == DUPLEX_FULL) 9788 if ((phy->speed_cap_mask &
9789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
9790 (phy->supported & SUPPORTED_10baseT_Full)) {
9757 an_10_100_val |= (1<<6); 9791 an_10_100_val |= (1<<6);
9758 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 9792 autoneg_val |= (1<<9 | 1<<12);
9793 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
9794 }
9795
9796 if ((phy->speed_cap_mask &
9797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
9798 (phy->supported & SUPPORTED_10baseT_Half)) {
9799 an_10_100_val |= (1<<5);
9800 autoneg_val |= (1<<9 | 1<<12);
9801 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
9802 }
9759 } 9803 }
9760 9804
9761 /* Only 10/100 are allowed to work in FORCE mode */ 9805 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13432 } 13476 }
13433 } 13477 }
13434} 13478}
13435static void bnx2x_disable_kr2(struct link_params *params,
13436 struct link_vars *vars,
13437 struct bnx2x_phy *phy)
13438{
13439 struct bnx2x *bp = params->bp;
13440 int i;
13441 static struct bnx2x_reg_set reg_set[] = {
13442 /* Step 1 - Program the TX/RX alignment markers */
13443 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13444 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13445 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13446 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13447 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13448 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13449 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13450 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13451 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13452 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13453 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13454 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13455 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13456 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13457 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13458 };
13459 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13460
13461 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
13462 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13463 reg_set[i].val);
13464 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13465 bnx2x_update_link_attr(params, vars->link_attr_sync);
13466
13467 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13468 /* Restart AN on leading lane */
13469 bnx2x_warpcore_restart_AN_KR(phy, params);
13470}
13471
13472static void bnx2x_kr2_recovery(struct link_params *params, 13479static void bnx2x_kr2_recovery(struct link_params *params,
13473 struct link_vars *vars, 13480 struct link_vars *vars,
13474 struct bnx2x_phy *phy) 13481 struct bnx2x_phy *phy)
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13546 /* Disable KR2 on both lanes */ 13553 /* Disable KR2 on both lanes */
13547 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); 13554 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
13548 bnx2x_disable_kr2(params, vars, phy); 13555 bnx2x_disable_kr2(params, vars, phy);
13556 /* Restart AN on leading lane */
13557 bnx2x_warpcore_restart_AN_KR(phy, params);
13549 return; 13558 return;
13550 } 13559 }
13551} 13560}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a6704b555042..82b658d8c04c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4703 attn.sig[3] = REG_RD(bp, 4703 attn.sig[3] = REG_RD(bp,
4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705 port*4); 4705 port*4);
4706 /* Since MCP attentions can't be disabled inside the block, we need to
4707 * read AEU registers to see whether they're currently disabled
4708 */
4709 attn.sig[3] &= ((REG_RD(bp,
4710 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4711 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4712 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4713 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4706 4714
4707 if (!CHIP_IS_E1x(bp)) 4715 if (!CHIP_IS_E1x(bp))
4708 attn.sig[4] = REG_RD(bp, 4716 attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data)
5447 if (IS_PF(bp) && 5455 if (IS_PF(bp) &&
5448 !BP_NOMCP(bp)) { 5456 !BP_NOMCP(bp)) {
5449 int mb_idx = BP_FW_MB_IDX(bp); 5457 int mb_idx = BP_FW_MB_IDX(bp);
5450 u32 drv_pulse; 5458 u16 drv_pulse;
5451 u32 mcp_pulse; 5459 u16 mcp_pulse;
5452 5460
5453 ++bp->fw_drv_pulse_wr_seq; 5461 ++bp->fw_drv_pulse_wr_seq;
5454 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5462 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5455 /* TBD - add SYSTEM_TIME */
5456 drv_pulse = bp->fw_drv_pulse_wr_seq; 5463 drv_pulse = bp->fw_drv_pulse_wr_seq;
5457 bnx2x_drv_pulse(bp); 5464 bnx2x_drv_pulse(bp);
5458 5465
5459 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5466 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5460 MCP_PULSE_SEQ_MASK); 5467 MCP_PULSE_SEQ_MASK);
5461 /* The delta between driver pulse and mcp response 5468 /* The delta between driver pulse and mcp response
5462 * should be 1 (before mcp response) or 0 (after mcp response) 5469 * should not get too big. If the MFW is more than 5 pulses
5470 * behind, we should worry about it enough to generate an error
5471 * log.
5463 */ 5472 */
5464 if ((drv_pulse != mcp_pulse) && 5473 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5465 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5474 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5466 /* someone lost a heartbeat... */
5467 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5468 drv_pulse, mcp_pulse); 5475 drv_pulse, mcp_pulse);
5469 }
5470 } 5476 }
5471 5477
5472 if (bp->state == BNX2X_STATE_OPEN) 5478 if (bp->state == BNX2X_STATE_OPEN)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..9ad012bdd915 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1820 if (fid & IGU_FID_ENCODE_IS_PF) 1820 if (fid & IGU_FID_ENCODE_IS_PF)
1821 current_pf = fid & IGU_FID_PF_NUM_MASK; 1821 current_pf = fid & IGU_FID_PF_NUM_MASK;
1822 else if (current_pf == BP_ABS_FUNC(bp)) 1822 else if (current_pf == BP_FUNC(bp))
1823 bnx2x_vf_set_igu_info(bp, sb_id, 1823 bnx2x_vf_set_igu_info(bp, sb_id,
1824 (fid & IGU_FID_VF_NUM_MASK)); 1824 (fid & IGU_FID_VF_NUM_MASK));
1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3180 /* set local queue arrays */ 3180 /* set local queue arrays */
3181 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3181 vf->vfqs = &bp->vfdb->vfqs[qcount];
3182 qcount += vf_sb_count(vf); 3182 qcount += vf_sb_count(vf);
3183 bnx2x_iov_static_resc(bp, vf);
3183 } 3184 }
3184 3185
3185 /* prepare msix vectors in VF configuration space */ 3186 /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3187 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3188 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3188 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3189 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3189 num_vf_queues); 3190 num_vf_queues);
3191 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3192 vf_idx, num_vf_queues);
3190 } 3193 }
3191 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3192 3195
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..da16953eb2ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1765 switch (mbx->first_tlv.tl.type) { 1765 switch (mbx->first_tlv.tl.type) {
1766 case CHANNEL_TLV_ACQUIRE: 1766 case CHANNEL_TLV_ACQUIRE:
1767 bnx2x_vf_mbx_acquire(bp, vf, mbx); 1767 bnx2x_vf_mbx_acquire(bp, vf, mbx);
1768 break; 1768 return;
1769 case CHANNEL_TLV_INIT: 1769 case CHANNEL_TLV_INIT:
1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx); 1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
1771 break; 1771 return;
1772 case CHANNEL_TLV_SETUP_Q: 1772 case CHANNEL_TLV_SETUP_Q:
1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx); 1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
1774 break; 1774 return;
1775 case CHANNEL_TLV_SET_Q_FILTERS: 1775 case CHANNEL_TLV_SET_Q_FILTERS:
1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); 1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
1777 break; 1777 return;
1778 case CHANNEL_TLV_TEARDOWN_Q: 1778 case CHANNEL_TLV_TEARDOWN_Q:
1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); 1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
1780 break; 1780 return;
1781 case CHANNEL_TLV_CLOSE: 1781 case CHANNEL_TLV_CLOSE:
1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx); 1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
1783 break; 1783 return;
1784 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1786 break; 1786 return;
1787 case CHANNEL_TLV_UPDATE_RSS: 1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break; 1789 return;
1790 } 1790 }
1791 1791
1792 } else { 1792 } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1802 for (i = 0; i < 20; i++) 1802 for (i = 0; i < 20; i++)
1803 DP_CONT(BNX2X_MSG_IOV, "%x ", 1803 DP_CONT(BNX2X_MSG_IOV, "%x ",
1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
1805 }
1805 1806
1806 /* test whether we can respond to the VF (do we have an address 1807 /* can we respond to VF (do we have an address for it?) */
1807 * for it?) 1808 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1808 */ 1809 /* mbx_resp uses the op_rc of the VF */
1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1810 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1810 /* mbx_resp uses the op_rc of the VF */
1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1812 1811
1813 /* notify the VF that we do not support this request */ 1812 /* notify the VF that we do not support this request */
1814 bnx2x_vf_mbx_resp(bp, vf); 1813 bnx2x_vf_mbx_resp(bp, vf);
1815 } else { 1814 } else {
1816 /* can't send a response since this VF is unknown to us 1815 /* can't send a response since this VF is unknown to us
1817 * just ack the FW to release the mailbox and unlock 1816 * just ack the FW to release the mailbox and unlock
1818 * the channel. 1817 * the channel.
1819 */ 1818 */
1820 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1819 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1821 mmiowb(); 1820 /* Firmware ack should be written before unlocking channel */
1822 bnx2x_unlock_vf_pf_channel(bp, vf, 1821 mmiowb();
1823 mbx->first_tlv.tl.type); 1822 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1824 }
1825 } 1823 }
1826} 1824}
1827 1825
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
91#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 96u
92#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
93 94
@@ -333,6 +334,7 @@ enum vf_state {
333 334
334#define BE_FLAGS_LINK_STATUS_INIT 1 335#define BE_FLAGS_LINK_STATUS_INIT 1
335#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 336#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
337#define BE_FLAGS_VLAN_PROMISC (1 << 4)
336#define BE_FLAGS_NAPI_ENABLED (1 << 9) 338#define BE_FLAGS_NAPI_ENABLED (1 << 9)
337#define BE_UC_PMAC_COUNT 30 339#define BE_UC_PMAC_COUNT 30
338#define BE_VF_UC_PMAC_COUNT 2 340#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..bd0e0c0bbcd8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
180 dev_err(&adapter->pdev->dev, 180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n", 181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status); 182 opcode, subsystem, compl_status, extd_status);
183
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
185 return extd_status;
183 } 186 }
184 } 187 }
185done: 188done:
@@ -1812,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1812 } else if (flags & IFF_ALLMULTI) { 1815 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags = 1816 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1817 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1818 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1819 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1820
1821 if (value == ON)
1822 req->if_flags =
1823 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1815 } else { 1824 } else {
1816 struct netdev_hw_addr *ha; 1825 struct netdev_hw_addr *ha;
1817 int i = 0; 1826 int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88c..108ca8abf0af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
64
63#define CQE_STATUS_COMPL_MASK 0xFFFF 65#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF 67#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
1791 u8 acpi_params; 1793 u8 acpi_params;
1792 u8 wol_param; 1794 u8 wol_param;
1793 u16 rsvd7; 1795 u16 rsvd7;
1794 u32 rsvd8[3]; 1796 u32 rsvd8[7];
1795} __packed; 1797} __packed;
1796 1798
1797struct be_cmd_req_get_func_config { 1799struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 100b528b9bd0..2c38cc402119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
855 unsigned int eth_hdr_len; 855 unsigned int eth_hdr_len;
856 struct iphdr *ip; 856 struct iphdr *ip;
857 857
858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less 858 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to 859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length. 860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */ 861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { 862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36)) 863 if (skb_padto(skb, 36))
864 goto tx_drop; 864 goto tx_drop;
865 skb->len = 36; 865 skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0); 1014 vids, num, 1, 0);
1015 1015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) { 1016 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); 1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); 1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1020 goto set_vlan_promisc; 1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1021 } 1035 }
1022 1036
1023 return status; 1037 return status;
1024 1038
1025set_vlan_promisc: 1039set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1027 NULL, 0, 1, 1); 1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1028 return status; 1050 return status;
1029} 1051}
1030 1052
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 struct be_adapter *adapter = netdev_priv(netdev); 1055 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0; 1056 int status = 0;
1035 1057
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040 1058
1041 /* Packets with VID 0 are always received by Lancer by default */ 1059 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0) 1060 if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1059 struct be_adapter *adapter = netdev_priv(netdev); 1077 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status = 0; 1078 int status = 0;
1061 1079
1062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063 status = -EINVAL;
1064 goto ret;
1065 }
1066
1067 /* Packets with VID 0 are always received by Lancer by default */ 1080 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0) 1081 if (lancer_chip(adapter) && vid == 0)
1069 goto ret; 1082 goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1188 1201
1189 vi->vf = vf; 1202 vi->vf = vf;
1190 vi->tx_rate = vf_cfg->tx_rate; 1203 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag; 1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1192 vi->qos = 0; 1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1194 1207
1195 return 0; 1208 return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
1199 int vf, u16 vlan, u8 qos) 1212 int vf, u16 vlan, u8 qos)
1200{ 1213{
1201 struct be_adapter *adapter = netdev_priv(netdev); 1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1202 int status = 0; 1216 int status = 0;
1203 1217
1204 if (!sriov_enabled(adapter)) 1218 if (!sriov_enabled(adapter))
1205 return -EPERM; 1219 return -EPERM;
1206 1220
1207 if (vf >= adapter->num_vfs || vlan > 4095) 1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1208 return -EINVAL; 1222 return -EINVAL;
1209 1223
1210 if (vlan) { 1224 if (vlan || qos) {
1211 if (adapter->vf_cfg[vf].vlan_tag != vlan) { 1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1212 /* If this is new value, program it. Else skip. */ 1227 /* If this is new value, program it. Else skip. */
1213 adapter->vf_cfg[vf].vlan_tag = vlan; 1228 vf_cfg->vlan_tag = vlan;
1214 1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1215 status = be_cmd_set_hsw_config(adapter, vlan, 1230 vf_cfg->if_handle, 0);
1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1217 } 1231 }
1218 } else { 1232 } else {
1219 /* Reset Transparent Vlan Tagging. */ 1233 /* Reset Transparent Vlan Tagging. */
1220 adapter->vf_cfg[vf].vlan_tag = 0; 1234 vf_cfg->vlan_tag = 0;
1221 vlan = adapter->vf_cfg[vf].def_vid; 1235 vlan = vf_cfg->def_vid;
1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1223 adapter->vf_cfg[vf].if_handle, 0); 1237 vf_cfg->if_handle, 0);
1224 } 1238 }
1225 1239
1226 1240
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
2963 2977
2964 if (adapter->function_mode & FLEX10_MODE) 2978 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2966 else 2982 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC; 2984 res->max_mcast_mac = BE_MAX_MC;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908ae..e006a09ba899 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
452 err = -ENODEV; 452 err = -ENODEV;
453 453
454 etsects->caps = ptp_gianfar_caps; 454 etsects->caps = ptp_gianfar_caps;
455 etsects->cksel = DEFAULT_CKSEL; 455
456 if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
457 etsects->cksel = DEFAULT_CKSEL;
456 458
457 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || 459 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
458 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || 460 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f811..cfef7fc32cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
701 701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) { 703 if (cmd_details) {
704 memcpy(details, cmd_details, 704 *details = *cmd_details;
705 sizeof(struct i40e_asq_cmd_details));
706 705
707 /* If the cmd_details are defined copy the cookie. The 706 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored 707 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 759 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761 760
762 /* if the desc is available copy the temp desc to the right place */ 761 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); 762 *desc_on_ring = *desc;
764 763
765 /* if buff is not NULL assume indirect command */ 764 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) { 765 if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
807 806
808 /* if ready, copy the desc back to temp */ 807 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) { 808 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); 809 *desc = *desc_on_ring;
811 if (buff != NULL) 810 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size); 811 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval); 812 retval = le16_to_cpu(desc->retval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1d..1e4ea134975a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
507 507
508 /* save link status information */ 508 /* save link status information */
509 if (link) 509 if (link)
510 memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); 510 *link = *hw_link_info;
511 511
512 /* flag cleared so helper functions don't call AQ again */ 512 /* flag cleared so helper functions don't call AQ again */
513 hw->phy.get_link_info = false; 513 hw->phy.get_link_info = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694ea..221aa4795017 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 mem->size = ALIGN(size, alignment); 101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL); 103 &mem->pa, GFP_KERNEL);
104 if (mem->va) 104 if (!mem->va)
105 return 0; 105 return -ENOMEM;
106 106
107 return -ENOMEM; 107 return 0;
108} 108}
109 109
110/** 110/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
136 mem->size = size; 136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL); 137 mem->va = kzalloc(size, GFP_KERNEL);
138 138
139 if (mem->va) 139 if (!mem->va)
140 return 0; 140 return -ENOMEM;
141 141
142 return -ENOMEM; 142 return 0;
143} 143}
144 144
145/** 145/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id) 174 u16 needed, u16 id)
175{ 175{
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 int i = 0; 177 int i, j;
178 int j = 0;
179 178
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { 179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev, 180 dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 185
187 /* start the linear search with an imperfect hint */ 186 /* start the linear search with an imperfect hint */
188 i = pile->search_hint; 187 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) { 188 while (i < pile->num_entries) {
190 /* skip already allocated entries */ 189 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) { 190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++; 191 i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT; 204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i; 205 ret = i;
207 pile->search_hint = i + j; 206 pile->search_hint = i + j;
207 break;
208 } else { 208 } else {
209 /* not enough, so skip over it and continue looking */ 209 /* not enough, so skip over it and continue looking */
210 i += j; 210 i += j;
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1388 bool add_happened = false; 1388 bool add_happened = false;
1389 int filter_list_len = 0; 1389 int filter_list_len = 0;
1390 u32 changed_flags = 0; 1390 u32 changed_flags = 0;
1391 i40e_status ret = 0; 1391 i40e_status aq_ret = 0;
1392 struct i40e_pf *pf; 1392 struct i40e_pf *pf;
1393 int num_add = 0; 1393 int num_add = 0;
1394 int num_del = 0; 1394 int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1449 1449
1450 /* flush a full buffer */ 1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) { 1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw, 1452 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del, 1453 vsi->seid, del_list, num_del,
1454 NULL); 1454 NULL);
1455 num_del = 0; 1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list)); 1456 memset(del_list, 0, sizeof(*del_list));
1457 1457
1458 if (ret) 1458 if (aq_ret)
1459 dev_info(&pf->pdev->dev, 1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret, 1461 aq_ret,
1462 pf->hw.aq.asq_last_status); 1462 pf->hw.aq.asq_last_status);
1463 } 1463 }
1464 } 1464 }
1465 if (num_del) { 1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1466 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL); 1467 del_list, num_del, NULL);
1468 num_del = 0; 1468 num_del = 0;
1469 1469
1470 if (ret) 1470 if (aq_ret)
1471 dev_info(&pf->pdev->dev, 1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n", 1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status); 1473 aq_ret, pf->hw.aq.asq_last_status);
1474 } 1474 }
1475 1475
1476 kfree(del_list); 1476 kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1515 1515
1516 /* flush a full buffer */ 1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) { 1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw, 1518 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1519 vsi->seid, 1519 add_list, num_add,
1520 add_list, 1520 NULL);
1521 num_add,
1522 NULL);
1523 num_add = 0; 1521 num_add = 0;
1524 1522
1525 if (ret) 1523 if (aq_ret)
1526 break; 1524 break;
1527 memset(add_list, 0, sizeof(*add_list)); 1525 memset(add_list, 0, sizeof(*add_list));
1528 } 1526 }
1529 } 1527 }
1530 if (num_add) { 1528 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1529 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL); 1530 add_list, num_add, NULL);
1533 num_add = 0; 1531 num_add = 0;
1534 } 1532 }
1535 kfree(add_list); 1533 kfree(add_list);
1536 add_list = NULL; 1534 add_list = NULL;
1537 1535
1538 if (add_happened && (!ret)) { 1536 if (add_happened && (!aq_ret)) {
1539 /* do nothing */; 1537 /* do nothing */;
1540 } else if (add_happened && (ret)) { 1538 } else if (add_happened && (aq_ret)) {
1541 dev_info(&pf->pdev->dev, 1539 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n", 1540 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status); 1541 aq_ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1542 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1543 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) { 1544 &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1556 if (changed_flags & IFF_ALLMULTI) { 1554 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc; 1555 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1556 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1557 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid, 1558 vsi->seid,
1561 cur_multipromisc, 1559 cur_multipromisc,
1562 NULL); 1560 NULL);
1563 if (ret) 1561 if (aq_ret)
1564 dev_info(&pf->pdev->dev, 1562 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n", 1563 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status); 1564 aq_ret, pf->hw.aq.asq_last_status);
1567 } 1565 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1566 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc; 1567 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1568 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1569 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state)); 1570 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, 1571 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid, 1572 vsi->seid,
1575 cur_promisc, 1573 cur_promisc, NULL);
1576 NULL); 1574 if (aq_ret)
1577 if (ret)
1578 dev_info(&pf->pdev->dev, 1575 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n", 1576 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status); 1577 aq_ret, pf->hw.aq.asq_last_status);
1581 } 1578 }
1582 1579
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1580 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured 1788 * @vsi: the vsi being configured
1792 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1789 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1793 **/ 1792 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{ 1794{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted 1863 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added 1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1866 **/ 1867 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid) 1869 __always_unused __be16 proto, u16 vid)
1869{ 1870{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev); 1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi; 1872 struct i40e_vsi *vsi = np->vsi;
1872 int ret; 1873 int ret = 0;
1873 1874
1874 if (vid > 4095) 1875 if (vid > 4095)
1875 return 0; 1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1876 1879
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should 1880 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive 1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged) 1882 * any traffic (i.e. with any vlan tag, or untagged)
1882 */ 1883 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884 1885
1885 if (!ret) { 1886 if (!ret && (vid < VLAN_N_VID))
1886 if (vid < VLAN_N_VID) 1887 set_bit(vid, vsi->active_vlans);
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889 1888
1890 return 0; 1889 return ret;
1891} 1890}
1892 1891
1893/** 1892/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted 1894 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed 1895 * @vid: vlan id to be removed
1896 *
1897 * net_device_ops implementation for adding vlan ids
1897 **/ 1898 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid) 1900 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1901 struct i40e_netdev_priv *np = netdev_priv(netdev); 1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi; 1903 struct i40e_vsi *vsi = np->vsi;
1903 1904
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n", 1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1905 netdev->dev_addr, vid); 1906
1906 /* return code is ignored as there is nothing a user 1907 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was 1908 * can do about failure to remove and a log message was
1908 * already printed from another function 1909 * already printed from the other function
1909 */ 1910 */
1910 i40e_vsi_kill_vlan(vsi, vid); 1911 i40e_vsi_kill_vlan(vsi, vid);
1911 1912
1912 clear_bit(vid, vsi->active_vlans); 1913 clear_bit(vid, vsi->active_vlans);
1914
1913 return 0; 1915 return 0;
1914} 1916}
1915 1917
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
1936 * @vsi: the vsi being adjusted 1938 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID 1939 * @vid: the vlan id to set as a PVID
1938 **/ 1940 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 1941int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{ 1942{
1941 struct i40e_vsi_context ctxt; 1943 struct i40e_vsi_context ctxt;
1942 i40e_status ret; 1944 i40e_status aq_ret;
1943 1945
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid); 1947 vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1948 1950
1949 ctxt.seid = vsi->seid; 1951 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) { 1954 if (aq_ret) {
1953 dev_info(&vsi->back->pdev->dev, 1955 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n", 1956 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status); 1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1956 } 1959 }
1957 1960
1958 return ret; 1961 return 0;
1959} 1962}
1960 1963
1961/** 1964/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3326 **/ 3329 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{ 3331{
3329 int num_tc = 0, i; 3332 u8 num_tc = 0;
3333 int i;
3330 3334
3331 /* Scan the ETS Config Priority Table to find 3335 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority 3336 * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3341 /* Traffic class index starts from zero so 3345 /* Traffic class index starts from zero so
3342 * increment to return the actual count 3346 * increment to return the actual count
3343 */ 3347 */
3344 num_tc++; 3348 return num_tc + 1;
3345
3346 return num_tc;
3347} 3349}
3348 3350
3349/** 3351/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back; 3454 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw; 3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3454 u32 tc_bw_max; 3457 u32 tc_bw_max;
3455 int ret;
3456 int i; 3458 int i;
3457 3459
3458 /* Get the VSI level BW configuration */ 3460 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) { 3462 if (aq_ret) {
3461 dev_info(&pf->pdev->dev, 3463 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status); 3465 aq_ret, pf->hw.aq.asq_last_status);
3464 return ret; 3466 return -EINVAL;
3465 } 3467 }
3466 3468
3467 /* Get the VSI level BW configuration per TC */ 3469 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, 3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3469 &bw_ets_config, 3471 NULL);
3470 NULL); 3472 if (aq_ret) {
3471 if (ret) {
3472 dev_info(&pf->pdev->dev, 3473 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status); 3475 aq_ret, pf->hw.aq.asq_last_status);
3475 return ret; 3476 return -EINVAL;
3476 } 3477 }
3477 3478
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3494 /* 3 bits out of 4 for each TC */ 3495 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 } 3497 }
3497 return ret; 3498
3499 return 0;
3498} 3500}
3499 3501
3500/** 3502/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3505 * 3507 *
3506 * Returns 0 on success, negative value on failure 3508 * Returns 0 on success, negative value on failure
3507 **/ 3509 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, 3510static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3509 u8 enabled_tc,
3510 u8 *bw_share) 3511 u8 *bw_share)
3511{ 3512{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0; 3514 i40e_status aq_ret;
3515 int i;
3514 3516
3515 bw_data.tc_valid_bits = enabled_tc; 3517 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i]; 3519 bw_data.tc_bw_credits[i] = bw_share[i];
3518 3520
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, 3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3520 &bw_data, NULL); 3522 NULL);
3521 if (ret) { 3523 if (aq_ret) {
3522 dev_info(&vsi->back->pdev->dev, 3524 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status); 3526 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret; 3527 return -EINVAL;
3526 } 3528 }
3527 3529
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530 3532
3531 return ret; 3533 return 0;
3532} 3534}
3533 3535
3534/** 3536/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b051..86d51429a189 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); 1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1609 } 1609 }
1610 } else if (hw->phy.type == e1000_phy_82580) {
1611 /* enable MII loopback */
1612 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1610 } 1613 }
1611 1614
1612 /* add small delay to avoid loopback test failure */ 1615 /* add small delay to avoid loopback test failure */
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1a9c4f6269ea..ecc7f7b696b8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 PCI_DMA_FROMDEVICE); 3086 PCI_DMA_FROMDEVICE);
3087 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3088 } else { 3088 } else {
3089 struct skge_element ee;
3089 struct sk_buff *nskb; 3090 struct sk_buff *nskb;
3090 3091
3091 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3092 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3092 if (!nskb) 3093 if (!nskb)
3093 goto resubmit; 3094 goto resubmit;
3094 3095
3095 skb = e->skb; 3096 ee = *e;
3097
3098 skb = ee.skb;
3096 prefetch(skb->data); 3099 prefetch(skb->data);
3097 3100
3098 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3101 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3101 } 3104 }
3102 3105
3103 pci_unmap_single(skge->hw->pdev, 3106 pci_unmap_single(skge->hw->pdev,
3104 dma_unmap_addr(e, mapaddr), 3107 dma_unmap_addr(&ee, mapaddr),
3105 dma_unmap_len(e, maplen), 3108 dma_unmap_len(&ee, maplen),
3106 PCI_DMA_FROMDEVICE); 3109 PCI_DMA_FROMDEVICE);
3107 } 3110 }
3108 3111
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c23..bd1a2d2bc2ae 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = {
543 { } 543 { }
544}; 544};
545 545
546struct __initdata platform_driver moxart_mac_driver = { 546static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe, 547 .probe = moxart_mac_probe,
548 .remove = moxart_remove, 548 .remove = moxart_remove,
549 .driver = { 549 .driver = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1c..ebe4c86e5230 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1794 .set_msglevel = qlcnic_set_msglevel, 1794 .set_msglevel = qlcnic_set_msglevel,
1795 .get_msglevel = qlcnic_get_msglevel, 1795 .get_msglevel = qlcnic_get_msglevel,
1796}; 1796};
1797
1798const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1799 .get_settings = qlcnic_get_settings,
1800 .get_drvinfo = qlcnic_get_drvinfo,
1801 .set_msglevel = qlcnic_set_msglevel,
1802 .get_msglevel = qlcnic_get_msglevel,
1803 .set_dump = qlcnic_set_dump,
1804};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fdf..21d00a0449a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
432 usleep_range(10000, 11000); 432 usleep_range(10000, 11000);
433 433
434 if (!adapter->fw_work.work.func)
435 return;
436
434 cancel_delayed_work_sync(&adapter->fw_work); 437 cancel_delayed_work_sync(&adapter->fw_work);
435} 438}
436 439
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 adapter->portnum = adapter->ahw->pci_func; 2278 adapter->portnum = adapter->ahw->pci_func;
2276 err = qlcnic_start_firmware(adapter); 2279 err = qlcnic_start_firmware(adapter);
2277 if (err) { 2280 if (err) {
2278 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 2281 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
2279 goto err_out_free_hw; 2282 "\t\tIf reboot doesn't help, try flashing the card\n");
2283 goto err_out_maintenance_mode;
2280 } 2284 }
2281 2285
2282 qlcnic_get_multiq_capability(adapter); 2286 qlcnic_get_multiq_capability(adapter);
@@ -2408,6 +2412,22 @@ err_out_disable_pdev:
2408 pci_set_drvdata(pdev, NULL); 2412 pci_set_drvdata(pdev, NULL);
2409 pci_disable_device(pdev); 2413 pci_disable_device(pdev);
2410 return err; 2414 return err;
2415
2416err_out_maintenance_mode:
2417 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2418 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
2419 err = register_netdev(netdev);
2420
2421 if (err) {
2422 dev_err(&pdev->dev, "Failed to register net device\n");
2423 qlcnic_clr_all_drv_state(adapter, 0);
2424 goto err_out_free_hw;
2425 }
2426
2427 pci_set_drvdata(pdev, adapter);
2428 qlcnic_add_sysfs(adapter);
2429
2430 return 0;
2411} 2431}
2412 2432
2413static void qlcnic_remove(struct pci_dev *pdev) 2433static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
2518static int qlcnic_open(struct net_device *netdev) 2538static int qlcnic_open(struct net_device *netdev)
2519{ 2539{
2520 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2540 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2541 u32 state;
2521 int err; 2542 int err;
2522 2543
2544 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
2545 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
2546 netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
2547
2548 return -EIO;
2549 }
2550
2523 netif_carrier_off(netdev); 2551 netif_carrier_off(netdev);
2524 2552
2525 err = qlcnic_attach(adapter); 2553 err = qlcnic_attach(adapter);
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
3228 return; 3256 return;
3229 3257
3230 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 3258 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
3259 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
3260 netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
3261 __func__);
3262 qlcnic_api_unlock(adapter);
3263
3264 return;
3265 }
3231 3266
3232 if (state == QLCNIC_DEV_READY) { 3267 if (state == QLCNIC_DEV_READY) {
3233 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, 3268 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774ad..686f460b1502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
397{ 397{
398 struct net_device *netdev = adapter->netdev; 398 struct net_device *netdev = adapter->netdev;
399 399
400 rtnl_lock();
400 if (netif_running(netdev)) 401 if (netif_running(netdev))
401 __qlcnic_down(adapter, netdev); 402 __qlcnic_down(adapter, netdev);
402 403
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
407 /* After disabling SRIOV re-init the driver in default mode 408 /* After disabling SRIOV re-init the driver in default mode
408 configure opmode based on op_mode of function 409 configure opmode based on op_mode of function
409 */ 410 */
410 if (qlcnic_83xx_configure_opmode(adapter)) 411 if (qlcnic_83xx_configure_opmode(adapter)) {
412 rtnl_unlock();
411 return -EIO; 413 return -EIO;
414 }
412 415
413 if (netif_running(netdev)) 416 if (netif_running(netdev))
414 __qlcnic_up(adapter, netdev); 417 __qlcnic_up(adapter, netdev);
415 418
419 rtnl_unlock();
416 return 0; 420 return 0;
417} 421}
418 422
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
533 return -EIO; 537 return -EIO;
534 } 538 }
535 539
540 rtnl_lock();
536 if (netif_running(netdev)) 541 if (netif_running(netdev))
537 __qlcnic_down(adapter, netdev); 542 __qlcnic_down(adapter, netdev);
538 543
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
555 __qlcnic_up(adapter, netdev); 560 __qlcnic_up(adapter, netdev);
556 561
557error: 562error:
563 rtnl_unlock();
558 return err; 564 return err;
559} 565}
560 566
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc13..019f4377307f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1273{ 1273{
1274 struct device *dev = &adapter->pdev->dev; 1274 struct device *dev = &adapter->pdev->dev;
1275 u32 state;
1275 1276
1276 if (device_create_bin_file(dev, &bin_attr_port_stats)) 1277 if (device_create_bin_file(dev, &bin_attr_port_stats))
1277 dev_info(dev, "failed to create port stats sysfs entry"); 1278 dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1285 if (device_create_bin_file(dev, &bin_attr_mem)) 1286 if (device_create_bin_file(dev, &bin_attr_mem))
1286 dev_info(dev, "failed to create mem sysfs entry\n"); 1287 dev_info(dev, "failed to create mem sysfs entry\n");
1287 1288
1289 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1290 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1291 return;
1292
1288 if (device_create_bin_file(dev, &bin_attr_pci_config)) 1293 if (device_create_bin_file(dev, &bin_attr_pci_config))
1289 dev_info(dev, "failed to create pci config sysfs entry"); 1294 dev_info(dev, "failed to create pci config sysfs entry");
1295
1290 if (device_create_file(dev, &dev_attr_beacon)) 1296 if (device_create_file(dev, &dev_attr_beacon))
1291 dev_info(dev, "failed to create beacon sysfs entry"); 1297 dev_info(dev, "failed to create beacon sysfs entry");
1292 1298
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1307void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 1313void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1308{ 1314{
1309 struct device *dev = &adapter->pdev->dev; 1315 struct device *dev = &adapter->pdev->dev;
1316 u32 state;
1310 1317
1311 device_remove_bin_file(dev, &bin_attr_port_stats); 1318 device_remove_bin_file(dev, &bin_attr_port_stats);
1312 1319
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1315 device_remove_file(dev, &dev_attr_diag_mode); 1322 device_remove_file(dev, &dev_attr_diag_mode);
1316 device_remove_bin_file(dev, &bin_attr_crb); 1323 device_remove_bin_file(dev, &bin_attr_crb);
1317 device_remove_bin_file(dev, &bin_attr_mem); 1324 device_remove_bin_file(dev, &bin_attr_mem);
1325
1326 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1327 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1328 return;
1329
1318 device_remove_bin_file(dev, &bin_attr_pci_config); 1330 device_remove_bin_file(dev, &bin_attr_pci_config);
1319 device_remove_file(dev, &dev_attr_beacon); 1331 device_remove_file(dev, &dev_attr_beacon);
1320 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1332 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0f..6bc5db703920 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
740 int i; 740 int i;
741 741
742 if (!mpi_coredump) { 742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
744 return -ENOMEM; 744 return -EINVAL;
745 } 745 }
746 746
747 /* Try to get the spinlock, but dont worry if 747 /* Try to get the spinlock, but dont worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e247..7ad146080c36 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
1274 return; 1274 return;
1275 } 1275 }
1276 1276
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) { 1277 if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1; 1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue, 1280 queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb2..c082562dbf4e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
27 27
28/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
29 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
30 * via these mechanisms then wait 20ms for the status word to be set. 30 * via these mechanisms then wait 250ms for the status word to be set.
31 */ 31 */
32#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
33#define MCDI_STATUS_DELAY_COUNT 200 33#define MCDI_STATUS_DELAY_COUNT 2500
34#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
36 36
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
800 } else { 800 } else {
801 int count; 801 int count;
802 802
803 /* Nobody was waiting for an MCDI request, so trigger a reset */
804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
805
806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */
807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
808 if (efx_mcdi_poll_reboot(efx)) 805 if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
810 udelay(MCDI_STATUS_DELAY_US); 807 udelay(MCDI_STATUS_DELAY_US);
811 } 808 }
812 mcdi->new_epoch = true; 809 mcdi->new_epoch = true;
810
811 /* Nobody was waiting for an MCDI request, so trigger a reset */
812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
813 } 813 }
814 814
815 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fdf..bdf697b184ae 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 33
34#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
35#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.1"
36#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
37 37
38#include <linux/types.h> 38#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 1705
1706 if (unlikely(vlan_tx_tag_present(skb))) { 1706 if (unlikely(vlan_tx_tag_present(skb))) {
1707 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1707 u16 vid_pcp = vlan_tx_tag_get(skb);
1708
1709 /* drop CFI/DEI bit, register needs VID and PCP */
1710 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1711 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1712 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1708 /* request tagging */ 1713 /* request tagging */
1709 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1714 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 } 1715 }
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240ca..0029148077a9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
299 299
300 /* Init descriptor indexes */
301 lp->tx_bd_ci = 0;
302 lp->tx_bd_next = 0;
303 lp->tx_bd_tail = 0;
304 lp->rx_bd_ci = 0;
305
300 return 0; 306 return 0;
301 307
302out: 308out:
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a34d6bf5e43b..cc70ecfc7062 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock);
432 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
433 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
434 * transmission of another packet */ 435 * transmission of another packet */
435 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
436 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock);
437 sl_unlock(sl); 439 sl_unlock(sl);
438 return; 440 return;
439 } 441 }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
441 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442 sl->xleft -= actual; 444 sl->xleft -= actual;
443 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock);
444} 447}
445 448
446static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb9460349d..c6867f926cff 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
303 rx_ctl |= 0x02; 303 rx_ctl |= 0x02;
304 } else if (net->flags & IFF_ALLMULTI || 304 } else if (net->flags & IFF_ALLMULTI ||
305 netdev_mc_count(net) > DM_MAX_MCAST) { 305 netdev_mc_count(net) > DM_MAX_MCAST) {
306 rx_ctl |= 0x04; 306 rx_ctl |= 0x08;
307 } else if (!netdev_mc_empty(net)) { 307 } else if (!netdev_mc_empty(net)) {
308 struct netdev_hw_addr *ha; 308 struct netdev_hw_addr *ha;
309 309
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6312332afeba..3d6aaf79d8b2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,7 +714,7 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 717 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 718
719 /* 4. Gobi 1000 devices */ 719 /* 4. Gobi 1000 devices */
720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7b331e613e02..bf94e10a37c8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1241 if (num_sgs == 1) 1241 if (num_sgs == 1)
1242 return 0; 1242 return 0;
1243 1243
1244 urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); 1244 /* reserve one for zero packet */
1245 urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1246 GFP_ATOMIC);
1245 if (!urb->sg) 1247 if (!urb->sg)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1305 if (build_dma_sg(skb, urb) < 0) 1307 if (build_dma_sg(skb, urb) < 0)
1306 goto drop; 1308 goto drop;
1307 } 1309 }
1308 entry->length = length = urb->transfer_buffer_length; 1310 length = urb->transfer_buffer_length;
1309 1311
1310 /* don't assume the hardware handles USB_ZERO_PACKET 1312 /* don't assume the hardware handles USB_ZERO_PACKET
1311 * NOTE: strictly conforming cdc-ether devices should expect 1313 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1317 if (length % dev->maxpacket == 0) { 1319 if (length % dev->maxpacket == 0) {
1318 if (!(info->flags & FLAG_SEND_ZLP)) { 1320 if (!(info->flags & FLAG_SEND_ZLP)) {
1319 if (!(info->flags & FLAG_MULTI_PACKET)) { 1321 if (!(info->flags & FLAG_MULTI_PACKET)) {
1320 urb->transfer_buffer_length++; 1322 length++;
1321 if (skb_tailroom(skb)) { 1323 if (skb_tailroom(skb) && !urb->num_sgs) {
1322 skb->data[skb->len] = 0; 1324 skb->data[skb->len] = 0;
1323 __skb_put(skb, 1); 1325 __skb_put(skb, 1);
1324 } 1326 } else if (urb->num_sgs)
1327 sg_set_buf(&urb->sg[urb->num_sgs++],
1328 dev->padding_pkt, 1);
1325 } 1329 }
1326 } else 1330 } else
1327 urb->transfer_flags |= URB_ZERO_PACKET; 1331 urb->transfer_flags |= URB_ZERO_PACKET;
1328 } 1332 }
1333 entry->length = urb->transfer_buffer_length = length;
1329 1334
1330 spin_lock_irqsave(&dev->txq.lock, flags); 1335 spin_lock_irqsave(&dev->txq.lock, flags);
1331 retval = usb_autopm_get_interface_async(dev->intf); 1336 retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1509 1514
1510 usb_kill_urb(dev->interrupt); 1515 usb_kill_urb(dev->interrupt);
1511 usb_free_urb(dev->interrupt); 1516 usb_free_urb(dev->interrupt);
1517 kfree(dev->padding_pkt);
1512 1518
1513 free_netdev(net); 1519 free_netdev(net);
1514} 1520}
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1679 /* initialize max rx_qlen and tx_qlen */ 1685 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev); 1686 usbnet_update_max_qlen(dev);
1681 1687
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt)
1692 goto out4;
1693 }
1694
1682 status = register_netdev (net); 1695 status = register_netdev (net);
1683 if (status) 1696 if (status)
1684 goto out4; 1697 goto out5;
1685 netif_info(dev, probe, dev->net, 1698 netif_info(dev, probe, dev->net,
1686 "register '%s' at usb-%s-%s, %s, %pM\n", 1699 "register '%s' at usb-%s-%s, %s, %pM\n",
1687 udev->dev.driver->name, 1700 udev->dev.driver->name,
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1699 1712
1700 return 0; 1713 return 0;
1701 1714
1715out5:
1716 kfree(dev->padding_pkt);
1702out4: 1717out4:
1703 usb_free_urb(dev->interrupt); 1718 usb_free_urb(dev->interrupt);
1704out3: 1719out3:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d1292fe746bc..2ef5b6219f3f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
952 952
953 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
954 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb(); 955 rcu_assign_sk_user_data(vs->sock->sk, NULL);
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk); 956 vxlan_notify_del_rx_port(sk);
958 spin_unlock(&vn->sock_lock); 957 spin_unlock(&vn->sock_lock);
959 958
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1048 1047
1049 port = inet_sk(sk)->inet_sport; 1048 port = inet_sk(sk)->inet_sport;
1050 1049
1051 smp_read_barrier_depends(); 1050 vs = rcu_dereference_sk_user_data(sk);
1052 vs = (struct vxlan_sock *)sk->sk_user_data;
1053 if (!vs) 1051 if (!vs)
1054 goto drop; 1052 goto drop;
1055 1053
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2302 atomic_set(&vs->refcnt, 1); 2300 atomic_set(&vs->refcnt, 1);
2303 vs->rcv = rcv; 2301 vs->rcv = rcv;
2304 vs->data = data; 2302 vs->data = data;
2305 smp_wmb(); 2303 rcu_assign_sk_user_data(vs->sock->sk, vs);
2306 vs->sock->sk->sk_user_data = vs;
2307 2304
2308 spin_lock(&vn->sock_lock); 2305 spin_lock(&vn->sock_lock);
2309 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2306 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a5a4e4..ab9e3a8410bc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
1270 return; 1270 return;
1271 1271
1272 /* 1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity 1273 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row. 1274 * chooses the other antenna 3 times in a row.
1282 */ 1275 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515fe3ffa..5ac713d2ff5d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
399 tbf->bf_buf_addr = bf->bf_buf_addr; 399 tbf->bf_buf_addr = bf->bf_buf_addr;
400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
401 tbf->bf_state = bf->bf_state; 401 tbf->bf_state = bf->bf_state;
402 tbf->bf_state.stale = false;
402 403
403 return tbf; 404 return tbf;
404} 405}
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1389 u16 tid, u16 *ssn) 1390 u16 tid, u16 *ssn)
1390{ 1391{
1391 struct ath_atx_tid *txtid; 1392 struct ath_atx_tid *txtid;
1393 struct ath_txq *txq;
1392 struct ath_node *an; 1394 struct ath_node *an;
1393 u8 density; 1395 u8 density;
1394 1396
1395 an = (struct ath_node *)sta->drv_priv; 1397 an = (struct ath_node *)sta->drv_priv;
1396 txtid = ATH_AN_2_TID(an, tid); 1398 txtid = ATH_AN_2_TID(an, tid);
1399 txq = txtid->ac->txq;
1400
1401 ath_txq_lock(sc, txq);
1397 1402
1398 /* update ampdu factor/density, they may have changed. This may happen 1403 /* update ampdu factor/density, they may have changed. This may happen
1399 * in HT IBSS when a beacon with HT-info is received after the station 1404 * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1417 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1422 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1418 txtid->baw_head = txtid->baw_tail = 0; 1423 txtid->baw_head = txtid->baw_tail = 0;
1419 1424
1425 ath_txq_unlock_complete(sc, txq);
1426
1420 return 0; 1427 return 0;
1421} 1428}
1422 1429
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1555 __skb_unlink(bf->bf_mpdu, tid_q); 1562 __skb_unlink(bf->bf_mpdu, tid_q);
1556 list_add_tail(&bf->list, &bf_q); 1563 list_add_tail(&bf->list, &bf_q);
1557 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1564 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1558 ath_tx_addto_baw(sc, tid, bf); 1565 if (bf_isampdu(bf)) {
1559 bf->bf_state.bf_type &= ~BUF_AGGR; 1566 ath_tx_addto_baw(sc, tid, bf);
1567 bf->bf_state.bf_type &= ~BUF_AGGR;
1568 }
1560 if (bf_tail) 1569 if (bf_tail)
1561 bf_tail->bf_next = bf; 1570 bf_tail->bf_next = bf;
1562 1571
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1950 if (bf_is_ampdu_not_probing(bf)) 1959 if (bf_is_ampdu_not_probing(bf))
1951 txq->axq_ampdu_depth++; 1960 txq->axq_ampdu_depth++;
1952 1961
1953 bf = bf->bf_lastbf->bf_next; 1962 bf_last = bf->bf_lastbf;
1963 bf = bf_last->bf_next;
1964 bf_last->bf_next = NULL;
1954 } 1965 }
1955 } 1966 }
1956} 1967}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2bc8dde..c3462b75bd08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
464 464
465static int brcmf_sdio_pd_probe(struct platform_device *pdev) 465static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 466{
467 int ret;
468
469 brcmf_dbg(SDIO, "Enter\n"); 467 brcmf_dbg(SDIO, "Enter\n");
470 468
471 brcmfmac_sdio_pdata = pdev->dev.platform_data; 469 brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
473 if (brcmfmac_sdio_pdata->power_on) 471 if (brcmfmac_sdio_pdata->power_on)
474 brcmfmac_sdio_pdata->power_on(); 472 brcmfmac_sdio_pdata->power_on();
475 473
476 ret = sdio_register_driver(&brcmf_sdmmc_driver); 474 return 0;
477 if (ret)
478 brcmf_err("sdio_register_driver failed: %d\n", ret);
479
480 return ret;
481} 475}
482 476
483static int brcmf_sdio_pd_remove(struct platform_device *pdev) 477static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
500 } 494 }
501}; 495};
502 496
497void brcmf_sdio_register(void)
498{
499 int ret;
500
501 ret = sdio_register_driver(&brcmf_sdmmc_driver);
502 if (ret)
503 brcmf_err("sdio_register_driver failed: %d\n", ret);
504}
505
503void brcmf_sdio_exit(void) 506void brcmf_sdio_exit(void)
504{ 507{
505 brcmf_dbg(SDIO, "Enter\n"); 508 brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
510 sdio_unregister_driver(&brcmf_sdmmc_driver); 513 sdio_unregister_driver(&brcmf_sdmmc_driver);
511} 514}
512 515
513void brcmf_sdio_init(void) 516void __init brcmf_sdio_init(void)
514{ 517{
515 int ret; 518 int ret;
516 519
517 brcmf_dbg(SDIO, "Enter\n"); 520 brcmf_dbg(SDIO, "Enter\n");
518 521
519 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); 522 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
520 if (ret == -ENODEV) { 523 if (ret == -ENODEV)
521 brcmf_dbg(SDIO, "No platform data available, registering without.\n"); 524 brcmf_dbg(SDIO, "No platform data available.\n");
522 ret = sdio_register_driver(&brcmf_sdmmc_driver);
523 }
524
525 if (ret)
526 brcmf_err("driver registration failed: %d\n", ret);
527} 525}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985844e4..74156f84180c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev);
156#ifdef CONFIG_BRCMFMAC_SDIO 156#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 157extern void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 158extern void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void);
159#endif 160#endif
160#ifdef CONFIG_BRCMFMAC_USB 161#ifdef CONFIG_BRCMFMAC_USB
161extern void brcmf_usb_exit(void); 162extern void brcmf_usb_exit(void);
162extern void brcmf_usb_init(void); 163extern void brcmf_usb_register(void);
163#endif 164#endif
164 165
165#endif /* _BRCMF_BUS_H_ */ 166#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec1fbf1..40e7f854e10f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1231 return bus->chip << 4 | bus->chiprev; 1231 return bus->chip << 4 | bus->chiprev;
1232} 1232}
1233 1233
1234static void brcmf_driver_init(struct work_struct *work) 1234static void brcmf_driver_register(struct work_struct *work)
1235{ 1235{
1236 brcmf_debugfs_init();
1237
1238#ifdef CONFIG_BRCMFMAC_SDIO 1236#ifdef CONFIG_BRCMFMAC_SDIO
1239 brcmf_sdio_init(); 1237 brcmf_sdio_register();
1240#endif 1238#endif
1241#ifdef CONFIG_BRCMFMAC_USB 1239#ifdef CONFIG_BRCMFMAC_USB
1242 brcmf_usb_init(); 1240 brcmf_usb_register();
1243#endif 1241#endif
1244} 1242}
1245static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); 1243static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1246 1244
1247static int __init brcmfmac_module_init(void) 1245static int __init brcmfmac_module_init(void)
1248{ 1246{
1247 brcmf_debugfs_init();
1248#ifdef CONFIG_BRCMFMAC_SDIO
1249 brcmf_sdio_init();
1250#endif
1249 if (!schedule_work(&brcmf_driver_work)) 1251 if (!schedule_work(&brcmf_driver_work))
1250 return -EBUSY; 1252 return -EBUSY;
1251 1253
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7c8556..f4aea47e0730 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
1539 brcmf_release_fw(&fw_image_list); 1539 brcmf_release_fw(&fw_image_list);
1540} 1540}
1541 1541
1542void brcmf_usb_init(void) 1542void brcmf_usb_register(void)
1543{ 1543{
1544 brcmf_dbg(USB, "Enter\n"); 1544 brcmf_dbg(USB, "Enter\n");
1545 INIT_LIST_HEAD(&fw_image_list); 1545 INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 3a6544710c8a..edc5d105ff98 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
457 if (err != 0) 457 if (err != 0)
458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", 458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
459 __func__, err); 459 __func__, err);
460
461 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
460 return err; 462 return err;
461} 463}
462 464
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
479 return; 481 return;
480 } 482 }
481 483
484 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
485
482 /* put driver in down state */ 486 /* put driver in down state */
483 spin_lock_bh(&wl->lock); 487 spin_lock_bh(&wl->lock);
484 brcms_down(wl); 488 brcms_down(wl);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index f5e6b489ed32..899cad34ccd3 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -42,7 +42,6 @@ struct hwbus_priv {
42 spinlock_t lock; /* Serialize all bus operations */ 42 spinlock_t lock; /* Serialize all bus operations */
43 wait_queue_head_t wq; 43 wait_queue_head_t wq;
44 int claimed; 44 int claimed;
45 int irq_disabled;
46}; 45};
47 46
48#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) 47#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -238,8 +237,6 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
238 struct hwbus_priv *self = dev_id; 237 struct hwbus_priv *self = dev_id;
239 238
240 if (self->core) { 239 if (self->core) {
241 disable_irq_nosync(self->func->irq);
242 self->irq_disabled = 1;
243 cw1200_irq_handler(self->core); 240 cw1200_irq_handler(self->core);
244 return IRQ_HANDLED; 241 return IRQ_HANDLED;
245 } else { 242 } else {
@@ -253,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
253 250
254 pr_debug("SW IRQ subscribe\n"); 251 pr_debug("SW IRQ subscribe\n");
255 252
256 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, 253 ret = request_threaded_irq(self->func->irq, NULL,
257 IRQF_TRIGGER_HIGH, 254 cw1200_spi_irq_handler,
258 "cw1200_wlan_irq", self); 255 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
256 "cw1200_wlan_irq", self);
259 if (WARN_ON(ret < 0)) 257 if (WARN_ON(ret < 0))
260 goto exit; 258 goto exit;
261 259
@@ -273,22 +271,13 @@ exit:
273 271
274static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) 272static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
275{ 273{
274 int ret = 0;
275
276 pr_debug("SW IRQ unsubscribe\n"); 276 pr_debug("SW IRQ unsubscribe\n");
277 disable_irq_wake(self->func->irq); 277 disable_irq_wake(self->func->irq);
278 free_irq(self->func->irq, self); 278 free_irq(self->func->irq, self);
279 279
280 return 0; 280 return ret;
281}
282
283static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
284{
285 /* Disables are handled by the interrupt handler */
286 if (enable && self->irq_disabled) {
287 enable_irq(self->func->irq);
288 self->irq_disabled = 0;
289 }
290
291 return 0;
292} 281}
293 282
294static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) 283static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -368,7 +357,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
368 .unlock = cw1200_spi_unlock, 357 .unlock = cw1200_spi_unlock,
369 .align_size = cw1200_spi_align_size, 358 .align_size = cw1200_spi_align_size,
370 .power_mgmt = cw1200_spi_pm, 359 .power_mgmt = cw1200_spi_pm,
371 .irq_enable = cw1200_spi_irq_enable,
372}; 360};
373 361
374/* Probe Function to be called by SPI stack when device is discovered */ 362/* Probe Function to be called by SPI stack when device is discovered */
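The cw1200 SPI change above drops the hand-rolled disable_irq_nosync()/enable_irq() bookkeeping in favour of request_threaded_irq() with IRQF_ONESHOT, which keeps the line masked until the threaded handler (free to sleep on SPI I/O) returns. Minimal sketch with placeholder names:

#include <linux/errno.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	/* May sleep: SPI transfers to read/ack device status go here.
	 * With IRQF_ONESHOT the line stays masked until we return. */
	return IRQ_HANDLED;
}

static int demo_irq_subscribe(int irq, void *priv)
{
	return request_threaded_irq(irq, NULL, demo_irq_thread,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "demo_wlan_irq", priv);
}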
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 0b2061bbc68b..acdff0f7f952 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
485 485
486 /* Enable interrupt signalling */ 486 /* Enable interrupt signalling */
487 priv->hwbus_ops->lock(priv->hwbus_priv); 487 priv->hwbus_ops->lock(priv->hwbus_priv);
488 ret = __cw1200_irq_enable(priv, 2); 488 ret = __cw1200_irq_enable(priv, 1);
489 priv->hwbus_ops->unlock(priv->hwbus_priv); 489 priv->hwbus_ops->unlock(priv->hwbus_priv);
490 if (ret < 0) 490 if (ret < 0)
491 goto unsubscribe; 491 goto unsubscribe;
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
index 51dfb3a90735..8b2fc831c3de 100644
--- a/drivers/net/wireless/cw1200/hwbus.h
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -28,7 +28,6 @@ struct hwbus_ops {
28 void (*unlock)(struct hwbus_priv *self); 28 void (*unlock)(struct hwbus_priv *self);
29 size_t (*align_size)(struct hwbus_priv *self, size_t size); 29 size_t (*align_size)(struct hwbus_priv *self, size_t size);
30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend); 30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
31 int (*irq_enable)(struct hwbus_priv *self, int enable);
32}; 31};
33 32
34#endif /* CW1200_HWBUS_H */ 33#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
index 41bd7615ccaa..ff230b7aeedd 100644
--- a/drivers/net/wireless/cw1200/hwio.c
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
273 u16 val16; 273 u16 val16;
274 int ret; 274 int ret;
275 275
276 /* We need to do this hack because the SPI layer can sleep on I/O
277 and the general path involves I/O to the device in interrupt
278 context.
279
280 However, the initial enable call needs to go to the hardware.
281
282 We don't worry about shutdown because we do a full reset which
283 clears the interrupt enabled bits.
284 */
285 if (priv->hwbus_ops->irq_enable) {
286 ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
287 if (ret || enable < 2)
288 return ret;
289 }
290
291 if (HIF_8601_SILICON == priv->hw_type) { 276 if (HIF_8601_SILICON == priv->hw_type) {
292 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); 277 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
293 if (ret < 0) { 278 if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 21c688264708..1214c587fd08 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
150 */ 150 */
151int 151int
152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
153 struct mwifiex_ra_list_tbl *pra_list, int headroom, 153 struct mwifiex_ra_list_tbl *pra_list,
154 int ptrindex, unsigned long ra_list_flags) 154 int ptrindex, unsigned long ra_list_flags)
155 __releases(&priv->wmm.ra_list_spinlock) 155 __releases(&priv->wmm.ra_list_spinlock)
156{ 156{
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 160 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 161 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 162 struct txpd *ptx_pd = NULL;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
163 164
164 skb_src = skb_peek(&pra_list->skb_head); 165 skb_src = skb_peek(&pra_list->skb_head);
165 if (!skb_src) { 166 if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c62a0cc..892098d6a696 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, 26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
27 struct sk_buff *skb); 27 struct sk_buff *skb);
28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
29 struct mwifiex_ra_list_tbl *ptr, int headroom, 29 struct mwifiex_ra_list_tbl *ptr,
30 int ptr_index, unsigned long flags) 30 int ptr_index, unsigned long flags)
31 __releases(&priv->wmm.ra_list_spinlock); 31 __releases(&priv->wmm.ra_list_spinlock);
32 32
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d761477d15e..a6c46f3b6e3a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1156 1156
1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && 1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1158 adapter->iface_type == MWIFIEX_SDIO) { 1158 adapter->iface_type != MWIFIEX_USB) {
1159 mwifiex_hs_activated_event(priv, true); 1159 mwifiex_hs_activated_event(priv, true);
1160 return 0; 1160 return 0;
1161 } else { 1161 } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1167 } 1167 }
1168 if (conditions != HS_CFG_CANCEL) { 1168 if (conditions != HS_CFG_CANCEL) {
1169 adapter->is_hs_configured = true; 1169 adapter->is_hs_configured = true;
1170 if (adapter->iface_type == MWIFIEX_USB || 1170 if (adapter->iface_type == MWIFIEX_USB)
1171 adapter->iface_type == MWIFIEX_PCIE)
1172 mwifiex_hs_activated_event(priv, true); 1171 mwifiex_hs_activated_event(priv, true);
1173 } else { 1172 } else {
1174 adapter->is_hs_configured = false; 1173 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 2472d4b7f00e..1c70b8d09227 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
447 */ 447 */
448 adapter->is_suspended = true; 448 adapter->is_suspended = true;
449 449
450 for (i = 0; i < adapter->priv_num; i++)
451 netif_carrier_off(adapter->priv[i]->netdev);
452
453 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 450 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
454 usb_kill_urb(card->rx_cmd.urb); 451 usb_kill_urb(card->rx_cmd.urb);
455 452
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
509 MWIFIEX_RX_CMD_BUF_SIZE); 506 MWIFIEX_RX_CMD_BUF_SIZE);
510 } 507 }
511 508
512 for (i = 0; i < adapter->priv_num; i++)
513 if (adapter->priv[i]->media_connected)
514 netif_carrier_on(adapter->priv[i]->netdev);
515
516 /* Disable Host Sleep */ 509 /* Disable Host Sleep */
517 if (adapter->hs_activated) 510 if (adapter->hs_activated)
518 mwifiex_cancel_hs(mwifiex_get_priv(adapter, 511 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cdea54d..95fa3599b407 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1240 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1241 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1243 ptr_index, flags);
1244 /* ra_list_spinlock has been freed in 1243 /* ra_list_spinlock has been freed in
1245 mwifiex_11n_aggregate_pkt() */ 1244 mwifiex_11n_aggregate_pkt() */
1246 else 1245 else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef66cf4b..e328d3058c41 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ 83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
86 {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
86 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ 87 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
87 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 88 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
88 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 89 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
979 if (err) { 980 if (err) {
980 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " 981 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
981 "(%d)!\n", p54u_fwlist[i].fw, err); 982 "(%d)!\n", p54u_fwlist[i].fw, err);
983 usb_put_dev(udev);
982 } 984 }
983 985
984 return err; 986 return err;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c87cbe..703258742d28 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@ struct rtl_priv {
2057 that it points to the data allocated 2057 that it points to the data allocated
2058 beyond this structure like: 2058 beyond this structure like:
2059 rtl_pci_priv or rtl_usb_priv */ 2059 rtl_pci_priv or rtl_usb_priv */
2060 u8 priv[0]; 2060 u8 priv[0] __aligned(sizeof(void *));
2061}; 2061};
2062 2062
2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) 2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
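The rtlwifi change matters because priv[0] marks where a bus-specific private structure containing pointers is laid out directly behind struct rtl_priv; without __aligned(sizeof(void *)) that storage could start on an unaligned boundary and fault on strict-alignment architectures. Illustrative sketch, not the driver's real layout:

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_priv {
	void *dev;	/* needs natural pointer alignment */
};

struct demo_outer {
	int common_state;
	/* bus-specific private data is allocated right behind this
	 * struct; force that storage onto a pointer boundary */
	u8 priv[0] __aligned(sizeof(void *));
};

static inline struct demo_priv *demo_to_priv(struct demo_outer *o)
{
	return (struct demo_priv *)o->priv;
}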
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a53782ef1540..b45bce20ad76 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
24struct backend_info { 24struct backend_info {
25 struct xenbus_device *dev; 25 struct xenbus_device *dev;
26 struct xenvif *vif; 26 struct xenvif *vif;
27
28 /* This is the state that will be reflected in xenstore when any
29 * active hotplug script completes.
30 */
31 enum xenbus_state state;
32
27 enum xenbus_state frontend_state; 33 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 34 struct xenbus_watch hotplug_status_watch;
29 u8 have_hotplug_status_watch:1; 35 u8 have_hotplug_status_watch:1;
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev,
136 if (err) 142 if (err)
137 goto fail; 143 goto fail;
138 144
145 be->state = XenbusStateInitWait;
146
139 /* This kicks hotplug scripts, so do it immediately. */ 147 /* This kicks hotplug scripts, so do it immediately. */
140 backend_create_xenvif(be); 148 backend_create_xenvif(be);
141 149
@@ -208,24 +216,113 @@ static void backend_create_xenvif(struct backend_info *be)
208 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 216 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
209} 217}
210 218
211 219static void backend_disconnect(struct backend_info *be)
212static void disconnect_backend(struct xenbus_device *dev)
213{ 220{
214 struct backend_info *be = dev_get_drvdata(&dev->dev);
215
216 if (be->vif) 221 if (be->vif)
217 xenvif_disconnect(be->vif); 222 xenvif_disconnect(be->vif);
218} 223}
219 224
220static void destroy_backend(struct xenbus_device *dev) 225static void backend_connect(struct backend_info *be)
221{ 226{
222 struct backend_info *be = dev_get_drvdata(&dev->dev); 227 if (be->vif)
228 connect(be);
229}
223 230
224 if (be->vif) { 231static inline void backend_switch_state(struct backend_info *be,
225 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 232 enum xenbus_state state)
226 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 233{
227 xenvif_free(be->vif); 234 struct xenbus_device *dev = be->dev;
228 be->vif = NULL; 235
236 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
237 be->state = state;
238
239 /* If we are waiting for a hotplug script then defer the
240 * actual xenbus state change.
241 */
242 if (!be->have_hotplug_status_watch)
243 xenbus_switch_state(dev, state);
244}
245
246/* Handle backend state transitions:
247 *
248 * The backend state starts in InitWait and the following transitions are
249 * allowed.
250 *
251 * InitWait -> Connected
252 *
253 * ^ \ |
254 * | \ |
255 * | \ |
256 * | \ |
257 * | \ |
258 * | \ |
259 * | V V
260 *
261 * Closed <-> Closing
262 *
263 * The state argument specifies the eventual state of the backend and the
264 * function transitions to that state via the shortest path.
265 */
266static void set_backend_state(struct backend_info *be,
267 enum xenbus_state state)
268{
269 while (be->state != state) {
270 switch (be->state) {
271 case XenbusStateClosed:
272 switch (state) {
273 case XenbusStateInitWait:
274 case XenbusStateConnected:
275 pr_info("%s: prepare for reconnect\n",
276 be->dev->nodename);
277 backend_switch_state(be, XenbusStateInitWait);
278 break;
279 case XenbusStateClosing:
280 backend_switch_state(be, XenbusStateClosing);
281 break;
282 default:
283 BUG();
284 }
285 break;
286 case XenbusStateInitWait:
287 switch (state) {
288 case XenbusStateConnected:
289 backend_connect(be);
290 backend_switch_state(be, XenbusStateConnected);
291 break;
292 case XenbusStateClosing:
293 case XenbusStateClosed:
294 backend_switch_state(be, XenbusStateClosing);
295 break;
296 default:
297 BUG();
298 }
299 break;
300 case XenbusStateConnected:
301 switch (state) {
302 case XenbusStateInitWait:
303 case XenbusStateClosing:
304 case XenbusStateClosed:
305 backend_disconnect(be);
306 backend_switch_state(be, XenbusStateClosing);
307 break;
308 default:
309 BUG();
310 }
311 break;
312 case XenbusStateClosing:
313 switch (state) {
314 case XenbusStateInitWait:
315 case XenbusStateConnected:
316 case XenbusStateClosed:
317 backend_switch_state(be, XenbusStateClosed);
318 break;
319 default:
320 BUG();
321 }
322 break;
323 default:
324 BUG();
325 }
229 } 326 }
230} 327}
231 328
@@ -237,40 +334,33 @@ static void frontend_changed(struct xenbus_device *dev,
237{ 334{
238 struct backend_info *be = dev_get_drvdata(&dev->dev); 335 struct backend_info *be = dev_get_drvdata(&dev->dev);
239 336
240 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); 337 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
241 338
242 be->frontend_state = frontend_state; 339 be->frontend_state = frontend_state;
243 340
244 switch (frontend_state) { 341 switch (frontend_state) {
245 case XenbusStateInitialising: 342 case XenbusStateInitialising:
246 if (dev->state == XenbusStateClosed) { 343 set_backend_state(be, XenbusStateInitWait);
247 pr_info("%s: prepare for reconnect\n", dev->nodename);
248 xenbus_switch_state(dev, XenbusStateInitWait);
249 }
250 break; 344 break;
251 345
252 case XenbusStateInitialised: 346 case XenbusStateInitialised:
253 break; 347 break;
254 348
255 case XenbusStateConnected: 349 case XenbusStateConnected:
256 if (dev->state == XenbusStateConnected) 350 set_backend_state(be, XenbusStateConnected);
257 break;
258 if (be->vif)
259 connect(be);
260 break; 351 break;
261 352
262 case XenbusStateClosing: 353 case XenbusStateClosing:
263 disconnect_backend(dev); 354 set_backend_state(be, XenbusStateClosing);
264 xenbus_switch_state(dev, XenbusStateClosing);
265 break; 355 break;
266 356
267 case XenbusStateClosed: 357 case XenbusStateClosed:
268 xenbus_switch_state(dev, XenbusStateClosed); 358 set_backend_state(be, XenbusStateClosed);
269 if (xenbus_dev_is_online(dev)) 359 if (xenbus_dev_is_online(dev))
270 break; 360 break;
271 destroy_backend(dev);
272 /* fall through if not online */ 361 /* fall through if not online */
273 case XenbusStateUnknown: 362 case XenbusStateUnknown:
363 set_backend_state(be, XenbusStateClosed);
274 device_unregister(&dev->dev); 364 device_unregister(&dev->dev);
275 break; 365 break;
276 366
@@ -363,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
363 if (IS_ERR(str)) 453 if (IS_ERR(str))
364 return; 454 return;
365 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 455 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
366 xenbus_switch_state(be->dev, XenbusStateConnected); 456 /* Complete any pending state change */
457 xenbus_switch_state(be->dev, be->state);
458
367 /* Not interested in this watch anymore. */ 459 /* Not interested in this watch anymore. */
368 unregister_hotplug_status_watch(be); 460 unregister_hotplug_status_watch(be);
369 } 461 }
@@ -393,12 +485,8 @@ static void connect(struct backend_info *be)
393 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 485 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
394 hotplug_status_changed, 486 hotplug_status_changed,
395 "%s/%s", dev->nodename, "hotplug-status"); 487 "%s/%s", dev->nodename, "hotplug-status");
396 if (err) { 488 if (!err)
397 /* Switch now, since we can't do a watch. */
398 xenbus_switch_state(dev, XenbusStateConnected);
399 } else {
400 be->have_hotplug_status_watch = 1; 489 be->have_hotplug_status_watch = 1;
401 }
402 490
403 netif_wake_queue(be->vif->dev); 491 netif_wake_queue(be->vif->dev);
404} 492}
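The set_backend_state() loop above walks from the current backend state to the requested one via the shortest legal path, emitting one xenbus state change per hop (deferred while a hotplug script is pending). A small user-space model of that walk, using the same state names but none of the kernel plumbing:

#include <stdio.h>

enum st { ST_CLOSED, ST_INIT_WAIT, ST_CONNECTED, ST_CLOSING };

static enum st next_hop(enum st cur, enum st target)
{
	switch (cur) {
	case ST_CLOSED:
		return target == ST_CLOSING ? ST_CLOSING : ST_INIT_WAIT;
	case ST_INIT_WAIT:
		return target == ST_CONNECTED ? ST_CONNECTED : ST_CLOSING;
	case ST_CONNECTED:
		return ST_CLOSING;	/* disconnect happens on this hop */
	case ST_CLOSING:
	default:
		return ST_CLOSED;	/* must pass Closed before reopening */
	}
}

int main(void)
{
	static const char *name[] = {
		"Closed", "InitWait", "Connected", "Closing"
	};
	enum st cur = ST_CLOSED, target = ST_CONNECTED;

	while (cur != target) {
		cur = next_hop(cur, target);
		printf("-> %s\n", name[cur]);	/* InitWait, Connected */
	}
	return 0;
}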
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
1155 1155
1156 pci_enable_bridge(dev->bus->self); 1156 pci_enable_bridge(dev->bus->self);
1157 1157
1158 if (pci_is_enabled(dev)) 1158 if (pci_is_enabled(dev)) {
1159 if (!dev->is_busmaster) {
1160 dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
1161 pci_set_master(dev);
1162 }
1159 return; 1163 return;
1164 }
1165
1160 retval = pci_enable_device(dev); 1166 retval = pci_enable_device(dev);
1161 if (retval) 1167 if (retval)
1162 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1168 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
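The new warning in pci_enable_bridge() fires when a bridge was enabled but bus mastering was never turned on, then fixes it up on the spot. The driver-side sequence that avoids the warning is the usual enable-then-set-master pair; a sketch only, not tied to any particular driver:

#include <linux/pci.h>

static int demo_pci_probe(struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);	/* enable bus mastering before any DMA */
	return 0;
}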
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a138965c01cb..b8fcc38c0d11 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -490,7 +490,7 @@ exit:
490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps 490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps
491 * <newvalue> reflects the new config and is driver dependant 491 * <newvalue> reflects the new config and is driver dependant
492 */ 492 */
493static int pinconf_dbg_config_write(struct file *file, 493static ssize_t pinconf_dbg_config_write(struct file *file,
494 const char __user *user_buf, size_t count, loff_t *ppos) 494 const char __user *user_buf, size_t count, loff_t *ppos)
495{ 495{
496 struct pinctrl_maps *maps_node; 496 struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
508 int i; 508 int i;
509 509
510 /* Get userspace string and assure termination */ 510 /* Get userspace string and assure termination */
511 buf_size = min(count, (size_t)(sizeof(buf)-1)); 511 buf_size = min(count, sizeof(buf) - 1);
512 if (copy_from_user(buf, user_buf, buf_size)) 512 if (copy_from_user(buf, user_buf, buf_size))
513 return -EFAULT; 513 return -EFAULT;
514 buf[buf_size] = 0; 514 buf[buf_size] = 0;
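The pinconf change fixes two things: a debugfs .write handler must return ssize_t to match struct file_operations, and min() wants both operands of the same type, hence min(count, sizeof(buf) - 1) with no cast. A minimal handler following the same pattern, placeholder names:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t demo_dbg_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[128];
	/* both operands are size_t, so min() needs no cast */
	size_t buf_size = min(count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = '\0';

	/* ... parse buf ... */

	return count;	/* ssize_t, as struct file_operations expects */
}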
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 2689f8d01a1e..155b1b3a0e7a 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
663/* pin banks of s5pv210 pin-controller */ 663/* pin banks of s5pv210 pin-controller */
664static struct samsung_pin_bank s5pv210_pin_bank[] = { 664static struct samsung_pin_bank s5pv210_pin_bank[] = {
665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
666 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 666 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), 668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), 669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), 670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
671 EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), 671 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
672 EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), 672 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
673 EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), 673 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
674 EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), 674 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), 675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), 676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
677 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), 677 EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), 678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), 679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), 680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 82638fac3cfa..30c4d356cb33 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
891 param = pinconf_to_config_param(configs[i]); 891 param = pinconf_to_config_param(configs[i]);
892 param_val = pinconf_to_config_argument(configs[i]); 892 param_val = pinconf_to_config_argument(configs[i]);
893 893
894 if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
895 continue;
896
894 switch (param) { 897 switch (param) {
895 case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
896 return 0;
897 case PIN_CONFIG_BIAS_DISABLE: 898 case PIN_CONFIG_BIAS_DISABLE:
898 case PIN_CONFIG_BIAS_PULL_UP: 899 case PIN_CONFIG_BIAS_PULL_UP:
899 case PIN_CONFIG_BIAS_PULL_DOWN: 900 case PIN_CONFIG_BIAS_PULL_DOWN:
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 622c4854977e..93c9e3899d5e 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Arthur: Pritesh Raithatha <praithatha@nvidia.com> 6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
2763}; 2763};
2764module_platform_driver(tegra114_pinctrl_driver); 2764module_platform_driver(tegra114_pinctrl_driver);
2765 2765
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); 2766MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); 2767MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
2769MODULE_LICENSE("GPL v2"); 2768MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 1a7816390773..b9f2653e4ef9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
709 struct of_regulator_match **da9063_reg_matches) 709 struct of_regulator_match **da9063_reg_matches)
710{ 710{
711 da9063_reg_matches = NULL; 711 da9063_reg_matches = NULL;
712 return PTR_ERR(-ENODEV); 712 return ERR_PTR(-ENODEV);
713} 713}
714#endif 714#endif
715 715
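The da9063 stub was returning PTR_ERR(-ENODEV), i.e. decoding an errno as if it were a pointer; a pointer-returning function needs ERR_PTR() instead. Sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() round trip with hypothetical helpers:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_lookup_data;

static void *demo_lookup(bool available)
{
	if (!available)
		return ERR_PTR(-ENODEV);	/* errno encoded as a pointer */
	return &demo_lookup_data;
}

static int demo_use(void)
{
	void *p = demo_lookup(false);

	if (IS_ERR(p))
		return PTR_ERR(p);		/* decodes back to -ENODEV */
	return 0;
}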
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 488dfe7ce9a6..7e2b165972e6 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
201#define SMPS_CTRL_MODE_ECO 0x02 201#define SMPS_CTRL_MODE_ECO 0x02
202#define SMPS_CTRL_MODE_PWM 0x03 202#define SMPS_CTRL_MODE_PWM 0x03
203 203
204/* These values are derived from the data sheet. And are the number of steps 204#define PALMAS_SMPS_NUM_VOLTAGES 122
205 * where there is a voltage change, the ranges at beginning and end of register
206 * max/min values where there are no change are ommitted.
207 *
208 * So they are basically (maxV-minV)/stepV
209 */
210#define PALMAS_SMPS_NUM_VOLTAGES 117
211#define PALMAS_SMPS10_NUM_VOLTAGES 2 205#define PALMAS_SMPS10_NUM_VOLTAGES 2
212#define PALMAS_LDO_NUM_VOLTAGES 50 206#define PALMAS_LDO_NUM_VOLTAGES 50
213 207
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
979 pmic->desc[id].min_uV = 900000; 973 pmic->desc[id].min_uV = 900000;
980 pmic->desc[id].uV_step = 50000; 974 pmic->desc[id].uV_step = 50000;
981 pmic->desc[id].linear_min_sel = 1; 975 pmic->desc[id].linear_min_sel = 1;
976 pmic->desc[id].enable_time = 500;
982 pmic->desc[id].vsel_reg = 977 pmic->desc[id].vsel_reg =
983 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, 978 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
984 palmas_regs_info[id].vsel_addr); 979 palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
997 pmic->desc[id].min_uV = 450000; 992 pmic->desc[id].min_uV = 450000;
998 pmic->desc[id].uV_step = 25000; 993 pmic->desc[id].uV_step = 25000;
999 } 994 }
995
996 /* LOD6 in vibrator mode will have enable time 2000us */
997 if (pdata && pdata->ldo6_vibrator &&
998 (id == PALMAS_REG_LDO6))
999 pmic->desc[id].enable_time = 2000;
1000 } else { 1000 } else {
1001 pmic->desc[id].n_voltages = 1; 1001 pmic->desc[id].n_voltages = 1;
1002 pmic->desc[id].ops = &palmas_ops_extreg; 1002 pmic->desc[id].ops = &palmas_ops_extreg;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d8e3e1262bc2..20c271d49dcb 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, 279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
280 abb->base); 280 abb->base);
281 281
282 /* program LDO VBB vset override if needed */ 282 /*
283 if (abb->ldo_base) 283 * program LDO VBB vset override if needed for !bypass mode
284 * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
285 * be performed *before* switch to bias mode else VBB glitches.
286 */
287 if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
284 ti_abb_program_ldovbb(dev, abb, info); 288 ti_abb_program_ldovbb(dev, abb, info);
285 289
286 /* Initiate ABB ldo change */ 290 /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
295 if (ret) 299 if (ret)
296 goto out; 300 goto out;
297 301
302 /*
303 * Reset LDO VBB vset override bypass mode
304 * XXX: Do not switch sequence - for bypass, LDO override reset *must*
305 * be performed *after* switch to bypass else VBB glitches.
306 */
307 if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
308 ti_abb_program_ldovbb(dev, abb, info);
309
298out: 310out:
299 return ret; 311 return ret;
300} 312}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 1432b26ef2e9..2205fbc2c37b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
63 */ 63 */
64 64
65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { 65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
66 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, 66 { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
67 .uV_step = 50000 }, 67 .uV_step = 50000 },
68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, 68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
69 .uV_step = 100000 }, 69 .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
332 */ 332 */
333 333
334static const struct regulator_linear_range wm831x_aldo_ranges[] = { 334static const struct regulator_linear_range wm831x_aldo_ranges[] = {
335 { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, 335 { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
336 .uV_step = 50000 }, 336 .uV_step = 50000 },
337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, 337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
338 .uV_step = 100000 }, 338 .uV_step = 100000 },
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 835b5f0f344e..61ca9292a429 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
543} 543}
544 544
545static const struct regulator_linear_range wm8350_ldo_ranges[] = { 545static const struct regulator_linear_range wm8350_ldo_ranges[] = {
546 { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, 546 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
547 .uV_step = 50000 }, 547 .uV_step = 50000 },
548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, 548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
549 .uV_step = 100000 }, 549 .uV_step = 100000 },
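The three wm831x/wm8350 range fixes above restore the invariant of a regulator_linear_range entry: max_uV = min_uV + (max_sel - min_sel) * uV_step. For the wm8350 LDO low range that gives 900000 + 15 * 50000 = 1650000 uV, which is why max_uV drops from 1750000. A compile-time restatement of the same arithmetic, illustrative only:

#include <linux/bug.h>
#include <linux/compiler.h>

static void __maybe_unused demo_check_ranges(void)
{
	/* wm8350 LDO low range: 900000 + (15 - 0) * 50000 */
	BUILD_BUG_ON(900000 + (15 - 0) * 50000 != 1650000);
	/* wm831x GP LDO low range: 900000 + (14 - 0) * 50000 */
	BUILD_BUG_ON(900000 + (14 - 0) * 50000 != 1600000);
	/* wm831x ALDO low range: 1000000 + (12 - 0) * 50000 */
	BUILD_BUG_ON(1000000 + (12 - 0) * 50000 != 1600000);
}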
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 8cd34bf644b3..77df9cb00688 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void)
145 145
146 if (sccb->header.response_code != 0x20) 146 if (sccb->header.response_code != 0x20)
147 return 0; 147 return 0;
148 if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) 148 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
149 return 1; 149 return 0;
150 return 0; 150 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
151 return 0;
152 return 1;
151} 153}
152 154
153bool __init sclp_has_vt220(void) 155bool __init sclp_has_vt220(void)
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index a0f47c83fd62..3f4ca4e09a4c 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work)
810 struct winsize ws; 810 struct winsize ws;
811 811
812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); 812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
813 if (!screen) 813 if (IS_ERR(screen))
814 return; 814 return;
815 /* Switch to new output size */ 815 /* Switch to new output size */
816 spin_lock_bh(&tp->view.lock); 816 spin_lock_bh(&tp->view.lock);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index fd7cc566095a..d4ac60b4a56e 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1583 /* Initialize the hardware */ 1583 /* Initialize the hardware */
1584 ret = clk_prepare_enable(clk); 1584 ret = clk_prepare_enable(clk);
1585 if (ret) 1585 if (ret)
1586 goto out_unmap_regs; 1586 goto out_free_irq;
1587 spi_writel(as, CR, SPI_BIT(SWRST)); 1587 spi_writel(as, CR, SPI_BIT(SWRST));
1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1589 if (as->caps.has_wdrbt) { 1589 if (as->caps.has_wdrbt) {
@@ -1614,6 +1614,7 @@ out_free_dma:
1614 spi_writel(as, CR, SPI_BIT(SWRST)); 1614 spi_writel(as, CR, SPI_BIT(SWRST));
1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1616 clk_disable_unprepare(clk); 1616 clk_disable_unprepare(clk);
1617out_free_irq:
1617 free_irq(irq, master); 1618 free_irq(irq, master);
1618out_unmap_regs: 1619out_unmap_regs:
1619 iounmap(as->regs); 1620 iounmap(as->regs);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 5655acf55bfe..6416798828e7 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
226 dev_name(&pdev->dev), hw); 226 dev_name(&pdev->dev), hw);
227 if (ret) { 227 if (ret) {
228 dev_err(&pdev->dev, "Can't request IRQ\n"); 228 dev_err(&pdev->dev, "Can't request IRQ\n");
229 clk_put(hw->spi_clk);
230 goto clk_out; 229 goto clk_out;
231 } 230 }
232 231
@@ -247,7 +246,6 @@ err_out:
247 gpio_free(hw->chipselect[i]); 246 gpio_free(hw->chipselect[i]);
248 247
249 spi_master_put(master); 248 spi_master_put(master);
250 kfree(master);
251 249
252 return ret; 250 return ret;
253} 251}
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
263 gpio_free(hw->chipselect[i]); 261 gpio_free(hw->chipselect[i]);
264 262
265 spi_unregister_master(master); 263 spi_unregister_master(master);
266 kfree(master);
267 264
268 return 0; 265 return 0;
269} 266}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 6cd07d13ecab..4e44575bd87a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev)
476 master->bus_num = bus_num; 476 master->bus_num = bus_num;
477 477
478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
479 if (!res) {
480 dev_err(&pdev->dev, "can't get platform resource\n");
481 ret = -EINVAL;
482 goto out_master_put;
483 }
484
485 dspi->base = devm_ioremap_resource(&pdev->dev, res); 479 dspi->base = devm_ioremap_resource(&pdev->dev, res);
486 if (!dspi->base) { 480 if (IS_ERR(dspi->base)) {
487 ret = -EINVAL; 481 ret = PTR_ERR(dspi->base);
488 goto out_master_put; 482 goto out_master_put;
489 } 483 }
490 484
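devm_ioremap_resource() already validates the resource (including NULL) and returns an ERR_PTR()-encoded error, so the dspi probe only needs the IS_ERR()/PTR_ERR() pair and can drop its own NULL check. Generic probe sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* handles res == NULL internally and returns an ERR_PTR() */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	return 0;
}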
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index dbc5e999a1f5..6adf4e35816d 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
522 psc_num = master->bus_num; 522 psc_num = master->bus_num;
523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); 523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
524 clk = devm_clk_get(dev, clk_name); 524 clk = devm_clk_get(dev, clk_name);
525 if (IS_ERR(clk)) 525 if (IS_ERR(clk)) {
526 ret = PTR_ERR(clk);
526 goto free_irq; 527 goto free_irq;
528 }
527 ret = clk_prepare_enable(clk); 529 ret = clk_prepare_enable(clk);
528 if (ret) 530 if (ret)
529 goto free_irq; 531 goto free_irq;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2eb06ee0b326..c1a50674c1e3 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
546 if (pm_runtime_suspended(&drv_data->pdev->dev)) 546 if (pm_runtime_suspended(&drv_data->pdev->dev))
547 return IRQ_NONE; 547 return IRQ_NONE;
548 548
549 sccr1_reg = read_SSCR1(reg); 549 /*
550 * If the device is not yet in RPM suspended state and we get an
551 * interrupt that is meant for another device, check if status bits
552 * are all set to one. That means that the device is already
553 * powered off.
554 */
550 status = read_SSSR(reg); 555 status = read_SSSR(reg);
556 if (status == ~0)
557 return IRQ_NONE;
558
559 sccr1_reg = read_SSCR1(reg);
551 560
552 /* Ignore possible writes if we don't need to write */ 561 /* Ignore possible writes if we don't need to write */
553 if (!(sccr1_reg & SSCR1_TIE)) 562 if (!(sccr1_reg & SSCR1_TIE))
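The pxa2xx guard relies on a property of powered-off or removed memory-mapped devices on a shared interrupt line: register reads return all ones. Generic shape of that check; the register offset is hypothetical:

#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t demo_shared_irq(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;
	u32 status = readl(regs + 0x08);	/* hypothetical status register */

	/* an all-ones read means the device behind the shared line is
	 * powered off or gone, so this interrupt cannot be ours */
	if (status == ~0)
		return IRQ_NONE;

	/* ... handle and ack ... */
	return IRQ_HANDLED;
}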
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 512b8893893b..a80376dc3a10 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1429 sdd->regs + S3C64XX_SPI_INT_EN); 1429 sdd->regs + S3C64XX_SPI_INT_EN);
1430 1430
1431 pm_runtime_enable(&pdev->dev);
1432
1431 if (spi_register_master(master)) { 1433 if (spi_register_master(master)) {
1432 dev_err(&pdev->dev, "cannot register SPI master\n"); 1434 dev_err(&pdev->dev, "cannot register SPI master\n");
1433 ret = -EBUSY; 1435 ret = -EBUSY;
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1440 mem_res, 1442 mem_res,
1441 sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1443 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1442 1444
1443 pm_runtime_enable(&pdev->dev);
1444
1445 return 0; 1445 return 0;
1446 1446
1447err3: 1447err3:
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 0b68cb592fa4..e488a90a98b8 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev)
296 goto error1; 296 goto error1;
297 } 297 }
298 298
299 pm_runtime_enable(&pdev->dev);
300
299 master->num_chipselect = 1; 301 master->num_chipselect = 1;
300 master->bus_num = pdev->id; 302 master->bus_num = pdev->id;
301 master->setup = hspi_setup; 303 master->setup = hspi_setup;
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
309 goto error1; 311 goto error1;
310 } 312 }
311 313
312 pm_runtime_enable(&pdev->dev);
313
314 return 0; 314 return 0;
315 315
316 error1: 316 error1:
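Both SPI probe fixes here (s3c64xx and sh-hspi) move pm_runtime_enable() ahead of master registration: once spi_register_master() returns, the core and child devices may already exercise the controller, so runtime PM has to be enabled first. Sketch of the probe tail, error handling elided:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int demo_spi_probe_tail(struct platform_device *pdev,
			       struct spi_master *master)
{
	/* enable runtime PM before the master becomes visible ... */
	pm_runtime_enable(&pdev->dev);

	/* ... because registration may immediately run transfers */
	return spi_register_master(master);
}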
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3ba4c5712dff..853f62b2b1a9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
369{ 369{
370 const struct ni_65xx_board *board = comedi_board(dev); 370 const struct ni_65xx_board *board = comedi_board(dev);
371 struct ni_65xx_private *devpriv = dev->private; 371 struct ni_65xx_private *devpriv = dev->private;
372 unsigned base_bitfield_channel; 372 int base_bitfield_channel;
373 const unsigned max_ports_per_bitfield = 5;
374 unsigned read_bits = 0; 373 unsigned read_bits = 0;
375 unsigned j; 374 int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
375 int port_offset;
376 376
377 base_bitfield_channel = CR_CHAN(insn->chanspec); 377 base_bitfield_channel = CR_CHAN(insn->chanspec);
378 for (j = 0; j < max_ports_per_bitfield; ++j) { 378 for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
379 const unsigned port_offset = 379 port_offset <= last_port_offset; port_offset++) {
380 ni_65xx_port_by_channel(base_bitfield_channel) + j; 380 unsigned port = sprivate(s)->base_port + port_offset;
381 const unsigned port = 381 int base_port_channel = port_offset * ni_65xx_channels_per_port;
382 sprivate(s)->base_port + port_offset;
383 unsigned base_port_channel;
384 unsigned port_mask, port_data, port_read_bits; 382 unsigned port_mask, port_data, port_read_bits;
385 int bitshift; 383 int bitshift = base_port_channel - base_bitfield_channel;
386 if (port >= ni_65xx_total_num_ports(board)) 384
385 if (bitshift >= 32)
387 break; 386 break;
388 base_port_channel = port_offset * ni_65xx_channels_per_port;
389 port_mask = data[0]; 387 port_mask = data[0];
390 port_data = data[1]; 388 port_data = data[1];
391 bitshift = base_port_channel - base_bitfield_channel;
392 if (bitshift >= 32 || bitshift <= -32)
393 break;
394 if (bitshift > 0) { 389 if (bitshift > 0) {
395 port_mask >>= bitshift; 390 port_mask >>= bitshift;
396 port_data >>= bitshift; 391 port_data >>= bitshift;
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
41 struct list_head encoder_list; 41 struct list_head encoder_list;
42 struct list_head connector_list; 42 struct list_head connector_list;
43 struct mutex mutex; 43 struct mutex mutex;
44 int references;
45 int pipes; 44 int pipes;
46 struct drm_fbdev_cma *fbhelper; 45 struct drm_fbdev_cma *fbhelper;
47}; 46};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
241 } 240 }
242 } 241 }
243 242
244 imxdrm->references++;
245
246 return imxdrm->drm; 243 return imxdrm->drm;
247 244
248unwind_crtc: 245unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
280 list_for_each_entry(enc, &imxdrm->encoder_list, list) 277 list_for_each_entry(enc, &imxdrm->encoder_list, list)
281 module_put(enc->owner); 278 module_put(enc->owner);
282 279
283 imxdrm->references--;
284
285 mutex_unlock(&imxdrm->mutex); 280 mutex_unlock(&imxdrm->mutex);
286} 281}
287EXPORT_SYMBOL_GPL(imx_drm_device_put); 282EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
485 480
486 mutex_lock(&imxdrm->mutex); 481 mutex_lock(&imxdrm->mutex);
487 482
488 if (imxdrm->references) { 483 if (imxdrm->drm->open_count) {
489 ret = -EBUSY; 484 ret = -EBUSY;
490 goto err_busy; 485 goto err_busy;
491 } 486 }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
564 559
565 mutex_lock(&imxdrm->mutex); 560 mutex_lock(&imxdrm->mutex);
566 561
567 if (imxdrm->references) { 562 if (imxdrm->drm->open_count) {
568 ret = -EBUSY; 563 ret = -EBUSY;
569 goto err_busy; 564 goto err_busy;
570 } 565 }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
709 704
710 mutex_lock(&imxdrm->mutex); 705 mutex_lock(&imxdrm->mutex);
711 706
712 if (imxdrm->references) { 707 if (imxdrm->drm->open_count) {
713 ret = -EBUSY; 708 ret = -EBUSY;
714 goto err_busy; 709 goto err_busy;
715 } 710 }
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1387 if (nob > ulsm_nob) 1387 if (nob > ulsm_nob)
1388 return (-EINVAL); 1388 return (-EINVAL);
1389 1389
1390 if (copy_to_user (ulsm, lsm, sizeof(ulsm))) 1390 if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
1391 return (-EFAULT); 1391 return (-EFAULT);
1392 1392
1393 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1393 for (i = 0; i < lsm->lsm_stripe_count; i++) {
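This hunk, and the octeon-usb and rtl8188eu hunks that follow, fix the same class of bug: sizeof() applied to a pointer (or to an array parameter, which decays to a pointer) measures the pointer, typically 4 or 8 bytes, not the object behind it. Sketch:

#include <linux/string.h>

struct demo_state {
	char name[32];
	int counters[16];
};

static void demo_reset(struct demo_state *st)
{
	/* sizeof(st) would clear only 4 or 8 bytes (the pointer) */
	memset(st, 0, sizeof(*st));
}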
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
604 } 604 }
605 } 605 }
606 606
607 memset(usb, 0, sizeof(usb)); 607 memset(usb, 0, sizeof(*usb));
608 usb->init_flags = flags; 608 usb->init_flags = flags;
609 609
610 /* Initialize the USB state structure */ 610 /* Initialize the USB state structure */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); 907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
908 } 908 }
909 909
910 _rtw_memset(data, '\0', sizeof(data)); 910 _rtw_memset(data, '\0', sizeof(*data));
911 911
912 i = psd_start; 912 i = psd_start;
913 while (i < psd_stop) { 913 while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
57 u8 cut_ver, fab_ver; 57 u8 cut_ver, fab_ver;
58 58
59 /* Init Value */ 59 /* Init Value */
60 _rtw_memset(dm_odm, 0, sizeof(dm_odm)); 60 _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
61 61
62 dm_odm->Adapter = Adapter; 62 dm_odm->Adapter = Adapter;
63 63
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6973 stop = strncmp(extra, "stop", 4); 6973 stop = strncmp(extra, "stop", 4);
6974 sscanf(extra, "count =%d, pkt", &count); 6974 sscanf(extra, "count =%d, pkt", &count);
6975 6975
6976 _rtw_memset(extra, '\0', sizeof(extra)); 6976 _rtw_memset(extra, '\0', sizeof(*extra));
6977 6977
6978 if (stop == 0) { 6978 if (stop == 0) {
6979 bStartTest = 0; /* To set Stop */ 6979 bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
57 {} /* Terminating entry */ 58 {} /* Terminating entry */
58}; 59};
59 60
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
37 /* Get TCB and local buffer from common pool. 37 /* Get TCB and local buffer from common pool.
38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ 38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); 39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
40 if (!skb)
41 return RT_STATUS_FAILURE;
40 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 42 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
41 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 43 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
42 tcb_desc->queue_index = TXCMD_QUEUE; 44 tcb_desc->queue_index = TXCMD_QUEUE;
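The added check matters because dev_alloc_skb() returns NULL under memory pressure and the very next line dereferences skb->cb. A condensed fragment of the resulting shape (driver context assumed, not compilable on its own):

    struct sk_buff *skb;

    skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
    if (!skb)                          /* allocation can fail: bail out early */
        return RT_STATUS_FAILURE;      /* driver-specific failure status */

    /* skb->cb is only safe to touch after the NULL check */
    memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));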
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
1634 if (pMgmt == NULL) 1634 if (pMgmt == NULL)
1635 return -EFAULT; 1635 return -EFAULT;
1636 1636
1637 if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
1638 return -ENODEV;
1639
1637 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); 1640 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
1638 if (buf == NULL) 1641 if (buf == NULL)
1639 return -ENOMEM; 1642 return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
1098 memset(pMgmt->abyCurrBSSID, 0, 6); 1098 memset(pMgmt->abyCurrBSSID, 0, 6);
1099 pMgmt->eCurrState = WMAC_STATE_IDLE; 1099 pMgmt->eCurrState = WMAC_STATE_IDLE;
1100 1100
1101 pDevice->flags &= ~DEVICE_FLAGS_OPENED;
1102
1101 device_free_tx_bufs(pDevice); 1103 device_free_tx_bufs(pDevice);
1102 device_free_rx_bufs(pDevice); 1104 device_free_rx_bufs(pDevice);
1103 device_free_int_bufs(pDevice); 1105 device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
1109 usb_free_urb(pDevice->pInterruptURB); 1111 usb_free_urb(pDevice->pInterruptURB);
1110 1112
1111 BSSvClearNodeDBTable(pDevice, 0); 1113 BSSvClearNodeDBTable(pDevice, 0);
1112 pDevice->flags &=(~DEVICE_FLAGS_OPENED);
1113 1114
1114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); 1115 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
1115 1116
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); 148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
149 149
150 for (ii = 0; ii < pDevice->cbTD; ii++) { 150 for (ii = 0; ii < pDevice->cbTD; ii++) {
151 if (!pDevice->apTD[ii])
152 return NULL;
151 pContext = pDevice->apTD[ii]; 153 pContext = pDevice->apTD[ii];
152 if (pContext->bBoolInUse == false) { 154 if (pContext->bBoolInUse == false) {
153 pContext->bBoolInUse = true; 155 pContext->bBoolInUse = true;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c63..38e44b9abf0f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
753 753
754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755{ 755{
756 struct iscsi_cmd *cmd; 756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
757 758
758 conn->exp_statsn = exp_statsn; 759 conn->exp_statsn = exp_statsn;
759 760
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
761 return; 762 return;
762 763
763 spin_lock_bh(&conn->cmd_lock); 764 spin_lock_bh(&conn->cmd_lock);
764 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
765 spin_lock(&cmd->istate_lock); 766 spin_lock(&cmd->istate_lock);
766 if ((cmd->i_state == ISTATE_SENT_STATUS) && 767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
767 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
768 cmd->i_state = ISTATE_REMOVE; 769 cmd->i_state = ISTATE_REMOVE;
769 spin_unlock(&cmd->istate_lock); 770 spin_unlock(&cmd->istate_lock);
770 iscsit_add_cmd_to_immediate_queue(cmd, conn, 771 list_move_tail(&cmd->i_conn_node, &ack_list);
771 cmd->i_state);
772 continue; 772 continue;
773 } 773 }
774 spin_unlock(&cmd->istate_lock); 774 spin_unlock(&cmd->istate_lock);
775 } 775 }
776 spin_unlock_bh(&conn->cmd_lock); 776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
777} 782}
778 783
779static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 784static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
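The rework gathers acknowledged commands on a private list while conn->cmd_lock is held and frees them only after the lock is dropped, because iscsit_free_cmd() may sleep and must not run under a spinlock. A sketch of the general pattern with invented names (struct item, done() and release() are placeholders, not target-core symbols):

    LIST_HEAD(reap_list);
    struct item *it, *tmp;

    spin_lock_bh(&lock);
    list_for_each_entry_safe(it, tmp, &live_list, node) {
        if (done(it))
            list_move_tail(&it->node, &reap_list);   /* unlink under the lock */
    }
    spin_unlock_bh(&lock);

    /* sleeping cleanup runs only after the spinlock is released */
    list_for_each_entry_safe(it, tmp, &reap_list, node) {
        list_del(&it->node);
        release(it);
    }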
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1d..ef6d836a4d09 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
1192 */ 1192 */
1193alloc_tags: 1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS; 1195 tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
1197 1197
1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f2de28e178fd..b0cac0c342e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736 * Fallthrough 736 * Fallthrough
737 */ 737 */
738 case ISCSI_OP_SCSI_TMFUNC: 738 case ISCSI_OP_SCSI_TMFUNC:
739 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 739 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
741 __iscsit_free_cmd(cmd, true, shutdown); 741 __iscsit_free_cmd(cmd, true, shutdown);
742 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 742 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
752 se_cmd = &cmd->se_cmd; 752 se_cmd = &cmd->se_cmd;
753 __iscsit_free_cmd(cmd, true, shutdown); 753 __iscsit_free_cmd(cmd, true, shutdown);
754 754
755 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 755 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, true, shutdown); 757 __iscsit_free_cmd(cmd, true, shutdown);
758 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 758 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..4714c6f8da4b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -349,7 +349,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{ 349{
350 struct se_device *dev = cmd->se_dev; 350 struct se_device *dev = cmd->se_dev;
351 351
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 352 /*
353 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
354 * within target_complete_ok_work() if the command was successfully
355 * sent to the backend driver.
356 */
357 spin_lock_irq(&cmd->t_state_lock);
358 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
359 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
360 spin_unlock_irq(&cmd->t_state_lock);
361
353 /* 362 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 363 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission. 364 * before the original READ I/O submission.
@@ -363,7 +372,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{ 372{
364 struct se_device *dev = cmd->se_dev; 373 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg; 374 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr; 375 unsigned char *buf = NULL, *addr;
367 struct sg_mapping_iter m; 376 struct sg_mapping_iter m;
368 unsigned int offset = 0, len; 377 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb; 378 unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +387,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 */ 387 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 388 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE; 389 return TCM_NO_SENSE;
390 /*
391 * Immediately exit + release dev->caw_sem if command has already
392 * been failed with a non-zero SCSI status.
393 */
394 if (cmd->scsi_status) {
395 pr_err("compare_and_write_callback: non zero scsi_status:"
396 " 0x%02x\n", cmd->scsi_status);
397 goto out;
398 }
381 399
382 buf = kzalloc(cmd->data_length, GFP_KERNEL); 400 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) { 401 if (!buf) {
@@ -508,6 +526,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
508 cmd->transport_complete_callback = NULL; 526 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 527 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 } 528 }
529 /*
530 * Reset cmd->data_length to individual block_size in order to not
531 * confuse backend drivers that depend on this value matching the
532 * size of the I/O being submitted.
533 */
534 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
511 535
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 536 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE); 537 DMA_FROM_DEVICE);
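COMPARE AND WRITE carries two buffers in one CDB (the verify data followed by the write data), so the command's data_length arrives as twice the LBA range, while each backend READ or WRITE must be sized to the range itself. A hedged fragment showing the arithmetic with the fields used in this hunk (the example numbers in the comments are illustrative):

    unsigned int nolb = cmd->t_task_nolb;              /* e.g. 1 block    */
    unsigned int bs   = dev->dev_attrib.block_size;    /* e.g. 512 bytes  */

    /*
     * The initiator's Data-Out payload is verify data + write data,
     * i.e. 2 * nolb * bs bytes, and that is what data_length held.
     * Each backend READ/WRITE covers only the LBA range itself:
     */
    cmd->data_length = nolb * bs;                      /* e.g. 512 bytes  */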
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac0..81e945eefbbd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
236{ 236{
237 int rc; 237 int rc;
238 238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
240 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
240 if (!se_sess->sess_cmd_map) { 241 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
242 return -ENOMEM; 243 if (!se_sess->sess_cmd_map) {
244 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
245 return -ENOMEM;
246 }
243 } 247 }
244 248
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) { 250 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool," 251 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num); 252 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map); 253 if (is_vmalloc_addr(se_sess->sess_cmd_map))
254 vfree(se_sess->sess_cmd_map);
255 else
256 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL; 257 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM; 258 return -ENOMEM;
252 } 259 }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
412{ 419{
413 if (se_sess->sess_cmd_map) { 420 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map); 422 if (is_vmalloc_addr(se_sess->sess_cmd_map))
423 vfree(se_sess->sess_cmd_map);
424 else
425 kfree(se_sess->sess_cmd_map);
416 } 426 }
417 kmem_cache_free(se_sess_cache, se_sess); 427 kmem_cache_free(se_sess_cache, se_sess);
418} 428}
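The tag map is a single allocation of tag_num * tag_size bytes, which can be too large for contiguous pages at high queue depths, so the change tries kzalloc() quietly and falls back to vzalloc(), then frees through is_vmalloc_addr(). A sketch of the pair as helpers (names are made up; the patch additionally passes __GFP_REPEAT, and later kernels provide kvmalloc()/kvfree() for exactly this):

    static void *tag_map_alloc(size_t bytes)
    {
        /* try physically contiguous memory first, without the OOM warning */
        void *p = kzalloc(bytes, GFP_KERNEL | __GFP_NOWARN);

        return p ? p : vzalloc(bytes);     /* fall back to vmalloc space */
    }

    static void tag_map_free(void *p)
    {
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }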
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adca..3da4fd10b9f8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -298,8 +298,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
298 (unsigned long long)xop->dst_lba); 298 (unsigned long long)xop->dst_lba);
299 299
300 if (dc != 0) { 300 if (dc != 0) {
301 xop->dbl = (desc[29] << 16) & 0xff; 301 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] << 8) & 0xff; 302 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff; 303 xop->dbl |= desc[31] & 0xff;
304 304
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
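The original expression masked after shifting, and (x << 16) & 0xff is zero for any byte value, so only the low byte of the 24-bit descriptor field survived. Masking each byte before shifting assembles the value correctly; a standalone check:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t desc[3] = { 0x01, 0x02, 0x03 };   /* stands in for desc[29..31] */
        uint32_t wrong, right;

        wrong  = (desc[0] << 16) & 0xff;          /* mask after shift: always 0 */
        wrong |= (desc[1] << 8) & 0xff;           /* also 0                     */
        wrong |= desc[2] & 0xff;

        right  = (desc[0] & 0xff) << 16;          /* mask first, then shift     */
        right |= (desc[1] & 0xff) << 8;
        right |= desc[2] & 0xff;

        printf("wrong = 0x%06x, right = 0x%06x\n", wrong, right);  /* 0x000003 vs 0x010203 */
        return 0;
    }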
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index e61c36cbb866..c193af6a628f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@ struct console xenboot_console = {
636 .name = "xenboot", 636 .name = "xenboot",
637 .write = xenboot_write_console, 637 .write = xenboot_write_console,
638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, 638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
639 .index = -1,
639}; 640};
640#endif /* CONFIG_EARLY_PRINTK */ 641#endif /* CONFIG_EARLY_PRINTK */
641 642
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..7a744b69c3d1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; 1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
1759 if (canon_change) { 1759 if (canon_change) {
1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1761 ldata->line_start = 0; 1761 ldata->line_start = ldata->canon_head = ldata->read_tail;
1762 ldata->canon_head = ldata->read_tail;
1763 ldata->erasing = 0; 1762 ldata->erasing = 0;
1764 ldata->lnext = 0; 1763 ldata->lnext = 0;
1765 } 1764 }
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2184 2183
2185 if (!input_available_p(tty, 0)) { 2184 if (!input_available_p(tty, 0)) {
2186 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2187 retval = -EIO; 2186 up_read(&tty->termios_rwsem);
2188 break; 2187 tty_flush_to_ldisc(tty);
2189 } 2188 down_read(&tty->termios_rwsem);
2190 if (tty_hung_up_p(file)) 2189 if (!input_available_p(tty, 0)) {
2191 break; 2190 retval = -EIO;
2192 if (!timeout) 2191 break;
2193 break; 2192 }
2194 if (file->f_flags & O_NONBLOCK) { 2193 } else {
2195 retval = -EAGAIN; 2194 if (tty_hung_up_p(file))
2196 break; 2195 break;
2197 } 2196 if (!timeout)
2198 if (signal_pending(current)) { 2197 break;
2199 retval = -ERESTARTSYS; 2198 if (file->f_flags & O_NONBLOCK) {
2200 break; 2199 retval = -EAGAIN;
2201 } 2200 break;
2202 n_tty_set_room(tty); 2201 }
2203 up_read(&tty->termios_rwsem); 2202 if (signal_pending(current)) {
2203 retval = -ERESTARTSYS;
2204 break;
2205 }
2206 n_tty_set_room(tty);
2207 up_read(&tty->termios_rwsem);
2204 2208
2205 timeout = schedule_timeout(timeout); 2209 timeout = schedule_timeout(timeout);
2206 2210
2207 down_read(&tty->termios_rwsem); 2211 down_read(&tty->termios_rwsem);
2208 continue; 2212 continue;
2213 }
2209 } 2214 }
2210 __set_current_state(TASK_RUNNING); 2215 __set_current_state(TASK_RUNNING);
2211 2216
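A hung-up pty peer (TTY_OTHER_CLOSED) does not mean the reader has consumed everything: data may still sit in the flip buffers waiting to be pushed to the line discipline. The rework flushes that work first, dropping termios_rwsem because the flush can sleep, and only reports -EIO if input is still unavailable afterwards. A condensed fragment of the new logic, using the names from the hunk:

    if (!input_available_p(tty, 0) &&
        test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
        /* push any buffered input to the ldisc; this may sleep */
        up_read(&tty->termios_rwsem);
        tty_flush_to_ldisc(tty);
        down_read(&tty->termios_rwsem);

        if (!input_available_p(tty, 0))
            retval = -EIO;      /* genuinely drained: report the hangup */
        /* otherwise fall through and read what the flush produced */
    }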
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
667 667
668static int dma_push_rx(struct eg20t_port *priv, int size) 668static int dma_push_rx(struct eg20t_port *priv, int size)
669{ 669{
670 struct tty_struct *tty;
671 int room; 670 int room;
672 struct uart_port *port = &priv->port; 671 struct uart_port *port = &priv->port;
673 struct tty_port *tport = &port->state->port; 672 struct tty_port *tport = &port->state->port;
674 673
675 port = &priv->port;
676 tty = tty_port_tty_get(tport);
677 if (!tty) {
678 dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
679 return 0;
680 }
681
682 room = tty_buffer_request_room(tport, size); 674 room = tty_buffer_request_room(tport, size);
683 675
684 if (room < size) 676 if (room < size)
685 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 677 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
686 size - room); 678 size - room);
687 if (!room) 679 if (!room)
688 return room; 680 return 0;
689 681
690 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); 682 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
691 683
692 port->icount.rx += room; 684 port->icount.rx += room;
693 tty_kref_put(tty);
694 685
695 return room; 686 return room;
696} 687}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
1098 if (tty == NULL) { 1089 if (tty == NULL) {
1099 for (i = 0; error_msg[i] != NULL; i++) 1090 for (i = 0; error_msg[i] != NULL; i++)
1100 dev_err(&priv->pdev->dev, error_msg[i]); 1091 dev_err(&priv->pdev->dev, error_msg[i]);
1092 } else {
1093 tty_kref_put(tty);
1101 } 1094 }
1102} 1095}
1103 1096
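dma_push_rx() no longer needs a tty reference at all, since tty_buffer_request_room() and tty_insert_flip_string() operate on the tty_port, and pch_uart_err_ir() now drops the reference it does take. The rule being enforced: every successful tty_port_tty_get() must be paired with tty_kref_put(). A fragment of the pattern (variable names assumed from the surrounding driver):

    struct tty_struct *tty = tty_port_tty_get(tport);

    if (!tty) {
        /* no reader attached: log against the device instead */
        dev_err(dev, "UART error with no tty attached\n");
    } else {
        /* ... use tty ... */
        tty_kref_put(tty);     /* matching put for the _get above */
    }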
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
732static void tegra_uart_stop_rx(struct uart_port *u) 732static void tegra_uart_stop_rx(struct uart_port *u)
733{ 733{
734 struct tegra_uart_port *tup = to_tegra_uport(u); 734 struct tegra_uart_port *tup = to_tegra_uport(u);
735 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); 735 struct tty_struct *tty;
736 struct tty_port *port = &u->state->port; 736 struct tty_port *port = &u->state->port;
737 struct dma_tx_state state; 737 struct dma_tx_state state;
738 unsigned long ier; 738 unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
744 if (!tup->rx_in_progress) 744 if (!tup->rx_in_progress)
745 return; 745 return;
746 746
747 tty = tty_port_tty_get(&tup->uport.state->port);
748
747 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ 749 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
748 750
749 ier = tup->ier_shadow; 751 ier = tup->ier_shadow;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1201 } 1201 }
1202 return 0; 1202 return 0;
1203 case TCFLSH: 1203 case TCFLSH:
1204 retval = tty_check_change(tty);
1205 if (retval)
1206 return retval;
1204 return __tty_perform_flush(tty, arg); 1207 return __tty_perform_flush(tty, arg);
1205 default: 1208 default:
1206 /* Try the mode commands */ 1209 /* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
1config USB_CHIPIDEA 1config USB_CHIPIDEA
2 tristate "ChipIdea Highspeed Dual Role Controller" 2 tristate "ChipIdea Highspeed Dual Role Controller"
3 depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 help 4 help
5 Say Y here if your system has a dual role high speed USB 5 Say Y here if your system has a dual role high speed USB
6 controller based on ChipIdea silicon IP. Currently, only the 6 controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
131 if (ret) { 131 if (ret) {
132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", 132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
133 ret); 133 ret);
134 goto err_clk; 134 goto err_phy;
135 } 135 }
136 } 136 }
137 137
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
143 dev_err(&pdev->dev, 143 dev_err(&pdev->dev,
144 "Can't register ci_hdrc platform device, err=%d\n", 144 "Can't register ci_hdrc platform device, err=%d\n",
145 ret); 145 ret);
146 goto err_clk; 146 goto err_phy;
147 } 147 }
148 148
149 if (data->usbmisc_data) { 149 if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
164 164
165disable_device: 165disable_device:
166 ci_hdrc_remove_device(data->ci_pdev); 166 ci_hdrc_remove_device(data->ci_pdev);
167err_phy:
168 if (data->phy)
169 usb_phy_shutdown(data->phy);
167err_clk: 170err_clk:
168 clk_disable_unprepare(data->clk); 171 clk_disable_unprepare(data->clk);
169 return ret; 172 return ret;
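Switching the two failure paths from err_clk to the new err_phy keeps the unwind order the mirror image of the setup order: the PHY is brought up after the clock, so it must be shut down before the clock is disabled. A generic sketch of the goto ladder this relies on (step_a/step_b/step_c and the undo functions are placeholders):

    static int demo_probe(void)
    {
        int ret;

        ret = step_a();          /* e.g. clk_prepare_enable()   */
        if (ret)
            return ret;

        ret = step_b();          /* e.g. usb_phy_init()         */
        if (ret)
            goto err_a;

        ret = step_c();          /* e.g. register child device  */
        if (ret)
            goto err_b;          /* undo B first, then A        */

        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
        return ret;
    }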
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 042320a6c6c7..d514332ac081 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), 129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata, 130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
131 }, 131 },
132 { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ } 132 {
133 /* Intel Clovertrail */
134 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
135 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
136 },
137 { 0 } /* end: all zeroes */
133}; 138};
134MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); 139MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
135 140
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
605 dbg_remove_files(ci); 605 dbg_remove_files(ci);
606 free_irq(ci->irq, ci); 606 free_irq(ci->irq, ci);
607 ci_role_destroy(ci); 607 ci_role_destroy(ci);
608 kfree(ci->hw_bank.regmap);
608 609
609 return 0; 610 return 0;
610} 611}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
1600 for (i = 0; i < ci->hw_ep_max; i++) { 1600 for (i = 0; i < ci->hw_ep_max; i++) {
1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; 1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1602 1602
1603 if (hwep->pending_td)
1604 free_pending_td(hwep);
1603 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); 1605 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1604 } 1606 }
1605} 1607}
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
1667 if (ci->platdata->notify_event) 1669 if (ci->platdata->notify_event)
1668 ci->platdata->notify_event(ci, 1670 ci->platdata->notify_event(ci,
1669 CI_HDRC_CONTROLLER_STOPPED_EVENT); 1671 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1670 ci->driver = NULL;
1671 spin_unlock_irqrestore(&ci->lock, flags); 1672 spin_unlock_irqrestore(&ci->lock, flags);
1672 _gadget_stop_activity(&ci->gadget); 1673 _gadget_stop_activity(&ci->gadget);
1673 spin_lock_irqsave(&ci->lock, flags); 1674 spin_lock_irqsave(&ci->lock, flags);
1674 pm_runtime_put(&ci->gadget.dev); 1675 pm_runtime_put(&ci->gadget.dev);
1675 } 1676 }
1676 1677
1678 ci->driver = NULL;
1677 spin_unlock_irqrestore(&ci->lock, flags); 1679 spin_unlock_irqrestore(&ci->lock, flags);
1678 1680
1679 return 0; 1681 return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
742 if ((index & ~USB_DIR_IN) == 0) 742 if ((index & ~USB_DIR_IN) == 0)
743 return 0; 743 return 0;
744 ret = findintfep(ps->dev, index); 744 ret = findintfep(ps->dev, index);
745 if (ret < 0) {
746 /*
747 * Some not fully compliant Win apps seem to get
748 * index wrong and have the endpoint number here
749 * rather than the endpoint address (with the
750 * correct direction). Win does let this through,
751 * so we'll not reject it here but leave it to
752 * the device to not break KVM. But we warn.
753 */
754 ret = findintfep(ps->dev, index ^ 0x80);
755 if (ret >= 0)
756 dev_info(&ps->dev->dev,
757 "%s: process %i (%s) requesting ep %02x but needs %02x\n",
758 __func__, task_pid_nr(current),
759 current->comm, index, index ^ 0x80);
760 }
745 if (ret >= 0) 761 if (ret >= 0)
746 ret = checkintf(ps, ret); 762 ret = checkintf(ps, ret);
747 break; 763 break;
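USB endpoint addresses carry the direction in bit 7 (USB_DIR_IN, 0x80) on top of the endpoint number in the low bits; the quirk above retries the lookup with that bit toggled because some userspace passes the bare number. A standalone illustration of the encoding:

    #include <stdio.h>
    #include <stdint.h>

    #define USB_DIR_IN               0x80   /* bit 7: device-to-host */
    #define USB_ENDPOINT_NUMBER_MASK 0x0f

    int main(void)
    {
        uint8_t ep_addr = 0x81;     /* IN endpoint 1 */

        printf("number = %u, direction = %s\n",
               ep_addr & USB_ENDPOINT_NUMBER_MASK,
               (ep_addr & USB_DIR_IN) ? "IN" : "OUT");

        /* an app passing "1" for an IN endpoint really means 0x81 */
        printf("recovered address = 0x%02x\n", (unsigned)(0x01 ^ USB_DIR_IN));
        return 0;
    }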
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
3426 unsigned long long u2_pel; 3426 unsigned long long u2_pel;
3427 int ret; 3427 int ret;
3428 3428
3429 if (udev->state != USB_STATE_CONFIGURED)
3430 return 0;
3431
3429 /* Convert SEL and PEL stored in ns to us */ 3432 /* Convert SEL and PEL stored in ns to us */
3430 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 3433 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
3431 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 3434 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 997ebe420bc9..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3 29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37 31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
32#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
32 33
33struct dwc3_pci { 34struct dwc3_pci {
34 struct device *dev; 35 struct device *dev;
@@ -189,6 +190,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
189 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), 190 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
190 }, 191 },
191 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 192 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
193 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
192 { } /* Terminating Entry */ 194 { } /* Terminating Entry */
193}; 195};
194MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); 196MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..44cf775a8627 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
1034 struct ffs_file_perms perms; 1034 struct ffs_file_perms perms;
1035 umode_t root_mode; 1035 umode_t root_mode;
1036 const char *dev_name; 1036 const char *dev_name;
1037 union { 1037 struct ffs_data *ffs_data;
1038 /* set by ffs_fs_mount(), read by ffs_sb_fill() */
1039 void *private_data;
1040 /* set by ffs_sb_fill(), read by ffs_fs_mount */
1041 struct ffs_data *ffs_data;
1042 };
1043}; 1038};
1044 1039
1045static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) 1040static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1046{ 1041{
1047 struct ffs_sb_fill_data *data = _data; 1042 struct ffs_sb_fill_data *data = _data;
1048 struct inode *inode; 1043 struct inode *inode;
1049 struct ffs_data *ffs; 1044 struct ffs_data *ffs = data->ffs_data;
1050 1045
1051 ENTER(); 1046 ENTER();
1052 1047
1053 /* Initialise data */
1054 ffs = ffs_data_new();
1055 if (unlikely(!ffs))
1056 goto Enomem;
1057
1058 ffs->sb = sb; 1048 ffs->sb = sb;
1059 ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); 1049 data->ffs_data = NULL;
1060 if (unlikely(!ffs->dev_name))
1061 goto Enomem;
1062 ffs->file_perms = data->perms;
1063 ffs->private_data = data->private_data;
1064
1065 /* used by the caller of this function */
1066 data->ffs_data = ffs;
1067
1068 sb->s_fs_info = ffs; 1050 sb->s_fs_info = ffs;
1069 sb->s_blocksize = PAGE_CACHE_SIZE; 1051 sb->s_blocksize = PAGE_CACHE_SIZE;
1070 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1052 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1080 &data->perms); 1062 &data->perms);
1081 sb->s_root = d_make_root(inode); 1063 sb->s_root = d_make_root(inode);
1082 if (unlikely(!sb->s_root)) 1064 if (unlikely(!sb->s_root))
1083 goto Enomem; 1065 return -ENOMEM;
1084 1066
1085 /* EP0 file */ 1067 /* EP0 file */
1086 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, 1068 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1087 &ffs_ep0_operations, NULL))) 1069 &ffs_ep0_operations, NULL)))
1088 goto Enomem; 1070 return -ENOMEM;
1089 1071
1090 return 0; 1072 return 0;
1091
1092Enomem:
1093 return -ENOMEM;
1094} 1073}
1095 1074
1096static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1075static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1193 struct dentry *rv; 1172 struct dentry *rv;
1194 int ret; 1173 int ret;
1195 void *ffs_dev; 1174 void *ffs_dev;
1175 struct ffs_data *ffs;
1196 1176
1197 ENTER(); 1177 ENTER();
1198 1178
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1200 if (unlikely(ret < 0)) 1180 if (unlikely(ret < 0))
1201 return ERR_PTR(ret); 1181 return ERR_PTR(ret);
1202 1182
1183 ffs = ffs_data_new();
1184 if (unlikely(!ffs))
1185 return ERR_PTR(-ENOMEM);
1186 ffs->file_perms = data.perms;
1187
1188 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1189 if (unlikely(!ffs->dev_name)) {
1190 ffs_data_put(ffs);
1191 return ERR_PTR(-ENOMEM);
1192 }
1193
1203 ffs_dev = functionfs_acquire_dev_callback(dev_name); 1194 ffs_dev = functionfs_acquire_dev_callback(dev_name);
1204 if (IS_ERR(ffs_dev)) 1195 if (IS_ERR(ffs_dev)) {
1205 return ffs_dev; 1196 ffs_data_put(ffs);
1197 return ERR_CAST(ffs_dev);
1198 }
1199 ffs->private_data = ffs_dev;
1200 data.ffs_data = ffs;
1206 1201
1207 data.dev_name = dev_name;
1208 data.private_data = ffs_dev;
1209 rv = mount_nodev(t, flags, &data, ffs_sb_fill); 1202 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1210 1203 if (IS_ERR(rv) && data.ffs_data) {
1211 /* data.ffs_data is set by ffs_sb_fill */
1212 if (IS_ERR(rv))
1213 functionfs_release_dev_callback(data.ffs_data); 1204 functionfs_release_dev_callback(data.ffs_data);
1214 1205 ffs_data_put(data.ffs_data);
1206 }
1215 return rv; 1207 return rv;
1216} 1208}
1217 1209
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
2264 data->raw_descs + ret, 2256 data->raw_descs + ret,
2265 (sizeof data->raw_descs) - ret, 2257 (sizeof data->raw_descs) - ret,
2266 __ffs_func_bind_do_descs, func); 2258 __ffs_func_bind_do_descs, func);
2259 if (unlikely(ret < 0))
2260 goto error;
2267 } 2261 }
2268 2262
2269 /* 2263 /*
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index cc9207473dbc..0ac6064aa3b8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
2054/* 2054/*
2055 * probe - binds to the platform device 2055 * probe - binds to the platform device
2056 */ 2056 */
2057static int __init pxa25x_udc_probe(struct platform_device *pdev) 2057static int pxa25x_udc_probe(struct platform_device *pdev)
2058{ 2058{
2059 struct pxa25x_udc *dev = &memory; 2059 struct pxa25x_udc *dev = &memory;
2060 int retval, irq; 2060 int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
2203 pullup_off(); 2203 pullup_off();
2204} 2204}
2205 2205
2206static int __exit pxa25x_udc_remove(struct platform_device *pdev) 2206static int pxa25x_udc_remove(struct platform_device *pdev)
2207{ 2207{
2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev);
2209 2209
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
2294 2294
2295static struct platform_driver udc_driver = { 2295static struct platform_driver udc_driver = {
2296 .shutdown = pxa25x_udc_shutdown, 2296 .shutdown = pxa25x_udc_shutdown,
2297 .remove = __exit_p(pxa25x_udc_remove), 2297 .probe = pxa25x_udc_probe,
2298 .remove = pxa25x_udc_remove,
2298 .suspend = pxa25x_udc_suspend, 2299 .suspend = pxa25x_udc_suspend,
2299 .resume = pxa25x_udc_resume, 2300 .resume = pxa25x_udc_resume,
2300 .driver = { 2301 .driver = {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
2303 }, 2304 },
2304}; 2305};
2305 2306
2306module_platform_driver_probe(udc_driver, pxa25x_udc_probe); 2307module_platform_driver(udc_driver);
2307 2308
2308MODULE_DESCRIPTION(DRIVER_DESC); 2309MODULE_DESCRIPTION(DRIVER_DESC);
2309MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); 2310MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
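module_platform_driver_probe() registers a driver whose probe routine may be discarded after init (hence the old __init/__exit annotations), which breaks unbinding/rebinding through sysfs and deferred probing. Dropping the annotations and wiring probe/remove into the platform_driver restores the normal lifecycle. A minimal sketch of the resulting registration shape (names are placeholders, not the pxa25x driver):

    static int demo_probe(struct platform_device *pdev)
    {
        /* must stay resident: probe can run again on rebind or deferral */
        return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
        return 0;
    }

    static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
            .name = "demo-udc",
        },
    };
    module_platform_driver(demo_driver);   /* expands to module_init/module_exit */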
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 6bddf1aa2347..a8a99e4748d5 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
543 * FIFO, requests of >512 cause the endpoint to get stuck with a 543 * FIFO, requests of >512 cause the endpoint to get stuck with a
544 * fragment of the end of the transfer in it. 544 * fragment of the end of the transfer in it.
545 */ 545 */
546 if (can_write > 512) 546 if (can_write > 512 && !periodic)
547 can_write = 512; 547 can_write = 512;
548 548
549 /* 549 /*
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 4449f565d6c6..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
130 } 130 }
131 131
132 /* Enable USB controller, 83xx or 8536 */ 132 /* Enable USB controller, 83xx or 8536 */
133 if (pdata->have_sysif_regs) 133 if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); 134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
135 135
136 /* Don't need to set host mode here. It will be done by tdi_reset() */ 136 /* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
232 case FSL_USB2_PHY_ULPI: 232 case FSL_USB2_PHY_ULPI:
233 if (pdata->have_sysif_regs && pdata->controller_ver) { 233 if (pdata->have_sysif_regs && pdata->controller_ver) {
234 /* controller version 1.6 or above */ 234 /* controller version 1.6 or above */
235 clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
235 setbits32(non_ehci + FSL_SOC_USB_CTRL, 236 setbits32(non_ehci + FSL_SOC_USB_CTRL,
236 ULPI_PHY_CLK_SEL); 237 ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
237 /*
238 * Due to controller issue of PHY_CLK_VALID in ULPI
239 * mode, we set USB_CTRL_USB_EN before checking
240 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
241 */
242 clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
243 UTMI_PHY_EN, USB_CTRL_USB_EN);
244 } 238 }
245 portsc |= PORT_PTS_ULPI; 239 portsc |= PORT_PTS_ULPI;
246 break; 240 break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
270 if (pdata->have_sysif_regs && pdata->controller_ver && 264 if (pdata->have_sysif_regs && pdata->controller_ver &&
271 (phy_mode == FSL_USB2_PHY_ULPI)) { 265 (phy_mode == FSL_USB2_PHY_ULPI)) {
272 /* check PHY_CLK_VALID to get phy clk valid */ 266 /* check PHY_CLK_VALID to get phy clk valid */
273 if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & 267 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
274 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) { 268 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
269 in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
275 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); 270 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
276 return -EINVAL; 271 return -EINVAL;
277 } 272 }
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
361 .remove = usb_hcd_pci_remove, 361 .remove = usb_hcd_pci_remove,
362 .shutdown = usb_hcd_pci_shutdown, 362 .shutdown = usb_hcd_pci_shutdown,
363 363
364#ifdef CONFIG_PM_SLEEP 364#ifdef CONFIG_PM
365 .driver = { 365 .driver = {
366 .pm = &usb_hcd_pci_pm_ops 366 .pm = &usb_hcd_pci_pm_ops
367 }, 367 },
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
824 i = DIV_ROUND_UP(wrap_frame( 824 i = DIV_ROUND_UP(wrap_frame(
825 cur_frame - urb->start_frame), 825 cur_frame - urb->start_frame),
826 urb->interval); 826 urb->interval);
827 if (urb->transfer_flags & URB_ISO_ASAP) { 827
828 /* Treat underruns as if URB_ISO_ASAP was set */
829 if ((urb->transfer_flags & URB_ISO_ASAP) ||
830 i >= urb->number_of_packets) {
828 urb->start_frame = wrap_frame(urb->start_frame 831 urb->start_frame = wrap_frame(urb->start_frame
829 + i * urb->interval); 832 + i * urb->interval);
830 i = 0; 833 i = 0;
831 } else if (i >= urb->number_of_packets) {
832 ret = -EXDEV;
833 goto alloc_dmem_failed;
834 } 834 }
835 } 835 }
836 } 836 }
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
216 frame &= ~(ed->interval - 1); 216 frame &= ~(ed->interval - 1);
217 frame |= ed->branch; 217 frame |= ed->branch;
218 urb->start_frame = frame; 218 urb->start_frame = frame;
219 ed->last_iso = frame + ed->interval * (size - 1);
219 } 220 }
220 } else if (ed->type == PIPE_ISOCHRONOUS) { 221 } else if (ed->type == PIPE_ISOCHRONOUS) {
221 u16 next = ohci_frame_no(ohci) + 1; 222 u16 next = ohci_frame_no(ohci) + 1;
222 u16 frame = ed->last_iso + ed->interval; 223 u16 frame = ed->last_iso + ed->interval;
224 u16 length = ed->interval * (size - 1);
223 225
224 /* Behind the scheduling threshold? */ 226 /* Behind the scheduling threshold? */
225 if (unlikely(tick_before(frame, next))) { 227 if (unlikely(tick_before(frame, next))) {
226 228
227 /* USB_ISO_ASAP: Round up to the first available slot */ 229 /* URB_ISO_ASAP: Round up to the first available slot */
228 if (urb->transfer_flags & URB_ISO_ASAP) { 230 if (urb->transfer_flags & URB_ISO_ASAP) {
229 frame += (next - frame + ed->interval - 1) & 231 frame += (next - frame + ed->interval - 1) &
230 -ed->interval; 232 -ed->interval;
231 233
232 /* 234 /*
233 * Not ASAP: Use the next slot in the stream. If 235 * Not ASAP: Use the next slot in the stream,
234 * the entire URB falls before the threshold, fail. 236 * no matter what.
235 */ 237 */
236 } else { 238 } else {
237 if (tick_before(frame + ed->interval *
238 (urb->number_of_packets - 1), next)) {
239 retval = -EXDEV;
240 usb_hcd_unlink_urb_from_ep(hcd, urb);
241 goto fail;
242 }
243
244 /* 239 /*
245 * Some OHCI hardware doesn't handle late TDs 240 * Some OHCI hardware doesn't handle late TDs
246 * correctly. After retiring them it proceeds 241 * correctly. After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
251 urb_priv->td_cnt = DIV_ROUND_UP( 246 urb_priv->td_cnt = DIV_ROUND_UP(
252 (u16) (next - frame), 247 (u16) (next - frame),
253 ed->interval); 248 ed->interval);
249 if (urb_priv->td_cnt >= urb_priv->length) {
250 ++urb_priv->td_cnt; /* Mark it */
251 ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
252 urb, frame, length,
253 next);
254 }
254 } 255 }
255 } 256 }
256 urb->start_frame = frame; 257 urb->start_frame = frame;
258 ed->last_iso = frame + length;
257 } 259 }
258 260
259 /* fill the TDs and link them to the ed; and 261 /* fill the TDs and link them to the ed; and
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41__releases(ohci->lock) 41__releases(ohci->lock)
42__acquires(ohci->lock) 42__acquires(ohci->lock)
43{ 43{
44 struct device *dev = ohci_to_hcd(ohci)->self.controller; 44 struct device *dev = ohci_to_hcd(ohci)->self.controller;
45 struct usb_host_endpoint *ep = urb->ep;
46 struct urb_priv *urb_priv;
47
45 // ASSERT (urb->hcpriv != 0); 48 // ASSERT (urb->hcpriv != 0);
46 49
50 restart:
47 urb_free_priv (ohci, urb->hcpriv); 51 urb_free_priv (ohci, urb->hcpriv);
48 urb->hcpriv = NULL; 52 urb->hcpriv = NULL;
49 if (likely(status == -EINPROGRESS)) 53 if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
80 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); 84 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
81 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); 85 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
82 } 86 }
87
88 /*
89 * An isochronous URB that is sumitted too late won't have any TDs
90 * (marked by the fact that the td_cnt value is larger than the
91 * actual number of TDs). If the next URB on this endpoint is like
92 * that, give it back now.
93 */
94 if (!list_empty(&ep->urb_list)) {
95 urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
96 urb_priv = urb->hcpriv;
97 if (urb_priv->td_cnt > urb_priv->length) {
98 status = 0;
99 goto restart;
100 }
101 }
83} 102}
84 103
85 104
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
546 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); 565 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
547 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, 566 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
548 (data & 0x0FFF) | 0xE000); 567 (data & 0x0FFF) | 0xE000);
549 td->ed->last_iso = info & 0xffff;
550 } else { 568 } else {
551 td->hwCBP = cpu_to_hc32 (ohci, data); 569 td->hwCBP = cpu_to_hc32 (ohci, data);
552 } 570 }
@@ -996,7 +1014,7 @@ rescan_this:
996 urb_priv->td_cnt++; 1014 urb_priv->td_cnt++;
997 1015
998 /* if URB is done, clean up */ 1016 /* if URB is done, clean up */
999 if (urb_priv->td_cnt == urb_priv->length) { 1017 if (urb_priv->td_cnt >= urb_priv->length) {
1000 modified = completed = 1; 1018 modified = completed = 1;
1001 finish_urb(ohci, urb, 0); 1019 finish_urb(ohci, urb, 0);
1002 } 1020 }
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1086 urb_priv->td_cnt++; 1104 urb_priv->td_cnt++;
1087 1105
1088 /* If all this urb's TDs are done, call complete() */ 1106 /* If all this urb's TDs are done, call complete() */
1089 if (urb_priv->td_cnt == urb_priv->length) 1107 if (urb_priv->td_cnt >= urb_priv->length)
1090 finish_urb(ohci, urb, status); 1108 finish_urb(ohci, urb, status);
1091 1109
1092 /* clean schedule: unlink EDs that are no longer busy */ 1110 /* clean schedule: unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
293 .remove = usb_hcd_pci_remove, 293 .remove = usb_hcd_pci_remove,
294 .shutdown = uhci_shutdown, 294 .shutdown = uhci_shutdown,
295 295
296#ifdef CONFIG_PM_SLEEP 296#ifdef CONFIG_PM
297 .driver = { 297 .driver = {
298 .pm = &usb_hcd_pci_pm_ops 298 .pm = &usb_hcd_pci_pm_ops
299 }, 299 },
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1303 } 1303 }
1304 1304
1305 /* Fell behind? */ 1305 /* Fell behind? */
1306 if (uhci_frame_before_eq(frame, next)) { 1306 if (!uhci_frame_before_eq(next, frame)) {
1307 1307
1308 /* USB_ISO_ASAP: Round up to the first available slot */ 1308 /* USB_ISO_ASAP: Round up to the first available slot */
1309 if (urb->transfer_flags & URB_ISO_ASAP) 1309 if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1311 -qh->period; 1311 -qh->period;
1312 1312
1313 /* 1313 /*
1314 * Not ASAP: Use the next slot in the stream. If 1314 * Not ASAP: Use the next slot in the stream,
1315 * the entire URB falls before the threshold, fail. 1315 * no matter what.
1316 */ 1316 */
1317 else if (!uhci_frame_before_eq(next, 1317 else if (!uhci_frame_before_eq(next,
1318 frame + (urb->number_of_packets - 1) * 1318 frame + (urb->number_of_packets - 1) *
1319 qh->period)) 1319 qh->period))
1320 return -EXDEV; 1320 dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
1321 urb, frame,
1322 (urb->number_of_packets - 1) *
1323 qh->period,
1324 next);
1321 } 1325 }
1322 } 1326 }
1323 1327
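Together with the OHCI and imx21 hunks above, this makes the host controller drivers treat an isochronous URB whose scheduled slot has already passed as an implicit URB_ISO_ASAP underrun (logged, then rescheduled) rather than failing it with -EXDEV. The frame comparisons involved must be wraparound-safe, since frame counters are small and wrap frequently; a standalone sketch of the signed-difference idiom the drivers use (function name is made up):

    #include <stdio.h>
    #include <stdint.h>

    /* True if frame t1 comes (cyclically) before frame t2. */
    static int frame_before(uint16_t t1, uint16_t t2)
    {
        return (int16_t)(t1 - t2) < 0;
    }

    int main(void)
    {
        printf("%d\n", frame_before(10, 20));       /* 1: plainly earlier      */
        printf("%d\n", frame_before(65530, 5));     /* 1: earlier, across wrap */
        printf("%d\n", frame_before(5, 65530));     /* 0: later, across wrap   */
        return 0;
    }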
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..773a6b28c4f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) 287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); 288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
289 } 289 }
290 cmd->command_trb = xhci->cmd_ring->enqueue; 290 cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); 291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); 292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
293 xhci_ring_cmd_db(xhci); 293 xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
552 * - Mark a port as being done with device resume, 552 * - Mark a port as being done with device resume,
553 * and ring the endpoint doorbells. 553 * and ring the endpoint doorbells.
554 * - Stop the Synopsys redriver Compliance Mode polling. 554 * - Stop the Synopsys redriver Compliance Mode polling.
555 * - Drop and reacquire the xHCI lock, in order to wait for port resume.
555 */ 556 */
556static u32 xhci_get_port_status(struct usb_hcd *hcd, 557static u32 xhci_get_port_status(struct usb_hcd *hcd,
557 struct xhci_bus_state *bus_state, 558 struct xhci_bus_state *bus_state,
558 __le32 __iomem **port_array, 559 __le32 __iomem **port_array,
559 u16 wIndex, u32 raw_port_status) 560 u16 wIndex, u32 raw_port_status,
561 unsigned long flags)
562 __releases(&xhci->lock)
563 __acquires(&xhci->lock)
560{ 564{
561 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 565 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
562 u32 status = 0; 566 u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
591 return 0xffffffff; 595 return 0xffffffff;
592 if (time_after_eq(jiffies, 596 if (time_after_eq(jiffies,
593 bus_state->resume_done[wIndex])) { 597 bus_state->resume_done[wIndex])) {
598 int time_left;
599
594 xhci_dbg(xhci, "Resume USB2 port %d\n", 600 xhci_dbg(xhci, "Resume USB2 port %d\n",
595 wIndex + 1); 601 wIndex + 1);
596 bus_state->resume_done[wIndex] = 0; 602 bus_state->resume_done[wIndex] = 0;
597 clear_bit(wIndex, &bus_state->resuming_ports); 603 clear_bit(wIndex, &bus_state->resuming_ports);
604
605 set_bit(wIndex, &bus_state->rexit_ports);
598 xhci_set_link_state(xhci, port_array, wIndex, 606 xhci_set_link_state(xhci, port_array, wIndex,
599 XDEV_U0); 607 XDEV_U0);
600 xhci_dbg(xhci, "set port %d resume\n", 608
601 wIndex + 1); 609 spin_unlock_irqrestore(&xhci->lock, flags);
602 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 610 time_left = wait_for_completion_timeout(
603 wIndex + 1); 611 &bus_state->rexit_done[wIndex],
604 if (!slot_id) { 612 msecs_to_jiffies(
605 xhci_dbg(xhci, "slot_id is zero\n"); 613 XHCI_MAX_REXIT_TIMEOUT));
606 return 0xffffffff; 614 spin_lock_irqsave(&xhci->lock, flags);
615
616 if (time_left) {
617 slot_id = xhci_find_slot_id_by_port(hcd,
618 xhci, wIndex + 1);
619 if (!slot_id) {
620 xhci_dbg(xhci, "slot_id is zero\n");
621 return 0xffffffff;
622 }
623 xhci_ring_device(xhci, slot_id);
624 } else {
625 int port_status = xhci_readl(xhci,
626 port_array[wIndex]);
627 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
628 XHCI_MAX_REXIT_TIMEOUT,
629 port_status);
630 status |= USB_PORT_STAT_SUSPEND;
631 clear_bit(wIndex, &bus_state->rexit_ports);
607 } 632 }
608 xhci_ring_device(xhci, slot_id); 633
609 bus_state->port_c_suspend |= 1 << wIndex; 634 bus_state->port_c_suspend |= 1 << wIndex;
610 bus_state->suspended_ports &= ~(1 << wIndex); 635 bus_state->suspended_ports &= ~(1 << wIndex);
611 } else { 636 } else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
728 break; 753 break;
729 } 754 }
730 status = xhci_get_port_status(hcd, bus_state, port_array, 755 status = xhci_get_port_status(hcd, bus_state, port_array,
731 wIndex, temp); 756 wIndex, temp, flags);
732 if (status == 0xffffffff) 757 if (status == 0xffffffff)
733 goto error; 758 goto error;
734 759
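Instead of assuming the port reaches U0 immediately, the hub code now marks the port in rexit_ports and sleeps on a per-port completion that the port-status interrupt handler signals, with a 20 ms ceiling (XHCI_MAX_REXIT_TIMEOUT). Because the wait sleeps, the xhci spinlock is dropped around it and retaken afterwards. A condensed fragment of the waiting side, using the fields added in this series:

    int time_left;

    set_bit(wIndex, &bus_state->rexit_ports);      /* tell the IRQ path we are waiting */
    xhci_set_link_state(xhci, port_array, wIndex, XDEV_U0);

    spin_unlock_irqrestore(&xhci->lock, flags);    /* cannot sleep under the lock */
    time_left = wait_for_completion_timeout(&bus_state->rexit_done[wIndex],
                                            msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT));
    spin_lock_irqsave(&xhci->lock, flags);

    if (!time_left)                                /* resume never completed */
        clear_bit(wIndex, &bus_state->rexit_ports);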
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2428 for (i = 0; i < USB_MAXCHILDREN; ++i) { 2428 for (i = 0; i < USB_MAXCHILDREN; ++i) {
2429 xhci->bus_state[0].resume_done[i] = 0; 2429 xhci->bus_state[0].resume_done[i] = 0;
2430 xhci->bus_state[1].resume_done[i] = 0; 2430 xhci->bus_state[1].resume_done[i] = 0;
2431 /* Only the USB 2.0 completions will ever be used. */
2432 init_completion(&xhci->bus_state[1].rexit_done[i]);
2431 } 2433 }
2432 2434
2433 if (scratchpad_alloc(xhci, flags)) 2435 if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..236c3aabe940 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
351 /* suspend and resume implemented later */ 351 /* suspend and resume implemented later */
352 352
353 .shutdown = usb_hcd_pci_shutdown, 353 .shutdown = usb_hcd_pci_shutdown,
354#ifdef CONFIG_PM_SLEEP 354#ifdef CONFIG_PM
355 .driver = { 355 .driver = {
356 .pm = &usb_hcd_pci_pm_ops 356 .pm = &usb_hcd_pci_pm_ops
357 }, 357 },
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
123 return TRB_TYPE_LINK_LE32(link->control); 123 return TRB_TYPE_LINK_LE32(link->control);
124} 124}
125 125
126union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
127{
128 /* Enqueue pointer can be left pointing to the link TRB,
129 * we must handle that
130 */
131 if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
132 return ring->enq_seg->next->trbs;
133 return ring->enqueue;
134}
135
126/* Updates trb to point to the next TRB in the ring, and updates seg if the next 136/* Updates trb to point to the next TRB in the ring, and updates seg if the next
127 * TRB is in a new segment. This does not skip over link TRBs, and it does not 137 * TRB is in a new segment. This does not skip over link TRBs, and it does not
128 * effect the ring dequeue or enqueue pointers. 138 * effect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
859 /* Otherwise ring the doorbell(s) to restart queued transfers */ 869 /* Otherwise ring the doorbell(s) to restart queued transfers */
860 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 870 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
861 } 871 }
862 ep->stopped_td = NULL; 872
863 ep->stopped_trb = NULL; 873 /* Clear stopped_td and stopped_trb if endpoint is not halted */
874 if (!(ep->ep_state & EP_HALTED)) {
875 ep->stopped_td = NULL;
876 ep->stopped_trb = NULL;
877 }
864 878
865 /* 879 /*
866 * Drop the lock and complete the URBs in the cancelled TD list. 880 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1414 inc_deq(xhci, xhci->cmd_ring); 1428 inc_deq(xhci, xhci->cmd_ring);
1415 return; 1429 return;
1416 } 1430 }
1431 /* There is no command to handle if we get a stop event when the
1432 * command ring is empty, event->cmd_trb points to the next
1433 * unset command
1434 */
1435 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1436 return;
1417 } 1437 }
1418 1438
1419 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) 1439 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
1743 } 1763 }
1744 } 1764 }
1745 1765
1766 /*
1767 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
 1768 * RExit to a disconnect state). If so, let the driver know it's
1769 * out of the RExit state.
1770 */
1771 if (!DEV_SUPERSPEED(temp) &&
1772 test_and_clear_bit(faked_port_index,
1773 &bus_state->rexit_ports)) {
1774 complete(&bus_state->rexit_done[faked_port_index]);
1775 bogus_port_status = true;
1776 goto cleanup;
1777 }
1778
1746 if (hcd->speed != HCD_USB3) 1779 if (hcd->speed != HCD_USB3)
1747 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1780 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1748 PORT_PLC); 1781 PORT_PLC);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..1e36dbb48366 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2598 if (command) { 2598 if (command) {
2599 cmd_completion = command->completion; 2599 cmd_completion = command->completion;
2600 cmd_status = &command->status; 2600 cmd_status = &command->status;
2601 command->command_trb = xhci->cmd_ring->enqueue; 2601 command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2602
2603 /* Enqueue pointer can be left pointing to the link TRB,
2604 * we must handle that
2605 */
2606 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2607 command->command_trb =
2608 xhci->cmd_ring->enq_seg->next->trbs;
2609
2610 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2602 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2611 } else { 2603 } else {
2612 cmd_completion = &virt_dev->cmd_completion; 2604 cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2614 } 2606 }
2615 init_completion(cmd_completion); 2607 init_completion(cmd_completion);
2616 2608
2617 cmd_trb = xhci->cmd_ring->dequeue; 2609 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2618 if (!ctx_change) 2610 if (!ctx_change)
2619 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2611 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2620 udev->slot_id, must_succeed); 2612 udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3439 3431
3440 /* Attempt to submit the Reset Device command to the command ring */ 3432 /* Attempt to submit the Reset Device command to the command ring */
3441 spin_lock_irqsave(&xhci->lock, flags); 3433 spin_lock_irqsave(&xhci->lock, flags);
3442 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3434 reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3443
3444 /* Enqueue pointer can be left pointing to the link TRB,
3445 * we must handle that
3446 */
3447 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3448 reset_device_cmd->command_trb =
3449 xhci->cmd_ring->enq_seg->next->trbs;
3450 3435
3451 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3436 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3452 ret = xhci_queue_reset_device(xhci, slot_id); 3437 ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3650 union xhci_trb *cmd_trb; 3635 union xhci_trb *cmd_trb;
3651 3636
3652 spin_lock_irqsave(&xhci->lock, flags); 3637 spin_lock_irqsave(&xhci->lock, flags);
3653 cmd_trb = xhci->cmd_ring->dequeue; 3638 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3654 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3639 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3655 if (ret) { 3640 if (ret) {
3656 spin_unlock_irqrestore(&xhci->lock, flags); 3641 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3785 slot_ctx->dev_info >> 27); 3770 slot_ctx->dev_info >> 27);
3786 3771
3787 spin_lock_irqsave(&xhci->lock, flags); 3772 spin_lock_irqsave(&xhci->lock, flags);
3788 cmd_trb = xhci->cmd_ring->dequeue; 3773 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3789 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3774 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3790 udev->slot_id); 3775 udev->slot_id);
3791 if (ret) { 3776 if (ret) {
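
Editor's note: the xhci.c hunks above replace the repeated open-coded "enqueue may be resting on a link TRB, skip to the next segment" logic (and an incorrect use of the dequeue pointer) with one helper. The sketch below is a rough standalone model of what the removed open-coded logic did on a segmented ring; the trb/segment/ring layout and the function name are simplified, not the driver's real structures.

/* Model of a "find next enqueue" helper for a segmented ring whose last
 * slot in each segment is a link entry pointing at the next segment. */
#include <stdbool.h>

#define SEG_SIZE 8

struct trb {
	bool is_link;		/* last TRB of a segment links to the next one */
};

struct segment {
	struct trb trbs[SEG_SIZE];
	struct segment *next;
};

struct ring {
	struct segment *enq_seg;
	struct trb *enqueue;	/* where the next TRB will be written */
};

/* Return the slot the next queued TRB will actually land in: if the enqueue
 * pointer currently rests on a link TRB, the real slot is the first TRB of
 * the following segment. */
static struct trb *find_next_enqueue(struct ring *ring)
{
	if (ring->enqueue->is_link)
		return &ring->enq_seg->next->trbs[0];
	return ring->enqueue;
}
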
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..289fbfbae746 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
1412 unsigned long resume_done[USB_MAXCHILDREN]; 1412 unsigned long resume_done[USB_MAXCHILDREN];
1413 /* which ports have started to resume */ 1413 /* which ports have started to resume */
1414 unsigned long resuming_ports; 1414 unsigned long resuming_ports;
1415 /* Which ports are waiting on RExit to U0 transition. */
1416 unsigned long rexit_ports;
1417 struct completion rexit_done[USB_MAXCHILDREN];
1415}; 1418};
1416 1419
1420
1421/*
1422 * It can take up to 20 ms to transition from RExit to U0 on the
1423 * Intel Lynx Point LP xHCI host.
1424 */
1425#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
1426
1417static inline unsigned int hcd_index(struct usb_hcd *hcd) 1427static inline unsigned int hcd_index(struct usb_hcd *hcd)
1418{ 1428{
1419 if (hcd->speed == HCD_USB3) 1429 if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1840 union xhci_trb *cmd_trb); 1850 union xhci_trb *cmd_trb);
1841void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 1851void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1842 unsigned int ep_index, unsigned int stream_id); 1852 unsigned int ep_index, unsigned int stream_id);
1853union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
1843 1854
1844/* xHCI roothub code */ 1855/* xHCI roothub code */
1845void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, 1856void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 4047cbb91bac..bd4138d80a48 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
535 struct dsps_glue *glue; 535 struct dsps_glue *glue;
536 int ret; 536 int ret;
537 537
538 if (!strcmp(pdev->name, "musb-hdrc"))
539 return -ENODEV;
540
538 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); 541 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
539 if (!match) { 542 if (!match) {
540 dev_err(&pdev->dev, "fail to get matching of_match struct\n"); 543 dev_err(&pdev->dev, "fail to get matching of_match struct\n");
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9a08679d204d..b19ed213ab85 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
1790 musb->g.max_speed = USB_SPEED_HIGH; 1790 musb->g.max_speed = USB_SPEED_HIGH;
1791 musb->g.speed = USB_SPEED_UNKNOWN; 1791 musb->g.speed = USB_SPEED_UNKNOWN;
1792 1792
1793 MUSB_DEV_MODE(musb);
1794 musb->xceiv->otg->default_a = 0;
1795 musb->xceiv->state = OTG_STATE_B_IDLE;
1796
1793 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1794 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1795 musb->g.is_otg = 1; 1799 musb->g.is_otg = 1;
@@ -1849,7 +1853,6 @@ static int musb_gadget_start(struct usb_gadget *g,
1849 musb->gadget_driver = driver; 1853 musb->gadget_driver = driver;
1850 1854
1851 spin_lock_irqsave(&musb->lock, flags); 1855 spin_lock_irqsave(&musb->lock, flags);
1852 musb->is_active = 1;
1853 1856
1854 otg_set_peripheral(otg, &musb->g); 1857 otg_set_peripheral(otg, &musb->g);
1855 musb->xceiv->state = OTG_STATE_B_IDLE; 1858 musb->xceiv->state = OTG_STATE_B_IDLE;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index b2f29c9aebbf..02799a5efcd4 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
241 241
242/* platform driver interface */ 242/* platform driver interface */
243 243
244static int __init gpio_vbus_probe(struct platform_device *pdev) 244static int gpio_vbus_probe(struct platform_device *pdev)
245{ 245{
246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
247 struct gpio_vbus_data *gpio_vbus; 247 struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
349 return err; 349 return err;
350} 350}
351 351
352static int __exit gpio_vbus_remove(struct platform_device *pdev) 352static int gpio_vbus_remove(struct platform_device *pdev)
353{ 353{
354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); 354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
398}; 398};
399#endif 399#endif
400 400
401/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
402
403MODULE_ALIAS("platform:gpio-vbus"); 401MODULE_ALIAS("platform:gpio-vbus");
404 402
405static struct platform_driver gpio_vbus_driver = { 403static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
410 .pm = &gpio_vbus_dev_pm_ops, 408 .pm = &gpio_vbus_dev_pm_ops,
411#endif 409#endif
412 }, 410 },
413 .remove = __exit_p(gpio_vbus_remove), 411 .probe = gpio_vbus_probe,
412 .remove = gpio_vbus_remove,
414}; 413};
415 414
416module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); 415module_platform_driver(gpio_vbus_driver);
417 416
418MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); 417MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
419MODULE_AUTHOR("Philipp Zabel"); 418MODULE_AUTHOR("Philipp Zabel");
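
Editor's note: the gpio-vbus change drops the __init/__exit annotations and module_platform_driver_probe(), so the device can bind after boot (the removed comment explicitly said the device could not be hotplugged before). Below is a schematic of the resulting registration pattern only; "example_probe", "example-vbus" and friends are placeholders, and the driver body is omitted.

/* Skeleton of a hotplug-capable platform driver: ordinary probe/remove
 * callbacks registered with module_platform_driver(). Placeholder names. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound\n");
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "unbound\n");
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-vbus",
	},
	.probe = example_probe,
	.remove = example_remove,
};

module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
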
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1cf6f125f5f0..80a7104d5ddb 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
81 81
82#define HUAWEI_VENDOR_ID 0x12D1 82#define HUAWEI_VENDOR_ID 0x12D1
83#define HUAWEI_PRODUCT_E173 0x140C 83#define HUAWEI_PRODUCT_E173 0x140C
84#define HUAWEI_PRODUCT_E1750 0x1406
84#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
85#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
86#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
@@ -567,6 +568,8 @@ static const struct usb_device_id option_ids[] = {
567 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 569 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
569 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 570 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
572 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
570 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 574 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 592b31698fc8..ce5221fa393a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
728 } 728 }
729 se_sess = tv_nexus->tvn_se_sess; 729 se_sess = tv_nexus->tvn_se_sess;
730 730
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
735 }
736
732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
733 sg = cmd->tvc_sgl; 738 sg = cmd->tvc_sgl;
734 pages = cmd->tvc_upages; 739 pages = cmd->tvc_upages;
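
Editor's note: the vhost-scsi hunk switches percpu_ida_alloc() to GFP_ATOMIC, presumably because the caller cannot sleep here, and since an atomic allocation can fail it checks for a negative tag before indexing the command map. A condensed, self-contained sketch of that guarded-allocation shape; get_tag(), get_cmd() and MAX_TAGS are stand-ins, not the vhost code.

/* A non-blocking allocator that may fail must have its result checked
 * before it is used as an array index. */
#include <stdio.h>

#define MAX_TAGS 64

static int tags_in_use;

/* returns a tag >= 0, or -1 when the pool is exhausted (never blocks) */
static int get_tag(void)
{
	if (tags_in_use >= MAX_TAGS)
		return -1;
	return tags_in_use++;
}

struct cmd { int tag; };
static struct cmd cmd_map[MAX_TAGS];

static struct cmd *get_cmd(void)
{
	int tag = get_tag();

	if (tag < 0) {
		fprintf(stderr, "unable to obtain tag\n");
		return NULL;		/* the driver returns ERR_PTR(-ENOMEM) */
	}
	cmd_map[tag].tag = tag;
	return &cmd_map[tag];
}

int main(void)
{
	return get_cmd() ? 0 : 1;
}
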
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
514 if (IS_ERR(ctrl->clk)) { 514 if (IS_ERR(ctrl->clk)) {
515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); 515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
516 ret = -ENOENT; 516 ret = -ENOENT;
517 goto failed_get_clk; 517 goto failed;
518 } 518 }
519 clk_prepare_enable(ctrl->clk); 519 clk_prepare_enable(ctrl->clk);
520 520
@@ -551,21 +551,8 @@ failed_path_init:
551 path_deinit(path_plat); 551 path_deinit(path_plat);
552 } 552 }
553 553
554 if (ctrl->clk) { 554 clk_disable_unprepare(ctrl->clk);
555 devm_clk_put(ctrl->dev, ctrl->clk);
556 clk_disable_unprepare(ctrl->clk);
557 }
558failed_get_clk:
559 devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
560failed: 555failed:
561 if (ctrl) {
562 if (ctrl->reg_base)
563 devm_iounmap(ctrl->dev, ctrl->reg_base);
564 devm_release_mem_region(ctrl->dev, res->start,
565 resource_size(res));
566 devm_kfree(ctrl->dev, ctrl);
567 }
568
569 dev_err(&pdev->dev, "device init failed\n"); 556 dev_err(&pdev->dev, "device init failed\n");
570 557
571 return ret; 558 return ret;
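
Editor's note: the mmp_ctrl hunk deletes hand-rolled teardown of device-managed resources (devm_kfree, devm_iounmap, devm_release_mem_region, devm_free_irq, devm_clk_put) from the probe error path, since those are released automatically when probe fails; only the clock, which was explicitly enabled, still needs clk_disable_unprepare(). A schematic probe showing that division of labour under those assumptions; the names and the shape of the later failing step are illustrative, not the mmphw driver.

/* Sketch: devm_* resources need no manual unwinding on probe failure; only
 * side effects such as an enabled clock are rolled back by hand. */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* devm mapping is released automatically */

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* so is the devm clock reference */

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... further setup; if a later step fails, undo only the enable ... */
	if (0 /* placeholder for a later failing step */) {
		clk_disable_unprepare(clk);
		return -ENODEV;
	}
	return 0;
}
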
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
620 break; 620 break;
621 case 3: 621 case 3:
622 bits_per_pixel = 32; 622 bits_per_pixel = 32;
623 break;
623 case 1: 624 case 1:
624 default: 625 default:
625 return -EINVAL; 626 return -EINVAL;
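
Editor's note: the one-line mxsfb fix adds the break that was missing after the 32-bpp case, which otherwise fell through into the error return. A minimal, compilable reproduction of that bug shape; decode_bpp() is an invented name.

/* Without the marked break, the 32-bpp case falls into the error path. */
#include <stdio.h>

static int decode_bpp(int field)
{
	int bits_per_pixel;

	switch (field) {
	case 0:
		bits_per_pixel = 16;
		break;
	case 3:
		bits_per_pixel = 32;
		break;		/* the missing statement: without it we return -1 */
	case 1:
	default:
		return -1;
	}
	return bits_per_pixel;
}

int main(void)
{
	printf("%d\n", decode_bpp(3));	/* 32 with the break, -1 without it */
	return 0;
}
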
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, 2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
2076 info->monspecs.modedb, 16)) { 2076 info->monspecs.modedb, 16)) {
2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); 2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
2078 err = -EINVAL;
2078 goto err_map_video; 2079 goto err_map_video;
2079 } 2080 }
2080 2081
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2097 info->fix.smem_len >> 10, info->var.xres, 2098 info->fix.smem_len >> 10, info->var.xres,
2098 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); 2099 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
2099 2100
2100 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2101 err = fb_alloc_cmap(&info->cmap, 256, 0);
2102 if (err < 0)
2101 goto err_map_video; 2103 goto err_map_video;
2102 2104
2103 err = register_framebuffer(info); 2105 err = register_framebuffer(info);
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 timing_np = of_find_node_by_name(np, name); 123 timing_np = of_get_child_by_name(np, name);
124 if (!timing_np) { 124 if (!timing_np) {
125 pr_err("%s: could not find node '%s'\n", 125 pr_err("%s: could not find node '%s'\n",
126 of_node_full_name(np), name); 126 of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
143 struct display_timings *disp; 143 struct display_timings *disp;
144 144
145 if (!np) { 145 if (!np) {
146 pr_err("%s: no devicenode given\n", of_node_full_name(np)); 146 pr_err("%s: no device node given\n", of_node_full_name(np));
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 timings_np = of_find_node_by_name(np, "display-timings"); 150 timings_np = of_get_child_by_name(np, "display-timings");
151 if (!timings_np) { 151 if (!timings_np) {
152 pr_err("%s: could not find display-timings node\n", 152 pr_err("%s: could not find display-timings node\n",
153 of_node_full_name(np)); 153 of_node_full_name(np));
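
Editor's note: the of_display_timing change matters because of_find_node_by_name() continues a depth-first search of the whole device tree from the given starting point, while of_get_child_by_name() looks only at direct children, so a like-named node elsewhere in the tree can no longer be picked up by mistake. The toy lookup below illustrates the "children only" semantics; the node structure and function name are invented for the example.

/* Toy model of a "direct children only" lookup. */
#include <stddef.h>
#include <string.h>

struct node {
	const char *name;
	struct node *child;	/* first child */
	struct node *sibling;	/* next node at the same level */
};

/* Search only the immediate children of parent -- a deeper node with the
 * same name is deliberately not reachable from here. */
static struct node *get_child_by_name(struct node *parent, const char *name)
{
	struct node *np;

	for (np = parent->child; np; np = np->sibling)
		if (strcmp(np->name, name) == 0)
			return np;
	return NULL;
}
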
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
35 35
36config DISPLAY_PANEL_DSI_CM 36config DISPLAY_PANEL_DSI_CM
37 tristate "Generic DSI Command Mode Panel" 37 tristate "Generic DSI Command Mode Panel"
38 depends on BACKLIGHT_CLASS_DEVICE
38 help 39 help
39 Driver for generic DSI command mode panels. 40 Driver for generic DSI command mode panels.
40 41
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
191 in = omap_dss_find_output(pdata->source); 191 in = omap_dss_find_output(pdata->source);
192 if (in == NULL) { 192 if (in == NULL) {
193 dev_err(&pdev->dev, "Failed to find video source\n"); 193 dev_err(&pdev->dev, "Failed to find video source\n");
194 return -ENODEV; 194 return -EPROBE_DEFER;
195 } 195 }
196 196
197 ddata->in = in; 197 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 dev_err(&pdev->dev, "Failed to find video source\n"); 265 dev_err(&pdev->dev, "Failed to find video source\n");
266 return -ENODEV; 266 return -EPROBE_DEFER;
267 } 267 }
268 268
269 ddata->in = in; 269 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
290 in = omap_dss_find_output(pdata->source); 290 in = omap_dss_find_output(pdata->source);
291 if (in == NULL) { 291 if (in == NULL) {
292 dev_err(&pdev->dev, "Failed to find video source\n"); 292 dev_err(&pdev->dev, "Failed to find video source\n");
293 return -ENODEV; 293 return -EPROBE_DEFER;
294 } 294 }
295 295
296 ddata->in = in; 296 ddata->in = in;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3691 } 3691 }
3692 3692
3693 pm_runtime_enable(&pdev->dev); 3693 pm_runtime_enable(&pdev->dev);
3694 pm_runtime_irq_safe(&pdev->dev);
3694 3695
3695 r = dispc_runtime_get(); 3696 r = dispc_runtime_get();
3696 if (r) 3697 if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 (info->var.bits_per_pixel * info->var.xres_virtual); 1336 (info->var.bits_per_pixel * info->var.xres_virtual);
1337 if (info->var.yres_virtual < info->var.yres) { 1337 if (info->var.yres_virtual < info->var.yres) {
1338 dev_err(info->device, "virtual vertical size smaller than real\n"); 1338 dev_err(info->device, "virtual vertical size smaller than real\n");
1339 goto err_find_mode; 1339 rc = -EINVAL;
1340 }
1341
1342 /* maximize virtual vertical size for fast scrolling */
1343 info->var.yres_virtual = info->fix.smem_len * 8 /
1344 (info->var.bits_per_pixel * info->var.xres_virtual);
1345 if (info->var.yres_virtual < info->var.yres) {
1346 dev_err(info->device, "virtual vertical size smaller than real\n");
1347 goto err_find_mode; 1340 goto err_find_mode;
1348 } 1341 }
1349 1342
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 5be5e3d14f79..19f3c3fc65f4 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev,
802 return -ENODEV; 802 return -ENODEV;
803 } 803 }
804 804
805 /*
806 * Ignore all auxiliary iLO devices with the following PCI ID
807 */
808 if (dev->subsystem_device == 0x1979)
809 return -ENODEV;
810
805 if (pci_enable_device(dev)) { 811 if (pci_enable_device(dev)) {
806 dev_warn(&dev->dev, 812 dev_warn(&dev->dev,
807 "Not possible to enable PCI Device: 0x%x:0x%x.\n", 813 "Not possible to enable PCI Device: 0x%x:0x%x.\n",
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 491419e0772a..5c3d4df63e68 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -35,7 +35,7 @@
35#define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4) 35#define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
36#define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x)) 36#define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
37#define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4) 37#define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
38#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4) 38#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
39#define STAGE_CFG_PRESCALER_MASK 0x30 39#define STAGE_CFG_PRESCALER_MASK 0x30
40#define STAGE_CFG_ACTION_MASK 0x7 40#define STAGE_CFG_ACTION_MASK 0x7
41#define STAGE_CFG_ASSERT (1 << 3) 41#define STAGE_CFG_ASSERT (1 << 3)
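
Editor's note: in the kempld_wdt fix, the prescaler occupies bits 5:4, so the SET macro must mask the incoming two-bit value before shifting it into place; masking with 0x30 first, as the old macro did, discarded any legal value. A small standalone check of the corrected get/set pair, copied from the hunk above.

/* Corrected pair for a 2-bit field stored at bits 5:4: set masks the raw
 * value before shifting, get masks the register value after shifting down. */
#include <assert.h>

#define STAGE_CFG_GET_PRESCALER(x)	(((x) & 0x30) >> 4)
#define STAGE_CFG_SET_PRESCALER(x)	(((x) & 0x3) << 4)

int main(void)
{
	/* round-trips for every legal prescaler value */
	for (int v = 0; v < 4; v++)
		assert(STAGE_CFG_GET_PRESCALER(STAGE_CFG_SET_PRESCALER(v)) == v);
	return 0;
}
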
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 1f94b42764aa..f6caa77151c7 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = {
146 .set_timeout = sunxi_wdt_set_timeout, 146 .set_timeout = sunxi_wdt_set_timeout,
147}; 147};
148 148
149static int __init sunxi_wdt_probe(struct platform_device *pdev) 149static int sunxi_wdt_probe(struct platform_device *pdev)
150{ 150{
151 struct sunxi_wdt_dev *sunxi_wdt; 151 struct sunxi_wdt_dev *sunxi_wdt;
152 struct resource *res; 152 struct resource *res;
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev)
187 return 0; 187 return 0;
188} 188}
189 189
190static int __exit sunxi_wdt_remove(struct platform_device *pdev) 190static int sunxi_wdt_remove(struct platform_device *pdev)
191{ 191{
192 struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); 192 struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
193 193
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 42913f131dc2..c9b0c627fe7e 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
310 310
311 case WDIOC_GETSTATUS: 311 case WDIOC_GETSTATUS:
312 case WDIOC_GETBOOTSTATUS: 312 case WDIOC_GETBOOTSTATUS:
313 return put_user(0, p); 313 error = put_user(0, p);
314 break;
314 315
315 case WDIOC_KEEPALIVE: 316 case WDIOC_KEEPALIVE:
316 ts72xx_wdt_kick(wdt); 317 ts72xx_wdt_kick(wdt);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
398 if (nr_pages > ARRAY_SIZE(frame_list)) 398 if (nr_pages > ARRAY_SIZE(frame_list))
399 nr_pages = ARRAY_SIZE(frame_list); 399 nr_pages = ARRAY_SIZE(frame_list);
400 400
401 scratch_page = get_balloon_scratch_page();
402
403 for (i = 0; i < nr_pages; i++) { 401 for (i = 0; i < nr_pages; i++) {
404 page = alloc_page(gfp); 402 page = alloc_page(gfp);
405 if (page == NULL) { 403 if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
413 411
414 scrub_page(page); 412 scrub_page(page);
415 413
414 /*
415 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent.
418 */
419 scratch_page = get_balloon_scratch_page();
416#ifdef CONFIG_XEN_HAVE_PVMMU 420#ifdef CONFIG_XEN_HAVE_PVMMU
417 if (xen_pv_domain() && !PageHighMem(page)) { 421 if (xen_pv_domain() && !PageHighMem(page)) {
418 ret = HYPERVISOR_update_va_mapping( 422 ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
422 BUG_ON(ret); 426 BUG_ON(ret);
423 } 427 }
424#endif 428#endif
425 }
426
427 /* Ensure that ballooned highmem pages don't have kmaps. */
428 kmap_flush_unused();
429 flush_tlb_all();
430
431 /* No more mappings: invalidate P2M and add to balloon. */
432 for (i = 0; i < nr_pages; i++) {
433 pfn = mfn_to_pfn(frame_list[i]);
434 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 429 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
435 unsigned long p; 430 unsigned long p;
436 p = page_to_pfn(scratch_page); 431 p = page_to_pfn(scratch_page);
437 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 432 __set_phys_to_machine(pfn, pfn_to_mfn(p));
438 } 433 }
434 put_balloon_scratch_page();
435
439 balloon_append(pfn_to_page(pfn)); 436 balloon_append(pfn_to_page(pfn));
440 } 437 }
441 438
442 put_balloon_scratch_page(); 439 /* Ensure that ballooned highmem pages don't have kmaps. */
440 kmap_flush_unused();
441 flush_tlb_all();
443 442
444 set_xen_guest_handle(reservation.extent_start, frame_list); 443 set_xen_guest_handle(reservation.extent_start, frame_list);
445 reservation.nr_extents = nr_pages; 444 reservation.nr_extents = nr_pages;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 646337dc5201..529300327f45 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -600,9 +600,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
600 600
601 /* lock down the parent dentry so we can peer at it */ 601 /* lock down the parent dentry so we can peer at it */
602 parent = dget_parent(dentry); 602 parent = dget_parent(dentry);
603 if (!parent->d_inode)
604 goto out_bad;
605
606 dir = AFS_FS_I(parent->d_inode); 603 dir = AFS_FS_I(parent->d_inode);
607 604
608 /* validate the parent directory */ 605 /* validate the parent directory */
diff --git a/fs/aio.c b/fs/aio.c
index 6b868f0e0c4c..067e3d340c35 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,10 +167,25 @@ static int __init aio_setup(void)
167} 167}
168__initcall(aio_setup); 168__initcall(aio_setup);
169 169
170static void put_aio_ring_file(struct kioctx *ctx)
171{
172 struct file *aio_ring_file = ctx->aio_ring_file;
173 if (aio_ring_file) {
174 truncate_setsize(aio_ring_file->f_inode, 0);
175
176 /* Prevent further access to the kioctx from migratepages */
177 spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
178 aio_ring_file->f_inode->i_mapping->private_data = NULL;
179 ctx->aio_ring_file = NULL;
180 spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
181
182 fput(aio_ring_file);
183 }
184}
185
170static void aio_free_ring(struct kioctx *ctx) 186static void aio_free_ring(struct kioctx *ctx)
171{ 187{
172 int i; 188 int i;
173 struct file *aio_ring_file = ctx->aio_ring_file;
174 189
175 for (i = 0; i < ctx->nr_pages; i++) { 190 for (i = 0; i < ctx->nr_pages; i++) {
176 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 191 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -178,14 +193,10 @@ static void aio_free_ring(struct kioctx *ctx)
178 put_page(ctx->ring_pages[i]); 193 put_page(ctx->ring_pages[i]);
179 } 194 }
180 195
196 put_aio_ring_file(ctx);
197
181 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 198 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
182 kfree(ctx->ring_pages); 199 kfree(ctx->ring_pages);
183
184 if (aio_ring_file) {
185 truncate_setsize(aio_ring_file->f_inode, 0);
186 fput(aio_ring_file);
187 ctx->aio_ring_file = NULL;
188 }
189} 200}
190 201
191static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 202static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -207,9 +218,8 @@ static int aio_set_page_dirty(struct page *page)
207static int aio_migratepage(struct address_space *mapping, struct page *new, 218static int aio_migratepage(struct address_space *mapping, struct page *new,
208 struct page *old, enum migrate_mode mode) 219 struct page *old, enum migrate_mode mode)
209{ 220{
210 struct kioctx *ctx = mapping->private_data; 221 struct kioctx *ctx;
211 unsigned long flags; 222 unsigned long flags;
212 unsigned idx = old->index;
213 int rc; 223 int rc;
214 224
215 /* Writeback must be complete */ 225 /* Writeback must be complete */
@@ -224,10 +234,23 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
224 234
225 get_page(new); 235 get_page(new);
226 236
227 spin_lock_irqsave(&ctx->completion_lock, flags); 237 /* We can potentially race against kioctx teardown here. Use the
228 migrate_page_copy(new, old); 238 * address_space's private data lock to protect the mapping's
229 ctx->ring_pages[idx] = new; 239 * private_data.
230 spin_unlock_irqrestore(&ctx->completion_lock, flags); 240 */
241 spin_lock(&mapping->private_lock);
242 ctx = mapping->private_data;
243 if (ctx) {
244 pgoff_t idx;
245 spin_lock_irqsave(&ctx->completion_lock, flags);
246 migrate_page_copy(new, old);
247 idx = old->index;
248 if (idx < (pgoff_t)ctx->nr_pages)
249 ctx->ring_pages[idx] = new;
250 spin_unlock_irqrestore(&ctx->completion_lock, flags);
251 } else
252 rc = -EBUSY;
253 spin_unlock(&mapping->private_lock);
231 254
232 return rc; 255 return rc;
233} 256}
@@ -617,8 +640,7 @@ out_freepcpu:
617out_freeref: 640out_freeref:
618 free_percpu(ctx->users.pcpu_count); 641 free_percpu(ctx->users.pcpu_count);
619out_freectx: 642out_freectx:
620 if (ctx->aio_ring_file) 643 put_aio_ring_file(ctx);
621 fput(ctx->aio_ring_file);
622 kmem_cache_free(kioctx_cachep, ctx); 644 kmem_cache_free(kioctx_cachep, ctx);
623 pr_debug("error allocating ioctx %d\n", err); 645 pr_debug("error allocating ioctx %d\n", err);
624 return ERR_PTR(err); 646 return ERR_PTR(err);
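
Editor's note: the aio hunks close a race between page migration and kioctx teardown. put_aio_ring_file() clears mapping->private_data under mapping->private_lock, and aio_migratepage() now re-reads that pointer under the same lock, bailing out with -EBUSY when it is gone. Below is a userspace pthread sketch of the "clear under the lock, re-check under the lock" pattern only; the names and the -1 return value are illustrative.

/* Sketch of the teardown race: the consumer must re-read the shared pointer
 * under the same lock the teardown path uses to clear it.
 * Build with: cc -pthread ctx_race.c */
#include <pthread.h>
#include <stdio.h>

struct mapping {
	pthread_mutex_t private_lock;
	void *private_data;		/* points at the context, or NULL */
};

static struct mapping m = {
	.private_lock = PTHREAD_MUTEX_INITIALIZER,
};

/* teardown: detach the context before it is freed */
static void teardown(void)
{
	pthread_mutex_lock(&m.private_lock);
	m.private_data = NULL;
	pthread_mutex_unlock(&m.private_lock);
}

/* migration-like path: only touch the context if it is still published */
static int migrate(void)
{
	int rc = 0;

	pthread_mutex_lock(&m.private_lock);
	if (m.private_data) {
		/* safe to use the context while the lock is held */
	} else {
		rc = -1;	/* the kernel code returns -EBUSY here */
	}
	pthread_mutex_unlock(&m.private_lock);
	return rc;
}

int main(void)
{
	int ctx = 42;

	m.private_data = &ctx;
	printf("before teardown: %d\n", migrate());	/* 0  */
	teardown();
	printf("after teardown:  %d\n", migrate());	/* -1 */
	return 0;
}
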
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 100edcc5e312..4c94a79991bb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1413 * long file_ofs 1413 * long file_ofs
1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1415 */ 1415 */
1416static void fill_files_note(struct memelfnote *note) 1416static int fill_files_note(struct memelfnote *note)
1417{ 1417{
1418 struct vm_area_struct *vma; 1418 struct vm_area_struct *vma;
1419 unsigned count, size, names_ofs, remaining, n; 1419 unsigned count, size, names_ofs, remaining, n;
@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note)
1428 names_ofs = (2 + 3 * count) * sizeof(data[0]); 1428 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1429 alloc: 1429 alloc:
1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1431 goto err; 1431 return -EINVAL;
1432 size = round_up(size, PAGE_SIZE); 1432 size = round_up(size, PAGE_SIZE);
1433 data = vmalloc(size); 1433 data = vmalloc(size);
1434 if (!data) 1434 if (!data)
1435 goto err; 1435 return -ENOMEM;
1436 1436
1437 start_end_ofs = data + 2; 1437 start_end_ofs = data + 2;
1438 name_base = name_curpos = ((char *)data) + names_ofs; 1438 name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note)
1485 1485
1486 size = name_curpos - (char *)data; 1486 size = name_curpos - (char *)data;
1487 fill_note(note, "CORE", NT_FILE, size, data); 1487 fill_note(note, "CORE", NT_FILE, size, data);
1488 err: ; 1488 return 0;
1489} 1489}
1490 1490
1491#ifdef CORE_DUMP_USE_REGSET 1491#ifdef CORE_DUMP_USE_REGSET
@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1686 fill_auxv_note(&info->auxv, current->mm); 1686 fill_auxv_note(&info->auxv, current->mm);
1687 info->size += notesize(&info->auxv); 1687 info->size += notesize(&info->auxv);
1688 1688
1689 fill_files_note(&info->files); 1689 if (fill_files_note(&info->files) == 0)
1690 info->size += notesize(&info->files); 1690 info->size += notesize(&info->files);
1691 1691
1692 return 1; 1692 return 1;
1693} 1693}
@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info,
1719 return 0; 1719 return 0;
1720 if (first && !writenote(&info->auxv, file, foffset)) 1720 if (first && !writenote(&info->auxv, file, foffset))
1721 return 0; 1721 return 0;
1722 if (first && !writenote(&info->files, file, foffset)) 1722 if (first && info->files.data &&
1723 !writenote(&info->files, file, foffset))
1723 return 0; 1724 return 0;
1724 1725
1725 for (i = 1; i < info->thread_notes; ++i) 1726 for (i = 1; i < info->thread_notes; ++i)
@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1806 1807
1807struct elf_note_info { 1808struct elf_note_info {
1808 struct memelfnote *notes; 1809 struct memelfnote *notes;
1810 struct memelfnote *notes_files;
1809 struct elf_prstatus *prstatus; /* NT_PRSTATUS */ 1811 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1810 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 1812 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1811 struct list_head thread_list; 1813 struct list_head thread_list;
@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1896 1898
1897 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); 1899 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
1898 fill_auxv_note(info->notes + 3, current->mm); 1900 fill_auxv_note(info->notes + 3, current->mm);
1899 fill_files_note(info->notes + 4); 1901 info->numnote = 4;
1900 1902
1901 info->numnote = 5; 1903 if (fill_files_note(info->notes + info->numnote) == 0) {
1904 info->notes_files = info->notes + info->numnote;
1905 info->numnote++;
1906 }
1902 1907
1903 /* Try to dump the FPU. */ 1908 /* Try to dump the FPU. */
1904 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, 1909 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info)
1960 kfree(list_entry(tmp, struct elf_thread_status, list)); 1965 kfree(list_entry(tmp, struct elf_thread_status, list));
1961 } 1966 }
1962 1967
1963 /* Free data allocated by fill_files_note(): */ 1968 /* Free data possibly allocated by fill_files_note(): */
1964 vfree(info->notes[4].data); 1969 if (info->notes_files)
1970 vfree(info->notes_files->data);
1965 1971
1966 kfree(info->prstatus); 1972 kfree(info->prstatus);
1967 kfree(info->psinfo); 1973 kfree(info->psinfo);
@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2044 struct vm_area_struct *vma, *gate_vma; 2050 struct vm_area_struct *vma, *gate_vma;
2045 struct elfhdr *elf = NULL; 2051 struct elfhdr *elf = NULL;
2046 loff_t offset = 0, dataoff, foffset; 2052 loff_t offset = 0, dataoff, foffset;
2047 struct elf_note_info info; 2053 struct elf_note_info info = { };
2048 struct elf_phdr *phdr4note = NULL; 2054 struct elf_phdr *phdr4note = NULL;
2049 struct elf_shdr *shdr4extnum = NULL; 2055 struct elf_shdr *shdr4extnum = NULL;
2050 Elf_Half e_phnum; 2056 Elf_Half e_phnum;
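
Editor's note: the binfmt_elf hunks make the NT_FILE note optional. fill_files_note() now reports failure, the callers account for and emit the note only when it was built, and free_note_info() frees its buffer only if the notes_files pointer was set. A compact sketch of that optional-resource bookkeeping; the structures and names below are simplified stand-ins.

/* "Optional note" bookkeeping: building may fail, the caller tracks a
 * pointer that stays NULL on failure and frees conditionally. */
#include <stdlib.h>
#include <string.h>

struct note {
	void *data;
	size_t size;
};

static int fill_optional_note(struct note *note, size_t size)
{
	note->data = malloc(size);
	if (!note->data)
		return -1;	/* caller simply skips this note */
	memset(note->data, 0, size);
	note->size = size;
	return 0;
}

struct note_info {
	struct note notes[8];
	struct note *notes_files;	/* NULL unless the note was built */
	int numnote;
};

static void gather_notes(struct note_info *info)
{
	info->numnote = 0;
	info->notes_files = NULL;
	if (fill_optional_note(&info->notes[info->numnote], 4096) == 0)
		info->notes_files = &info->notes[info->numnote++];
}

static void free_notes(struct note_info *info)
{
	if (info->notes_files)
		free(info->notes_files->data);
}

int main(void)
{
	struct note_info info = { 0 };

	gather_notes(&info);
	free_notes(&info);
	return 0;
}
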
diff --git a/fs/bio.c b/fs/bio.c
index b3b20ed9510e..ea5035da4d9a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
917 src_p = kmap_atomic(src_bv->bv_page); 917 src_p = kmap_atomic(src_bv->bv_page);
918 dst_p = kmap_atomic(dst_bv->bv_page); 918 dst_p = kmap_atomic(dst_bv->bv_page);
919 919
920 memcpy(dst_p + dst_bv->bv_offset, 920 memcpy(dst_p + dst_offset,
921 src_p + src_bv->bv_offset, 921 src_p + src_offset,
922 bytes); 922 bytes);
923 923
924 kunmap_atomic(dst_p); 924 kunmap_atomic(dst_p);
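
Editor's note: the bio_copy_data fix replaces the vectors' fixed bv_offset with the running src_offset/dst_offset values; the surrounding loop (not shown in this hunk) advances those offsets as it copies a large vector in several chunks, so reusing the fixed base offset rewrote the start of the buffer on every chunk after the first. A tiny standalone version of the loop shape; copy_in_chunks() is an invented name.

/* Shape of the fixed copy loop: running offsets advance per chunk, the
 * per-buffer base offsets do not. */
#include <assert.h>
#include <string.h>

static void copy_in_chunks(char *dst, const char *src, size_t len, size_t chunk)
{
	size_t src_offset = 0, dst_offset = 0;

	while (len) {
		size_t bytes = len < chunk ? len : chunk;

		/* the bug was using a fixed starting offset here instead of
		 * the offsets that advance with each iteration */
		memcpy(dst + dst_offset, src + src_offset, bytes);

		src_offset += bytes;
		dst_offset += bytes;
		len -= bytes;
	}
}

int main(void)
{
	char src[32] = "0123456789abcdefghijklmnopqrstu";
	char dst[32] = { 0 };

	copy_in_chunks(dst, src, sizeof(src), 7);
	assert(memcmp(dst, src, sizeof(src)) == 0);
	return 0;
}
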
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 58b7d14b08ee..08cc08f037a6 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -107,7 +107,8 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
107 worker->idle = 1; 107 worker->idle = 1;
108 108
109 /* the list may be empty if the worker is just starting */ 109 /* the list may be empty if the worker is just starting */
110 if (!list_empty(&worker->worker_list)) { 110 if (!list_empty(&worker->worker_list) &&
111 !worker->workers->stopping) {
111 list_move(&worker->worker_list, 112 list_move(&worker->worker_list,
112 &worker->workers->idle_list); 113 &worker->workers->idle_list);
113 } 114 }
@@ -127,7 +128,8 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
127 spin_lock_irqsave(&worker->workers->lock, flags); 128 spin_lock_irqsave(&worker->workers->lock, flags);
128 worker->idle = 0; 129 worker->idle = 0;
129 130
130 if (!list_empty(&worker->worker_list)) { 131 if (!list_empty(&worker->worker_list) &&
132 !worker->workers->stopping) {
131 list_move_tail(&worker->worker_list, 133 list_move_tail(&worker->worker_list,
132 &worker->workers->worker_list); 134 &worker->workers->worker_list);
133 } 135 }
@@ -412,6 +414,7 @@ void btrfs_stop_workers(struct btrfs_workers *workers)
412 int can_stop; 414 int can_stop;
413 415
414 spin_lock_irq(&workers->lock); 416 spin_lock_irq(&workers->lock);
417 workers->stopping = 1;
415 list_splice_init(&workers->idle_list, &workers->worker_list); 418 list_splice_init(&workers->idle_list, &workers->worker_list);
416 while (!list_empty(&workers->worker_list)) { 419 while (!list_empty(&workers->worker_list)) {
417 cur = workers->worker_list.next; 420 cur = workers->worker_list.next;
@@ -455,6 +458,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
455 workers->ordered = 0; 458 workers->ordered = 0;
456 workers->atomic_start_pending = 0; 459 workers->atomic_start_pending = 0;
457 workers->atomic_worker_start = async_helper; 460 workers->atomic_worker_start = async_helper;
461 workers->stopping = 0;
458} 462}
459 463
460/* 464/*
@@ -480,15 +484,19 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
480 atomic_set(&worker->num_pending, 0); 484 atomic_set(&worker->num_pending, 0);
481 atomic_set(&worker->refs, 1); 485 atomic_set(&worker->refs, 1);
482 worker->workers = workers; 486 worker->workers = workers;
483 worker->task = kthread_run(worker_loop, worker, 487 worker->task = kthread_create(worker_loop, worker,
484 "btrfs-%s-%d", workers->name, 488 "btrfs-%s-%d", workers->name,
485 workers->num_workers + 1); 489 workers->num_workers + 1);
486 if (IS_ERR(worker->task)) { 490 if (IS_ERR(worker->task)) {
487 ret = PTR_ERR(worker->task); 491 ret = PTR_ERR(worker->task);
488 kfree(worker);
489 goto fail; 492 goto fail;
490 } 493 }
494
491 spin_lock_irq(&workers->lock); 495 spin_lock_irq(&workers->lock);
496 if (workers->stopping) {
497 spin_unlock_irq(&workers->lock);
498 goto fail_kthread;
499 }
492 list_add_tail(&worker->worker_list, &workers->idle_list); 500 list_add_tail(&worker->worker_list, &workers->idle_list);
493 worker->idle = 1; 501 worker->idle = 1;
494 workers->num_workers++; 502 workers->num_workers++;
@@ -496,8 +504,13 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
496 WARN_ON(workers->num_workers_starting < 0); 504 WARN_ON(workers->num_workers_starting < 0);
497 spin_unlock_irq(&workers->lock); 505 spin_unlock_irq(&workers->lock);
498 506
507 wake_up_process(worker->task);
499 return 0; 508 return 0;
509
510fail_kthread:
511 kthread_stop(worker->task);
500fail: 512fail:
513 kfree(worker);
501 spin_lock_irq(&workers->lock); 514 spin_lock_irq(&workers->lock);
502 workers->num_workers_starting--; 515 workers->num_workers_starting--;
503 spin_unlock_irq(&workers->lock); 516 spin_unlock_irq(&workers->lock);
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 063698b90ce2..1f26792683ed 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -107,6 +107,8 @@ struct btrfs_workers {
107 107
108 /* extra name for this worker, used for current->name */ 108 /* extra name for this worker, used for current->name */
109 char *name; 109 char *name;
110
111 int stopping;
110}; 112};
111 113
112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); 114void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 70681686e8dc..9efb94e95858 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -535,10 +535,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
535 list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list); 535 list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
536 536
537 btrfs_rm_dev_replace_srcdev(fs_info, src_device); 537 btrfs_rm_dev_replace_srcdev(fs_info, src_device);
538 if (src_device->bdev) { 538
539 /* zero out the old super */
540 btrfs_scratch_superblock(src_device);
541 }
542 /* 539 /*
543 * this is again a consistent state where no dev_replace procedure 540 * this is again a consistent state where no dev_replace procedure
544 * is running, the target device is part of the filesystem, the 541 * is running, the target device is part of the filesystem, the
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4ae17ed13b32..62176ad89846 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1561,8 +1561,9 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1561 return ret; 1561 return ret;
1562} 1562}
1563 1563
1564struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 1564struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1565 struct btrfs_key *location) 1565 struct btrfs_key *location,
1566 bool check_ref)
1566{ 1567{
1567 struct btrfs_root *root; 1568 struct btrfs_root *root;
1568 int ret; 1569 int ret;
@@ -1586,7 +1587,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1586again: 1587again:
1587 root = btrfs_lookup_fs_root(fs_info, location->objectid); 1588 root = btrfs_lookup_fs_root(fs_info, location->objectid);
1588 if (root) { 1589 if (root) {
1589 if (btrfs_root_refs(&root->root_item) == 0) 1590 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1590 return ERR_PTR(-ENOENT); 1591 return ERR_PTR(-ENOENT);
1591 return root; 1592 return root;
1592 } 1593 }
@@ -1595,7 +1596,7 @@ again:
1595 if (IS_ERR(root)) 1596 if (IS_ERR(root))
1596 return root; 1597 return root;
1597 1598
1598 if (btrfs_root_refs(&root->root_item) == 0) { 1599 if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1599 ret = -ENOENT; 1600 ret = -ENOENT;
1600 goto fail; 1601 goto fail;
1601 } 1602 }
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index b71acd6e1e5b..5ce2a7da8b11 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -68,8 +68,17 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
68int btrfs_init_fs_root(struct btrfs_root *root); 68int btrfs_init_fs_root(struct btrfs_root *root);
69int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, 69int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
70 struct btrfs_root *root); 70 struct btrfs_root *root);
71struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 71
72 struct btrfs_key *location); 72struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
73 struct btrfs_key *key,
74 bool check_ref);
75static inline struct btrfs_root *
76btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
77 struct btrfs_key *location)
78{
79 return btrfs_get_fs_root(fs_info, location, true);
80}
81
73int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); 82int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
74void btrfs_btree_balance_dirty(struct btrfs_root *root); 83void btrfs_btree_balance_dirty(struct btrfs_root *root);
75void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root); 84void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
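
Editor's note: the disk-io.h hunk keeps btrfs_read_fs_root_no_name() as a static inline wrapper around the new btrfs_get_fs_root(..., true), so existing callers keep their behaviour while relocation can pass check_ref = false. Below is a generic sketch of that compatibility-wrapper pattern only; the types, names and the trivial lookup body are placeholders.

/* Pattern: extend a lookup with a flag, keep the old name as a thin inline
 * wrapper that pins the flag to the previous behaviour. */
#include <stdbool.h>
#include <stddef.h>

struct fs_root { int objectid; };

static struct fs_root roots[4];

static struct fs_root *get_fs_root(int objectid, bool check_ref)
{
	/* check_ref would gate the "zero refs means deleted" check */
	(void)check_ref;
	return (objectid >= 0 && objectid < 4) ? &roots[objectid] : NULL;
}

/* old entry point: unchanged behaviour for existing callers */
static inline struct fs_root *read_fs_root_no_name(int objectid)
{
	return get_fs_root(objectid, true);
}
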
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c09a40db53db..51731b76900d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -145,8 +145,16 @@ int __init extent_io_init(void)
145 offsetof(struct btrfs_io_bio, bio)); 145 offsetof(struct btrfs_io_bio, bio));
146 if (!btrfs_bioset) 146 if (!btrfs_bioset)
147 goto free_buffer_cache; 147 goto free_buffer_cache;
148
149 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
150 goto free_bioset;
151
148 return 0; 152 return 0;
149 153
154free_bioset:
155 bioset_free(btrfs_bioset);
156 btrfs_bioset = NULL;
157
150free_buffer_cache: 158free_buffer_cache:
151 kmem_cache_destroy(extent_buffer_cache); 159 kmem_cache_destroy(extent_buffer_cache);
152 extent_buffer_cache = NULL; 160 extent_buffer_cache = NULL;
@@ -1482,10 +1490,8 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1482 cur_start = state->end + 1; 1490 cur_start = state->end + 1;
1483 node = rb_next(node); 1491 node = rb_next(node);
1484 total_bytes += state->end - state->start + 1; 1492 total_bytes += state->end - state->start + 1;
1485 if (total_bytes >= max_bytes) { 1493 if (total_bytes >= max_bytes)
1486 *end = *start + max_bytes - 1;
1487 break; 1494 break;
1488 }
1489 if (!node) 1495 if (!node)
1490 break; 1496 break;
1491 } 1497 }
@@ -1614,7 +1620,7 @@ again:
1614 *start = delalloc_start; 1620 *start = delalloc_start;
1615 *end = delalloc_end; 1621 *end = delalloc_end;
1616 free_extent_state(cached_state); 1622 free_extent_state(cached_state);
1617 return found; 1623 return 0;
1618 } 1624 }
1619 1625
1620 /* 1626 /*
@@ -1627,10 +1633,9 @@ again:
1627 1633
1628 /* 1634 /*
1629 * make sure to limit the number of pages we try to lock down 1635 * make sure to limit the number of pages we try to lock down
1630 * if we're looping.
1631 */ 1636 */
1632 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) 1637 if (delalloc_end + 1 - delalloc_start > max_bytes)
1633 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; 1638 delalloc_end = delalloc_start + max_bytes - 1;
1634 1639
1635 /* step two, lock all the pages after the page that has start */ 1640 /* step two, lock all the pages after the page that has start */
1636 ret = lock_delalloc_pages(inode, locked_page, 1641 ret = lock_delalloc_pages(inode, locked_page,
@@ -1641,8 +1646,7 @@ again:
1641 */ 1646 */
1642 free_extent_state(cached_state); 1647 free_extent_state(cached_state);
1643 if (!loops) { 1648 if (!loops) {
1644 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); 1649 max_bytes = PAGE_CACHE_SIZE;
1645 max_bytes = PAGE_CACHE_SIZE - offset;
1646 loops = 1; 1650 loops = 1;
1647 goto again; 1651 goto again;
1648 } else { 1652 } else {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 22ebc13b6c99..b0ef7b07b1b3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7986,7 +7986,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7986 7986
7987 7987
7988 /* check for collisions, even if the name isn't there */ 7988 /* check for collisions, even if the name isn't there */
7989 ret = btrfs_check_dir_item_collision(root, new_dir->i_ino, 7989 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
7990 new_dentry->d_name.name, 7990 new_dentry->d_name.name,
7991 new_dentry->d_name.len); 7991 new_dentry->d_name.len);
7992 7992
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a5a26320503f..4a355726151e 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -588,7 +588,7 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
588 else 588 else
589 key.offset = (u64)-1; 589 key.offset = (u64)-1;
590 590
591 return btrfs_read_fs_root_no_name(fs_info, &key); 591 return btrfs_get_fs_root(fs_info, &key, false);
592} 592}
593 593
594#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 594#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0b1f4ef8db98..ec71ea44d2b4 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -299,11 +299,6 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
299 continue; 299 continue;
300 } 300 }
301 301
302 if (btrfs_root_refs(&root->root_item) == 0) {
303 btrfs_add_dead_root(root);
304 continue;
305 }
306
307 err = btrfs_init_fs_root(root); 302 err = btrfs_init_fs_root(root);
308 if (err) { 303 if (err) {
309 btrfs_free_fs_root(root); 304 btrfs_free_fs_root(root);
@@ -318,6 +313,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
318 btrfs_free_fs_root(root); 313 btrfs_free_fs_root(root);
319 break; 314 break;
320 } 315 }
316
317 if (btrfs_root_refs(&root->root_item) == 0)
318 btrfs_add_dead_root(root);
321 } 319 }
322 320
323 btrfs_free_path(path); 321 btrfs_free_path(path);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e7a95356df83..8c81bdc1ef9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1838,11 +1838,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1838 assert_qgroups_uptodate(trans); 1838 assert_qgroups_uptodate(trans);
1839 update_super_roots(root); 1839 update_super_roots(root);
1840 1840
1841 if (!root->fs_info->log_root_recovering) { 1841 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1842 btrfs_set_super_log_root(root->fs_info->super_copy, 0); 1842 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1843 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1844 }
1845
1846 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy, 1843 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1847 sizeof(*root->fs_info->super_copy)); 1844 sizeof(*root->fs_info->super_copy));
1848 1845
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a10645830223..043b215769c2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1716,6 +1716,7 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1716 struct btrfs_device *srcdev) 1716 struct btrfs_device *srcdev)
1717{ 1717{
1718 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex)); 1718 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1719
1719 list_del_rcu(&srcdev->dev_list); 1720 list_del_rcu(&srcdev->dev_list);
1720 list_del_rcu(&srcdev->dev_alloc_list); 1721 list_del_rcu(&srcdev->dev_alloc_list);
1721 fs_info->fs_devices->num_devices--; 1722 fs_info->fs_devices->num_devices--;
@@ -1725,9 +1726,13 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1725 } 1726 }
1726 if (srcdev->can_discard) 1727 if (srcdev->can_discard)
1727 fs_info->fs_devices->num_can_discard--; 1728 fs_info->fs_devices->num_can_discard--;
1728 if (srcdev->bdev) 1729 if (srcdev->bdev) {
1729 fs_info->fs_devices->open_devices--; 1730 fs_info->fs_devices->open_devices--;
1730 1731
1732 /* zero out the old super */
1733 btrfs_scratch_superblock(srcdev);
1734 }
1735
1731 call_rcu(&srcdev->rcu, free_device); 1736 call_rcu(&srcdev->rcu, free_device);
1732} 1737}
1733 1738
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ea723a5e8226..6d0b07217ac9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -132,5 +132,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
132extern const struct export_operations cifs_export_ops; 132extern const struct export_operations cifs_export_ops;
133#endif /* CONFIG_CIFS_NFSD_EXPORT */ 133#endif /* CONFIG_CIFS_NFSD_EXPORT */
134 134
135#define CIFS_VERSION "2.01" 135#define CIFS_VERSION "2.02"
136#endif /* _CIFSFS_H */ 136#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cfa14c80ef3b..52b6f6c26bfc 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -547,9 +547,6 @@ struct TCP_Server_Info {
547 unsigned int max_rw; /* maxRw specifies the maximum */ 547 unsigned int max_rw; /* maxRw specifies the maximum */
548 /* message size the server can send or receive for */ 548 /* message size the server can send or receive for */
549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ 549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
550 unsigned int max_vcs; /* maximum number of smb sessions, at least
551 those that can be specified uniquely with
552 vcnumbers */
553 unsigned int capabilities; /* selective disabling of caps by smb sess */ 550 unsigned int capabilities; /* selective disabling of caps by smb sess */
554 int timeAdj; /* Adjust for difference in server time zone in sec */ 551 int timeAdj; /* Adjust for difference in server time zone in sec */
555 __u64 CurrentMid; /* multiplex id - rotating counter */ 552 __u64 CurrentMid; /* multiplex id - rotating counter */
@@ -715,7 +712,6 @@ struct cifs_ses {
715 enum statusEnum status; 712 enum statusEnum status;
716 unsigned overrideSecFlg; /* if non-zero override global sec flags */ 713 unsigned overrideSecFlg; /* if non-zero override global sec flags */
717 __u16 ipc_tid; /* special tid for connection to IPC share */ 714 __u16 ipc_tid; /* special tid for connection to IPC share */
718 __u16 vcnum;
719 char *serverOS; /* name of operating system underlying server */ 715 char *serverOS; /* name of operating system underlying server */
720 char *serverNOS; /* name of network operating system of server */ 716 char *serverNOS; /* name of network operating system of server */
721 char *serverDomain; /* security realm of server */ 717 char *serverDomain; /* security realm of server */
@@ -1272,6 +1268,7 @@ struct dfs_info3_param {
1272#define CIFS_FATTR_DELETE_PENDING 0x2 1268#define CIFS_FATTR_DELETE_PENDING 0x2
1273#define CIFS_FATTR_NEED_REVAL 0x4 1269#define CIFS_FATTR_NEED_REVAL 0x4
1274#define CIFS_FATTR_INO_COLLISION 0x8 1270#define CIFS_FATTR_INO_COLLISION 0x8
1271#define CIFS_FATTR_UNKNOWN_NLINK 0x10
1275 1272
1276struct cifs_fattr { 1273struct cifs_fattr {
1277 u32 cf_flags; 1274 u32 cf_flags;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 948676db8e2e..a630475e421c 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2652,26 +2652,7 @@ typedef struct file_xattr_info {
2652} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info 2652} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
2653 level 0x205 */ 2653 level 0x205 */
2654 2654
2655 2655/* flags for lsattr and chflags commands removed; they are now in uapi/linux/fs.h */
2656/* flags for chattr command */
2657#define EXT_SECURE_DELETE 0x00000001 /* EXT3_SECRM_FL */
2658#define EXT_ENABLE_UNDELETE 0x00000002 /* EXT3_UNRM_FL */
2659/* Reserved for compress file 0x4 */
2660#define EXT_SYNCHRONOUS 0x00000008 /* EXT3_SYNC_FL */
2661#define EXT_IMMUTABLE_FL 0x00000010 /* EXT3_IMMUTABLE_FL */
2662#define EXT_OPEN_APPEND_ONLY 0x00000020 /* EXT3_APPEND_FL */
2663#define EXT_DO_NOT_BACKUP 0x00000040 /* EXT3_NODUMP_FL */
2664#define EXT_NO_UPDATE_ATIME 0x00000080 /* EXT3_NOATIME_FL */
2665/* 0x100 through 0x800 reserved for compression flags and are GET-ONLY */
2666#define EXT_HASH_TREE_INDEXED_DIR 0x00001000 /* GET-ONLY EXT3_INDEX_FL */
2667/* 0x2000 reserved for IMAGIC_FL */
2668#define EXT_JOURNAL_THIS_FILE 0x00004000 /* GET-ONLY EXT3_JOURNAL_DATA_FL */
2669/* 0x8000 reserved for EXT3_NOTAIL_FL */
2670#define EXT_SYNCHRONOUS_DIR 0x00010000 /* EXT3_DIRSYNC_FL */
2671#define EXT_TOPDIR 0x00020000 /* EXT3_TOPDIR_FL */
2672
2673#define EXT_SET_MASK 0x000300FF
2674#define EXT_GET_MASK 0x0003DFFF
2675 2656
2676typedef struct file_chattr_info { 2657typedef struct file_chattr_info {
2677 __le64 mask; /* list of all possible attribute bits */ 2658 __le64 mask; /* list of all possible attribute bits */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a3d74fea1623..4baf35949b51 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -463,7 +463,6 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
463 cifs_max_pending); 463 cifs_max_pending);
464 set_credits(server, server->maxReq); 464 set_credits(server, server->maxReq);
465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize); 465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
466 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
467 /* even though we do not use raw we might as well set this 466 /* even though we do not use raw we might as well set this
468 accurately, in case we ever find a need for it */ 467 accurately, in case we ever find a need for it */
469 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { 468 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index eb955b525e55..7ddddf2e2504 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3254,6 +3254,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3254 /* 3254 /*
3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS 3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3256 * immediately if the cookie is negative 3256 * immediately if the cookie is negative
3257 *
3258 * After this point, every page in the list might have PG_fscache set,
3259 * so we will need to clean that up off of every page we don't use.
3257 */ 3260 */
3258 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, 3261 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3259 &num_pages); 3262 &num_pages);
@@ -3376,6 +3379,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3376 kref_put(&rdata->refcount, cifs_readdata_release); 3379 kref_put(&rdata->refcount, cifs_readdata_release);
3377 } 3380 }
3378 3381
3382 /* Any pages that have been shown to fscache but didn't get added to
3383 * the pagecache must be uncached before they get returned to the
3384 * allocator.
3385 */
3386 cifs_fscache_readpages_cancel(mapping->host, page_list);
3379 return rc; 3387 return rc;
3380} 3388}
3381 3389
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 2f4bc5a58054..b3258f35e88a 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -223,6 +223,13 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
223 fscache_uncache_page(CIFS_I(inode)->fscache, page); 223 fscache_uncache_page(CIFS_I(inode)->fscache, page);
224} 224}
225 225
226void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
227{
228 cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
229 __func__, CIFS_I(inode)->fscache, inode);
230 fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
231}
232
226void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) 233void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
227{ 234{
228 struct cifsInodeInfo *cifsi = CIFS_I(inode); 235 struct cifsInodeInfo *cifsi = CIFS_I(inode);
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 63539323e0b9..24794b6cd8ec 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -54,6 +54,7 @@ extern int __cifs_readpages_from_fscache(struct inode *,
54 struct address_space *, 54 struct address_space *,
55 struct list_head *, 55 struct list_head *,
56 unsigned *); 56 unsigned *);
57extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
57 58
58extern void __cifs_readpage_to_fscache(struct inode *, struct page *); 59extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
59 60
@@ -91,6 +92,13 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
91 __cifs_readpage_to_fscache(inode, page); 92 __cifs_readpage_to_fscache(inode, page);
92} 93}
93 94
95static inline void cifs_fscache_readpages_cancel(struct inode *inode,
96 struct list_head *pages)
97{
98 if (CIFS_I(inode)->fscache)
99 return __cifs_fscache_readpages_cancel(inode, pages);
100}
101
94#else /* CONFIG_CIFS_FSCACHE */ 102#else /* CONFIG_CIFS_FSCACHE */
95static inline int cifs_fscache_register(void) { return 0; } 103static inline int cifs_fscache_register(void) { return 0; }
96static inline void cifs_fscache_unregister(void) {} 104static inline void cifs_fscache_unregister(void) {}
@@ -131,6 +139,11 @@ static inline int cifs_readpages_from_fscache(struct inode *inode,
131static inline void cifs_readpage_to_fscache(struct inode *inode, 139static inline void cifs_readpage_to_fscache(struct inode *inode,
132 struct page *page) {} 140 struct page *page) {}
133 141
142static inline void cifs_fscache_readpages_cancel(struct inode *inode,
143 struct list_head *pages)
144{
145}
146
134#endif /* CONFIG_CIFS_FSCACHE */ 147#endif /* CONFIG_CIFS_FSCACHE */
135 148
136#endif /* _CIFS_FSCACHE_H */ 149#endif /* _CIFS_FSCACHE_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f9ff9c173f78..867b7cdc794a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -120,6 +120,33 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
120 cifs_i->invalid_mapping = true; 120 cifs_i->invalid_mapping = true;
121} 121}
122 122
123/*
124 * copy nlink to the inode, unless it wasn't provided. Provide
125 * sane values if we don't have an existing one and none was provided
126 */
127static void
128cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
129{
130 /*
131 * if we're in a situation where we can't trust what we
132 * got from the server (readdir, some non-unix cases)
133 * fake reasonable values
134 */
135 if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
136 /* only provide fake values on a new inode */
137 if (inode->i_state & I_NEW) {
138 if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
139 set_nlink(inode, 2);
140 else
141 set_nlink(inode, 1);
142 }
143 return;
144 }
145
146 /* we trust the server, so update it */
147 set_nlink(inode, fattr->cf_nlink);
148}
149
123/* populate an inode with info from a cifs_fattr struct */ 150/* populate an inode with info from a cifs_fattr struct */
124void 151void
125cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) 152cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
@@ -134,7 +161,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
134 inode->i_mtime = fattr->cf_mtime; 161 inode->i_mtime = fattr->cf_mtime;
135 inode->i_ctime = fattr->cf_ctime; 162 inode->i_ctime = fattr->cf_ctime;
136 inode->i_rdev = fattr->cf_rdev; 163 inode->i_rdev = fattr->cf_rdev;
137 set_nlink(inode, fattr->cf_nlink); 164 cifs_nlink_fattr_to_inode(inode, fattr);
138 inode->i_uid = fattr->cf_uid; 165 inode->i_uid = fattr->cf_uid;
139 inode->i_gid = fattr->cf_gid; 166 inode->i_gid = fattr->cf_gid;
140 167
@@ -541,6 +568,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
541 fattr->cf_bytes = le64_to_cpu(info->AllocationSize); 568 fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
542 fattr->cf_createtime = le64_to_cpu(info->CreationTime); 569 fattr->cf_createtime = le64_to_cpu(info->CreationTime);
543 570
571 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
544 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { 572 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
545 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; 573 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
546 fattr->cf_dtype = DT_DIR; 574 fattr->cf_dtype = DT_DIR;
@@ -548,7 +576,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
548 * Server can return wrong NumberOfLinks value for directories 576 * Server can return wrong NumberOfLinks value for directories
549 * when Unix extensions are disabled - fake it. 577 * when Unix extensions are disabled - fake it.
550 */ 578 */
551 fattr->cf_nlink = 2; 579 if (!tcon->unix_ext)
580 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
552 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) { 581 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
553 fattr->cf_mode = S_IFLNK; 582 fattr->cf_mode = S_IFLNK;
554 fattr->cf_dtype = DT_LNK; 583 fattr->cf_dtype = DT_LNK;
@@ -561,11 +590,15 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
561 if (fattr->cf_cifsattrs & ATTR_READONLY) 590 if (fattr->cf_cifsattrs & ATTR_READONLY)
562 fattr->cf_mode &= ~(S_IWUGO); 591 fattr->cf_mode &= ~(S_IWUGO);
563 592
564 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); 593 /*
565 if (fattr->cf_nlink < 1) { 594 * Don't accept zero nlink from non-unix servers unless
566 cifs_dbg(1, "replacing bogus file nlink value %u\n", 595 * delete is pending. Instead mark it as unknown.
596 */
597 if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
598 !info->DeletePending) {
599 cifs_dbg(1, "bogus file nlink value %u\n",
567 fattr->cf_nlink); 600 fattr->cf_nlink);
568 fattr->cf_nlink = 1; 601 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
569 } 602 }
570 } 603 }
571 604
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 42ef03be089f..53a75f3d0179 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -180,6 +180,9 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
180 fattr->cf_dtype = DT_REG; 180 fattr->cf_dtype = DT_REG;
181 } 181 }
182 182
183 /* non-unix readdir doesn't provide nlink */
184 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
185
183 if (fattr->cf_cifsattrs & ATTR_READONLY) 186 if (fattr->cf_cifsattrs & ATTR_READONLY)
184 fattr->cf_mode &= ~S_IWUGO; 187 fattr->cf_mode &= ~S_IWUGO;
185 188
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 5f99b7f19e78..352358de1d7e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -32,88 +32,6 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include "cifs_spnego.h" 33#include "cifs_spnego.h"
34 34
35/*
36 * Checks if this is the first smb session to be reconnected after
37 * the socket has been reestablished (so we know whether to use vc 0).
38 * Called while holding the cifs_tcp_ses_lock, so do not block
39 */
40static bool is_first_ses_reconnect(struct cifs_ses *ses)
41{
42 struct list_head *tmp;
43 struct cifs_ses *tmp_ses;
44
45 list_for_each(tmp, &ses->server->smb_ses_list) {
46 tmp_ses = list_entry(tmp, struct cifs_ses,
47 smb_ses_list);
48 if (tmp_ses->need_reconnect == false)
49 return false;
50 }
51 /* could not find a session that was already connected,
52 this must be the first one we are reconnecting */
53 return true;
54}
55
56/*
57 * vc number 0 is treated specially by some servers, and should be the
58 * first one we request. After that we can use vcnumbers up to maxvcs,
59 * one for each smb session (some Windows versions set maxvcs incorrectly
60 * so maxvc=1 can be ignored). If we have too many vcs, we can reuse
61 * any vc but zero (some servers reset the connection on vcnum zero)
62 *
63 */
64static __le16 get_next_vcnum(struct cifs_ses *ses)
65{
66 __u16 vcnum = 0;
67 struct list_head *tmp;
68 struct cifs_ses *tmp_ses;
69 __u16 max_vcs = ses->server->max_vcs;
70 __u16 i;
71 int free_vc_found = 0;
72
73 /* Quoting the MS-SMB specification: "Windows-based SMB servers set this
74 field to one but do not enforce this limit, which allows an SMB client
75 to establish more virtual circuits than allowed by this value ... but
76 other server implementations can enforce this limit." */
77 if (max_vcs < 2)
78 max_vcs = 0xFFFF;
79
80 spin_lock(&cifs_tcp_ses_lock);
81 if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
82 goto get_vc_num_exit; /* vcnum will be zero */
83 for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
84 if (i == 0) /* this is the only connection, use vc 0 */
85 break;
86
87 free_vc_found = 1;
88
89 list_for_each(tmp, &ses->server->smb_ses_list) {
90 tmp_ses = list_entry(tmp, struct cifs_ses,
91 smb_ses_list);
92 if (tmp_ses->vcnum == i) {
93 free_vc_found = 0;
94 break; /* found duplicate, try next vcnum */
95 }
96 }
97 if (free_vc_found)
98 break; /* we found a vcnumber that will work - use it */
99 }
100
101 if (i == 0)
102 vcnum = 0; /* for most common case, ie if one smb session, use
103 vc zero. Also for case when no free vcnum, zero
104 is safest to send (some clients only send zero) */
105 else if (free_vc_found == 0)
106 vcnum = 1; /* we can not reuse vc=0 safely, since some servers
107 reset all uids on that, but 1 is ok. */
108 else
109 vcnum = i;
110 ses->vcnum = vcnum;
111get_vc_num_exit:
112 spin_unlock(&cifs_tcp_ses_lock);
113
114 return cpu_to_le16(vcnum);
115}
116
117static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) 35static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
118{ 36{
119 __u32 capabilities = 0; 37 __u32 capabilities = 0;
@@ -128,7 +46,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
128 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, 46 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
129 USHRT_MAX)); 47 USHRT_MAX));
130 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); 48 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
131 pSMB->req.VcNumber = get_next_vcnum(ses); 49 pSMB->req.VcNumber = __constant_cpu_to_le16(1);
132 50
133 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ 51 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
134 52
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0d424d7ac02b..e274e9c1171f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2563,7 +2563,7 @@ retry:
2563 break; 2563 break;
2564 } 2564 }
2565 blk_finish_plug(&plug); 2565 blk_finish_plug(&plug);
2566 if (!ret && !cycled) { 2566 if (!ret && !cycled && wbc->nr_to_write > 0) {
2567 cycled = 1; 2567 cycled = 1;
2568 mpd.last_page = writeback_index - 1; 2568 mpd.last_page = writeback_index - 1;
2569 mpd.first_page = 0; 2569 mpd.first_page = 0;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c081e34f717f..03e9bebba198 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1350,6 +1350,8 @@ retry:
1350 s_min_extra_isize) { 1350 s_min_extra_isize) {
1351 tried_min_extra_isize++; 1351 tried_min_extra_isize++;
1352 new_extra_isize = s_min_extra_isize; 1352 new_extra_isize = s_min_extra_isize;
1353 kfree(is); is = NULL;
1354 kfree(bs); bs = NULL;
1353 goto retry; 1355 goto retry;
1354 } 1356 }
1355 error = -1; 1357 error = -1;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 62b43b577bfc..b7989f2ab4c4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -182,6 +182,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
182 struct inode *inode; 182 struct inode *inode;
183 struct dentry *parent; 183 struct dentry *parent;
184 struct fuse_conn *fc; 184 struct fuse_conn *fc;
185 struct fuse_inode *fi;
185 int ret; 186 int ret;
186 187
187 inode = ACCESS_ONCE(entry->d_inode); 188 inode = ACCESS_ONCE(entry->d_inode);
@@ -228,7 +229,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
228 if (!err && !outarg.nodeid) 229 if (!err && !outarg.nodeid)
229 err = -ENOENT; 230 err = -ENOENT;
230 if (!err) { 231 if (!err) {
231 struct fuse_inode *fi = get_fuse_inode(inode); 232 fi = get_fuse_inode(inode);
232 if (outarg.nodeid != get_node_id(inode)) { 233 if (outarg.nodeid != get_node_id(inode)) {
233 fuse_queue_forget(fc, forget, outarg.nodeid, 1); 234 fuse_queue_forget(fc, forget, outarg.nodeid, 1);
234 goto invalid; 235 goto invalid;
@@ -246,8 +247,11 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
246 attr_version); 247 attr_version);
247 fuse_change_entry_timeout(entry, &outarg); 248 fuse_change_entry_timeout(entry, &outarg);
248 } else if (inode) { 249 } else if (inode) {
249 fc = get_fuse_conn(inode); 250 fi = get_fuse_inode(inode);
250 if (fc->readdirplus_auto) { 251 if (flags & LOOKUP_RCU) {
252 if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
253 return -ECHILD;
254 } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
251 parent = dget_parent(entry); 255 parent = dget_parent(entry);
252 fuse_advise_use_readdirplus(parent->d_inode); 256 fuse_advise_use_readdirplus(parent->d_inode);
253 dput(parent); 257 dput(parent);
@@ -259,7 +263,8 @@ out:
259 263
260invalid: 264invalid:
261 ret = 0; 265 ret = 0;
262 if (check_submounts_and_drop(entry) != 0) 266
267 if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
263 ret = 1; 268 ret = 1;
264 goto out; 269 goto out;
265} 270}
@@ -1063,6 +1068,8 @@ static int fuse_access(struct inode *inode, int mask)
1063 struct fuse_access_in inarg; 1068 struct fuse_access_in inarg;
1064 int err; 1069 int err;
1065 1070
1071 BUG_ON(mask & MAY_NOT_BLOCK);
1072
1066 if (fc->no_access) 1073 if (fc->no_access)
1067 return 0; 1074 return 0;
1068 1075
@@ -1150,9 +1157,6 @@ static int fuse_permission(struct inode *inode, int mask)
1150 noticed immediately, only after the attribute 1157 noticed immediately, only after the attribute
1151 timeout has expired */ 1158 timeout has expired */
1152 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { 1159 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1153 if (mask & MAY_NOT_BLOCK)
1154 return -ECHILD;
1155
1156 err = fuse_access(inode, mask); 1160 err = fuse_access(inode, mask);
1157 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { 1161 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1158 if (!(inode->i_mode & S_IXUGO)) { 1162 if (!(inode->i_mode & S_IXUGO)) {
@@ -1291,6 +1295,8 @@ static int fuse_direntplus_link(struct file *file,
1291 } 1295 }
1292 1296
1293found: 1297found:
1298 if (fc->readdirplus_auto)
1299 set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
1294 fuse_change_entry_timeout(dentry, o); 1300 fuse_change_entry_timeout(dentry, o);
1295 1301
1296 err = 0; 1302 err = 0;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d409deafc67b..4598345ab87d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2467,6 +2467,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2467{ 2467{
2468 struct fuse_file *ff = file->private_data; 2468 struct fuse_file *ff = file->private_data;
2469 struct inode *inode = file->f_inode; 2469 struct inode *inode = file->f_inode;
2470 struct fuse_inode *fi = get_fuse_inode(inode);
2470 struct fuse_conn *fc = ff->fc; 2471 struct fuse_conn *fc = ff->fc;
2471 struct fuse_req *req; 2472 struct fuse_req *req;
2472 struct fuse_fallocate_in inarg = { 2473 struct fuse_fallocate_in inarg = {
@@ -2484,10 +2485,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2484 2485
2485 if (lock_inode) { 2486 if (lock_inode) {
2486 mutex_lock(&inode->i_mutex); 2487 mutex_lock(&inode->i_mutex);
2487 if (mode & FALLOC_FL_PUNCH_HOLE) 2488 if (mode & FALLOC_FL_PUNCH_HOLE) {
2488 fuse_set_nowrite(inode); 2489 loff_t endbyte = offset + length - 1;
2490 err = filemap_write_and_wait_range(inode->i_mapping,
2491 offset, endbyte);
2492 if (err)
2493 goto out;
2494
2495 fuse_sync_writes(inode);
2496 }
2489 } 2497 }
2490 2498
2499 if (!(mode & FALLOC_FL_KEEP_SIZE))
2500 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2501
2491 req = fuse_get_req_nopages(fc); 2502 req = fuse_get_req_nopages(fc);
2492 if (IS_ERR(req)) { 2503 if (IS_ERR(req)) {
2493 err = PTR_ERR(req); 2504 err = PTR_ERR(req);
@@ -2520,11 +2531,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2520 fuse_invalidate_attr(inode); 2531 fuse_invalidate_attr(inode);
2521 2532
2522out: 2533out:
2523 if (lock_inode) { 2534 if (!(mode & FALLOC_FL_KEEP_SIZE))
2524 if (mode & FALLOC_FL_PUNCH_HOLE) 2535 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2525 fuse_release_nowrite(inode); 2536
2537 if (lock_inode)
2526 mutex_unlock(&inode->i_mutex); 2538 mutex_unlock(&inode->i_mutex);
2527 }
2528 2539
2529 return err; 2540 return err;
2530} 2541}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5ced199b50bb..5b9e6f3b6aef 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -115,6 +115,8 @@ struct fuse_inode {
115enum { 115enum {
116 /** Advise readdirplus */ 116 /** Advise readdirplus */
117 FUSE_I_ADVISE_RDPLUS, 117 FUSE_I_ADVISE_RDPLUS,
118 /** Initialized with readdirplus */
119 FUSE_I_INIT_RDPLUS,
118 /** An operation changing file size is in progress */ 120 /** An operation changing file size is in progress */
119 FUSE_I_SIZE_UNSTABLE, 121 FUSE_I_SIZE_UNSTABLE,
120}; 122};
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 854a8f05a610..02b0df769e2d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1458,7 +1458,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1458 1458
1459 trace_nfs_atomic_open_enter(dir, ctx, open_flags); 1459 trace_nfs_atomic_open_enter(dir, ctx, open_flags);
1460 nfs_block_sillyrename(dentry->d_parent); 1460 nfs_block_sillyrename(dentry->d_parent);
1461 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr); 1461 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
1462 nfs_unblock_sillyrename(dentry->d_parent); 1462 nfs_unblock_sillyrename(dentry->d_parent);
1463 if (IS_ERR(inode)) { 1463 if (IS_ERR(inode)) {
1464 err = PTR_ERR(inode); 1464 err = PTR_ERR(inode);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index e5b804dd944c..77efaf15ec90 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -19,6 +19,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
19 struct inode *dir; 19 struct inode *dir;
20 unsigned openflags = filp->f_flags; 20 unsigned openflags = filp->f_flags;
21 struct iattr attr; 21 struct iattr attr;
22 int opened = 0;
22 int err; 23 int err;
23 24
24 /* 25 /*
@@ -55,7 +56,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
55 nfs_wb_all(inode); 56 nfs_wb_all(inode);
56 } 57 }
57 58
58 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); 59 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
59 if (IS_ERR(inode)) { 60 if (IS_ERR(inode)) {
60 err = PTR_ERR(inode); 61 err = PTR_ERR(inode);
61 switch (err) { 62 switch (err) {
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 95604f64cab8..c7c295e556ed 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -185,6 +185,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
185 if (status) 185 if (status)
186 goto out_put; 186 goto out_put;
187 187
188 smp_wmb();
188 ds->ds_clp = clp; 189 ds->ds_clp = clp;
189 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); 190 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
190out: 191out:
@@ -801,34 +802,35 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
801 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; 802 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
802 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; 803 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
803 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 804 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
804 805 struct nfs4_pnfs_ds *ret = ds;
805 if (filelayout_test_devid_unavailable(devid))
806 return NULL;
807 806
808 if (ds == NULL) { 807 if (ds == NULL) {
809 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 808 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
810 __func__, ds_idx); 809 __func__, ds_idx);
811 filelayout_mark_devid_invalid(devid); 810 filelayout_mark_devid_invalid(devid);
812 return NULL; 811 goto out;
813 } 812 }
813 smp_rmb();
814 if (ds->ds_clp) 814 if (ds->ds_clp)
815 return ds; 815 goto out_test_devid;
816 816
817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
819 int err; 819 int err;
820 820
821 err = nfs4_ds_connect(s, ds); 821 err = nfs4_ds_connect(s, ds);
822 if (err) { 822 if (err)
823 nfs4_mark_deviceid_unavailable(devid); 823 nfs4_mark_deviceid_unavailable(devid);
824 ds = NULL;
825 }
826 nfs4_clear_ds_conn_bit(ds); 824 nfs4_clear_ds_conn_bit(ds);
827 } else { 825 } else {
828 /* Either ds is connected, or ds is NULL */ 826 /* Either ds is connected, or ds is NULL */
829 nfs4_wait_ds_connect(ds); 827 nfs4_wait_ds_connect(ds);
830 } 828 }
831 return ds; 829out_test_devid:
830 if (filelayout_test_devid_unavailable(devid))
831 ret = NULL;
832out:
833 return ret;
832} 834}
833 835
834module_param(dataserver_retrans, uint, 0644); 836module_param(dataserver_retrans, uint, 0644);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 989bb9d3074d..d53d6785cba2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -912,6 +912,7 @@ struct nfs4_opendata {
912 struct iattr attrs; 912 struct iattr attrs;
913 unsigned long timestamp; 913 unsigned long timestamp;
914 unsigned int rpc_done : 1; 914 unsigned int rpc_done : 1;
915 unsigned int file_created : 1;
915 unsigned int is_recover : 1; 916 unsigned int is_recover : 1;
916 int rpc_status; 917 int rpc_status;
917 int cancelled; 918 int cancelled;
@@ -1946,8 +1947,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1946 1947
1947 nfs_fattr_map_and_free_names(server, &data->f_attr); 1948 nfs_fattr_map_and_free_names(server, &data->f_attr);
1948 1949
1949 if (o_arg->open_flags & O_CREAT) 1950 if (o_arg->open_flags & O_CREAT) {
1950 update_changeattr(dir, &o_res->cinfo); 1951 update_changeattr(dir, &o_res->cinfo);
1952 if (o_arg->open_flags & O_EXCL)
1953 data->file_created = 1;
1954 else if (o_res->cinfo.before != o_res->cinfo.after)
1955 data->file_created = 1;
1956 }
1951 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1957 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1952 server->caps &= ~NFS_CAP_POSIX_LOCK; 1958 server->caps &= ~NFS_CAP_POSIX_LOCK;
1953 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1959 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -2191,7 +2197,8 @@ static int _nfs4_do_open(struct inode *dir,
2191 struct nfs_open_context *ctx, 2197 struct nfs_open_context *ctx,
2192 int flags, 2198 int flags,
2193 struct iattr *sattr, 2199 struct iattr *sattr,
2194 struct nfs4_label *label) 2200 struct nfs4_label *label,
2201 int *opened)
2195{ 2202{
2196 struct nfs4_state_owner *sp; 2203 struct nfs4_state_owner *sp;
2197 struct nfs4_state *state = NULL; 2204 struct nfs4_state *state = NULL;
@@ -2261,6 +2268,8 @@ static int _nfs4_do_open(struct inode *dir,
2261 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2268 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2262 } 2269 }
2263 } 2270 }
2271 if (opendata->file_created)
2272 *opened |= FILE_CREATED;
2264 2273
2265 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2274 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2266 *ctx_th = opendata->f_attr.mdsthreshold; 2275 *ctx_th = opendata->f_attr.mdsthreshold;
@@ -2289,7 +2298,8 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2289 struct nfs_open_context *ctx, 2298 struct nfs_open_context *ctx,
2290 int flags, 2299 int flags,
2291 struct iattr *sattr, 2300 struct iattr *sattr,
2292 struct nfs4_label *label) 2301 struct nfs4_label *label,
2302 int *opened)
2293{ 2303{
2294 struct nfs_server *server = NFS_SERVER(dir); 2304 struct nfs_server *server = NFS_SERVER(dir);
2295 struct nfs4_exception exception = { }; 2305 struct nfs4_exception exception = { };
@@ -2297,7 +2307,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2297 int status; 2307 int status;
2298 2308
2299 do { 2309 do {
2300 status = _nfs4_do_open(dir, ctx, flags, sattr, label); 2310 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2301 res = ctx->state; 2311 res = ctx->state;
2302 trace_nfs4_open_file(ctx, flags, status); 2312 trace_nfs4_open_file(ctx, flags, status);
2303 if (status == 0) 2313 if (status == 0)
@@ -2659,7 +2669,8 @@ out:
2659} 2669}
2660 2670
2661static struct inode * 2671static struct inode *
2662nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2672nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2673 int open_flags, struct iattr *attr, int *opened)
2663{ 2674{
2664 struct nfs4_state *state; 2675 struct nfs4_state *state;
2665 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2676 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
@@ -2667,7 +2678,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
2667 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2678 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2668 2679
2669 /* Protect against concurrent sillydeletes */ 2680 /* Protect against concurrent sillydeletes */
2670 state = nfs4_do_open(dir, ctx, open_flags, attr, label); 2681 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2671 2682
2672 nfs4_label_release_security(label); 2683 nfs4_label_release_security(label);
2673 2684
@@ -3332,6 +3343,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3332 struct nfs4_label l, *ilabel = NULL; 3343 struct nfs4_label l, *ilabel = NULL;
3333 struct nfs_open_context *ctx; 3344 struct nfs_open_context *ctx;
3334 struct nfs4_state *state; 3345 struct nfs4_state *state;
3346 int opened = 0;
3335 int status = 0; 3347 int status = 0;
3336 3348
3337 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3349 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3341,7 +3353,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3341 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3353 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3342 3354
3343 sattr->ia_mode &= ~current_umask(); 3355 sattr->ia_mode &= ~current_umask();
3344 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel); 3356 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3345 if (IS_ERR(state)) { 3357 if (IS_ERR(state)) {
3346 status = PTR_ERR(state); 3358 status = PTR_ERR(state);
3347 goto out; 3359 goto out;
@@ -7564,8 +7576,10 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7564{ 7576{
7565 int err; 7577 int err;
7566 struct page *page; 7578 struct page *page;
7567 rpc_authflavor_t flavor; 7579 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
7568 struct nfs4_secinfo_flavors *flavors; 7580 struct nfs4_secinfo_flavors *flavors;
7581 struct nfs4_secinfo4 *secinfo;
7582 int i;
7569 7583
7570 page = alloc_page(GFP_KERNEL); 7584 page = alloc_page(GFP_KERNEL);
7571 if (!page) { 7585 if (!page) {
@@ -7587,9 +7601,31 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7587 if (err) 7601 if (err)
7588 goto out_freepage; 7602 goto out_freepage;
7589 7603
7590 flavor = nfs_find_best_sec(flavors); 7604 for (i = 0; i < flavors->num_flavors; i++) {
7591 if (err == 0) 7605 secinfo = &flavors->flavors[i];
7592 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 7606
7607 switch (secinfo->flavor) {
7608 case RPC_AUTH_NULL:
7609 case RPC_AUTH_UNIX:
7610 case RPC_AUTH_GSS:
7611 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
7612 &secinfo->flavor_info);
7613 break;
7614 default:
7615 flavor = RPC_AUTH_MAXFLAVOR;
7616 break;
7617 }
7618
7619 if (flavor != RPC_AUTH_MAXFLAVOR) {
7620 err = nfs4_lookup_root_sec(server, fhandle,
7621 info, flavor);
7622 if (!err)
7623 break;
7624 }
7625 }
7626
7627 if (flavor == RPC_AUTH_MAXFLAVOR)
7628 err = -EPERM;
7593 7629
7594out_freepage: 7630out_freepage:
7595 put_page(page); 7631 put_page(page);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0ba679866e50..da276640f776 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
94 clear_buffer_nilfs_volatile(bh); 94 clear_buffer_nilfs_volatile(bh);
95 clear_buffer_nilfs_checked(bh); 95 clear_buffer_nilfs_checked(bh);
96 clear_buffer_nilfs_redirected(bh); 96 clear_buffer_nilfs_redirected(bh);
97 clear_buffer_async_write(bh);
97 clear_buffer_dirty(bh); 98 clear_buffer_dirty(bh);
98 if (nilfs_page_buffers_clean(page)) 99 if (nilfs_page_buffers_clean(page))
99 __nilfs_clear_page_dirty(page); 100 __nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
429 "discard block %llu, size %zu", 430 "discard block %llu, size %zu",
430 (u64)bh->b_blocknr, bh->b_size); 431 (u64)bh->b_blocknr, bh->b_size);
431 } 432 }
433 clear_buffer_async_write(bh);
432 clear_buffer_dirty(bh); 434 clear_buffer_dirty(bh);
433 clear_buffer_nilfs_volatile(bh); 435 clear_buffer_nilfs_volatile(bh);
434 clear_buffer_nilfs_checked(bh); 436 clear_buffer_nilfs_checked(bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bd88a7461063..9f6b486b6c01 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
665 665
666 bh = head = page_buffers(page); 666 bh = head = page_buffers(page);
667 do { 667 do {
668 if (!buffer_dirty(bh)) 668 if (!buffer_dirty(bh) || buffer_async_write(bh))
669 continue; 669 continue;
670 get_bh(bh); 670 get_bh(bh);
671 list_add_tail(&bh->b_assoc_buffers, listp); 671 list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
699 for (i = 0; i < pagevec_count(&pvec); i++) { 699 for (i = 0; i < pagevec_count(&pvec); i++) {
700 bh = head = page_buffers(pvec.pages[i]); 700 bh = head = page_buffers(pvec.pages[i]);
701 do { 701 do {
702 if (buffer_dirty(bh)) { 702 if (buffer_dirty(bh) &&
703 !buffer_async_write(bh)) {
703 get_bh(bh); 704 get_bh(bh);
704 list_add_tail(&bh->b_assoc_buffers, 705 list_add_tail(&bh->b_assoc_buffers,
705 listp); 706 listp);
@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1579 1580
1580 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1581 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1581 b_assoc_buffers) { 1582 b_assoc_buffers) {
1583 set_buffer_async_write(bh);
1582 if (bh->b_page != bd_page) { 1584 if (bh->b_page != bd_page) {
1583 if (bd_page) { 1585 if (bd_page) {
1584 lock_page(bd_page); 1586 lock_page(bd_page);
@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1592 1594
1593 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1595 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1594 b_assoc_buffers) { 1596 b_assoc_buffers) {
1597 set_buffer_async_write(bh);
1595 if (bh == segbuf->sb_super_root) { 1598 if (bh == segbuf->sb_super_root) {
1596 if (bh->b_page != bd_page) { 1599 if (bh->b_page != bd_page) {
1597 lock_page(bd_page); 1600 lock_page(bd_page);
@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1677 list_for_each_entry(segbuf, logs, sb_list) { 1680 list_for_each_entry(segbuf, logs, sb_list) {
1678 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1681 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1679 b_assoc_buffers) { 1682 b_assoc_buffers) {
1683 clear_buffer_async_write(bh);
1680 if (bh->b_page != bd_page) { 1684 if (bh->b_page != bd_page) {
1681 if (bd_page) 1685 if (bd_page)
1682 end_page_writeback(bd_page); 1686 end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1686 1690
1687 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1691 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1688 b_assoc_buffers) { 1692 b_assoc_buffers) {
1693 clear_buffer_async_write(bh);
1689 if (bh == segbuf->sb_super_root) { 1694 if (bh == segbuf->sb_super_root) {
1690 if (bh->b_page != bd_page) { 1695 if (bh->b_page != bd_page) {
1691 end_page_writeback(bd_page); 1696 end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1755 b_assoc_buffers) { 1760 b_assoc_buffers) {
1756 set_buffer_uptodate(bh); 1761 set_buffer_uptodate(bh);
1757 clear_buffer_dirty(bh); 1762 clear_buffer_dirty(bh);
1763 clear_buffer_async_write(bh);
1758 if (bh->b_page != bd_page) { 1764 if (bh->b_page != bd_page) {
1759 if (bd_page) 1765 if (bd_page)
1760 end_page_writeback(bd_page); 1766 end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1776 b_assoc_buffers) { 1782 b_assoc_buffers) {
1777 set_buffer_uptodate(bh); 1783 set_buffer_uptodate(bh);
1778 clear_buffer_dirty(bh); 1784 clear_buffer_dirty(bh);
1785 clear_buffer_async_write(bh);
1779 clear_buffer_delay(bh); 1786 clear_buffer_delay(bh);
1780 clear_buffer_nilfs_volatile(bh); 1787 clear_buffer_nilfs_volatile(bh);
1781 clear_buffer_nilfs_redirected(bh); 1788 clear_buffer_nilfs_redirected(bh);
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index ef999729e274..0d3a97d2d5f6 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -70,9 +70,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
70 */ 70 */
71 if (inode == NULL) { 71 if (inode == NULL) {
72 unsigned long gen = (unsigned long) dentry->d_fsdata; 72 unsigned long gen = (unsigned long) dentry->d_fsdata;
73 unsigned long pgen = 73 unsigned long pgen;
74 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; 74 spin_lock(&dentry->d_lock);
75 75 pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
76 spin_unlock(&dentry->d_lock);
76 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, 77 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
77 dentry->d_name.name, 78 dentry->d_name.name,
78 pgen, gen); 79 pgen, gen);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 121da2dc3be8..d4e81e4a9b04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1924{ 1924{
1925 int tmp, hangup_needed = 0; 1925 int tmp, hangup_needed = 0;
1926 struct ocfs2_super *osb = NULL; 1926 struct ocfs2_super *osb = NULL;
1927 char nodestr[8]; 1927 char nodestr[12];
1928 1928
1929 trace_ocfs2_dismount_volume(sb); 1929 trace_ocfs2_dismount_volume(sb);
1930 1930
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 73feacc49b2e..fd777032c2ba 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1163 return NULL; 1163 return NULL;
1164} 1164}
1165 1165
1166static int newer_jl_done(struct reiserfs_journal_cnode *cn)
1167{
1168 struct super_block *sb = cn->sb;
1169 b_blocknr_t blocknr = cn->blocknr;
1170
1171 cn = cn->hprev;
1172 while (cn) {
1173 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
1174 atomic_read(&cn->jlist->j_commit_left) != 0)
1175 return 0;
1176 cn = cn->hprev;
1177 }
1178 return 1;
1179}
1180
1181static void remove_journal_hash(struct super_block *, 1166static void remove_journal_hash(struct super_block *,
1182 struct reiserfs_journal_cnode **, 1167 struct reiserfs_journal_cnode **,
1183 struct reiserfs_journal_list *, unsigned long, 1168 struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
1353 reiserfs_warning(s, "clm-2048", "called with wcount %d", 1338 reiserfs_warning(s, "clm-2048", "called with wcount %d",
1354 atomic_read(&journal->j_wcount)); 1339 atomic_read(&journal->j_wcount));
1355 } 1340 }
1356 BUG_ON(jl->j_trans_id == 0);
1357 1341
1358 /* if flushall == 0, the lock is already held */ 1342 /* if flushall == 0, the lock is already held */
1359 if (flushall) { 1343 if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
1593 return err; 1577 return err;
1594} 1578}
1595 1579
1596static int test_transaction(struct super_block *s,
1597 struct reiserfs_journal_list *jl)
1598{
1599 struct reiserfs_journal_cnode *cn;
1600
1601 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
1602 return 1;
1603
1604 cn = jl->j_realblock;
1605 while (cn) {
1606 /* if the blocknr == 0, this has been cleared from the hash,
1607 ** skip it
1608 */
1609 if (cn->blocknr == 0) {
1610 goto next;
1611 }
1612 if (cn->bh && !newer_jl_done(cn))
1613 return 0;
1614 next:
1615 cn = cn->next;
1616 cond_resched();
1617 }
1618 return 0;
1619}
1620
1621static int write_one_transaction(struct super_block *s, 1580static int write_one_transaction(struct super_block *s,
1622 struct reiserfs_journal_list *jl, 1581 struct reiserfs_journal_list *jl,
1623 struct buffer_chunk *chunk) 1582 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
1805 break; 1764 break;
1806 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); 1765 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1807 } 1766 }
1767 get_journal_list(jl);
1768 get_journal_list(flush_jl);
1808 /* try to find a group of blocks we can flush across all the 1769 /* try to find a group of blocks we can flush across all the
1809 ** transactions, but only bother if we've actually spanned 1770 ** transactions, but only bother if we've actually spanned
1810 ** across multiple lists 1771 ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
1813 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1774 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1814 } 1775 }
1815 flush_journal_list(s, flush_jl, 1); 1776 flush_journal_list(s, flush_jl, 1);
1777 put_journal_list(s, flush_jl);
1778 put_journal_list(s, jl);
1816 return 0; 1779 return 0;
1817} 1780}
1818 1781
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
3868 return 1; 3831 return 1;
3869} 3832}
3870 3833
3871static void flush_old_journal_lists(struct super_block *s)
3872{
3873 struct reiserfs_journal *journal = SB_JOURNAL(s);
3874 struct reiserfs_journal_list *jl;
3875 struct list_head *entry;
3876 time_t now = get_seconds();
3877
3878 while (!list_empty(&journal->j_journal_list)) {
3879 entry = journal->j_journal_list.next;
3880 jl = JOURNAL_LIST_ENTRY(entry);
3881 /* this check should always be run, to send old lists to disk */
3882 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3883 atomic_read(&jl->j_commit_left) == 0 &&
3884 test_transaction(s, jl)) {
3885 flush_used_journal_lists(s, jl);
3886 } else {
3887 break;
3888 }
3889 }
3890}
3891
3892/* 3834/*
3893** long and ugly. If flush, will not return until all commit 3835** long and ugly. If flush, will not return until all commit
3894** blocks and all real buffers in the trans are on disk. 3836** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4232 } 4174 }
4233 } 4175 }
4234 } 4176 }
4235 flush_old_journal_lists(sb);
4236 4177
4237 journal->j_current_jl->j_list_bitmap = 4178 journal->j_current_jl->j_list_bitmap =
4238 get_list_bitmap(sb, journal->j_current_jl); 4179 get_list_bitmap(sb, journal->j_current_jl);
diff --git a/fs/statfs.c b/fs/statfs.c
index c219e733f553..083dc0ac9140 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -94,7 +94,7 @@ retry:
94 94
95int fd_statfs(int fd, struct kstatfs *st) 95int fd_statfs(int fd, struct kstatfs *st)
96{ 96{
97 struct fd f = fdget(fd); 97 struct fd f = fdget_raw(fd);
98 int error = -EBADF; 98 int error = -EBADF;
99 if (f.file) { 99 if (f.file) {
100 error = vfs_statfs(&f.file->f_path, st); 100 error = vfs_statfs(&f.file->f_path, st);
diff --git a/fs/super.c b/fs/super.c
index 3a96c9783a8b..0225c20f8770 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -264,6 +264,8 @@ out_free_sb:
264 */ 264 */
265static inline void destroy_super(struct super_block *s) 265static inline void destroy_super(struct super_block *s)
266{ 266{
267 list_lru_destroy(&s->s_dentry_lru);
268 list_lru_destroy(&s->s_inode_lru);
267#ifdef CONFIG_SMP 269#ifdef CONFIG_SMP
268 free_percpu(s->s_files); 270 free_percpu(s->s_files);
269#endif 271#endif
@@ -323,8 +325,6 @@ void deactivate_locked_super(struct super_block *s)
323 325
324 /* caches are now gone, we can safely kill the shrinker now */ 326 /* caches are now gone, we can safely kill the shrinker now */
325 unregister_shrinker(&s->s_shrink); 327 unregister_shrinker(&s->s_shrink);
326 list_lru_destroy(&s->s_dentry_lru);
327 list_lru_destroy(&s->s_inode_lru);
328 328
329 put_filesystem(fs); 329 put_filesystem(fs);
330 put_super(s); 330 put_super(s);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d0c6a007ce83..eda10959714f 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -487,6 +487,7 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
487 sbi->s_sb = sb; 487 sbi->s_sb = sb;
488 sbi->s_block_base = 0; 488 sbi->s_block_base = 0;
489 sbi->s_type = FSTYPE_V7; 489 sbi->s_type = FSTYPE_V7;
490 mutex_init(&sbi->s_lock);
490 sb->s_fs_info = sbi; 491 sb->s_fs_info = sbi;
491 492
492 sb_set_blocksize(sb, 512); 493 sb_set_blocksize(sb, 512);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 7e5aae4bf46f..6eaf5edf1ea1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
30{ 30{
31 struct super_block *sb = inode->i_sb; 31 struct super_block *sb = inode->i_sb;
32 struct udf_sb_info *sbi = UDF_SB(sb); 32 struct udf_sb_info *sbi = UDF_SB(sb);
33 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
33 34
34 mutex_lock(&sbi->s_alloc_mutex); 35 if (lvidiu) {
35 if (sbi->s_lvid_bh) { 36 mutex_lock(&sbi->s_alloc_mutex);
36 struct logicalVolIntegrityDescImpUse *lvidiu =
37 udf_sb_lvidiu(sbi);
38 if (S_ISDIR(inode->i_mode)) 37 if (S_ISDIR(inode->i_mode))
39 le32_add_cpu(&lvidiu->numDirs, -1); 38 le32_add_cpu(&lvidiu->numDirs, -1);
40 else 39 else
41 le32_add_cpu(&lvidiu->numFiles, -1); 40 le32_add_cpu(&lvidiu->numFiles, -1);
42 udf_updated_lvid(sb); 41 udf_updated_lvid(sb);
42 mutex_unlock(&sbi->s_alloc_mutex);
43 } 43 }
44 mutex_unlock(&sbi->s_alloc_mutex);
45 44
46 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); 45 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
47} 46}
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
55 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 54 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
56 struct udf_inode_info *iinfo; 55 struct udf_inode_info *iinfo;
57 struct udf_inode_info *dinfo = UDF_I(dir); 56 struct udf_inode_info *dinfo = UDF_I(dir);
57 struct logicalVolIntegrityDescImpUse *lvidiu;
58 58
59 inode = new_inode(sb); 59 inode = new_inode(sb);
60 60
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
92 return NULL; 92 return NULL;
93 } 93 }
94 94
95 if (sbi->s_lvid_bh) { 95 lvidiu = udf_sb_lvidiu(sb);
96 struct logicalVolIntegrityDescImpUse *lvidiu; 96 if (lvidiu) {
97
98 iinfo->i_unique = lvid_get_unique_id(sb); 97 iinfo->i_unique = lvid_get_unique_id(sb);
99 mutex_lock(&sbi->s_alloc_mutex); 98 mutex_lock(&sbi->s_alloc_mutex);
100 lvidiu = udf_sb_lvidiu(sbi);
101 if (S_ISDIR(mode)) 99 if (S_ISDIR(mode))
102 le32_add_cpu(&lvidiu->numDirs, 1); 100 le32_add_cpu(&lvidiu->numDirs, 1);
103 else 101 else
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2bad7f45..91219385691d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
94static int udf_statfs(struct dentry *, struct kstatfs *); 94static int udf_statfs(struct dentry *, struct kstatfs *);
95static int udf_show_options(struct seq_file *, struct dentry *); 95static int udf_show_options(struct seq_file *, struct dentry *);
96 96
97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) 97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
98{ 98{
99 struct logicalVolIntegrityDesc *lvid = 99 struct logicalVolIntegrityDesc *lvid;
100 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; 100 unsigned int partnum;
101 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); 101 unsigned int offset;
102 __u32 offset = number_of_partitions * 2 * 102
103 sizeof(uint32_t)/sizeof(uint8_t); 103 if (!UDF_SB(sb)->s_lvid_bh)
104 return NULL;
105 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
106 partnum = le32_to_cpu(lvid->numOfPartitions);
107 if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
108 offsetof(struct logicalVolIntegrityDesc, impUse)) /
109 (2 * sizeof(uint32_t)) < partnum) {
110 udf_err(sb, "Logical volume integrity descriptor corrupted "
111 "(numOfPartitions = %u)!\n", partnum);
112 return NULL;
113 }
114 /* The offset is to skip freeSpaceTable and sizeTable arrays */
115 offset = partnum * 2 * sizeof(uint32_t);
104 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); 116 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
105} 117}
106 118
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
629 struct udf_options uopt; 641 struct udf_options uopt;
630 struct udf_sb_info *sbi = UDF_SB(sb); 642 struct udf_sb_info *sbi = UDF_SB(sb);
631 int error = 0; 643 int error = 0;
644 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
632 645
633 if (sbi->s_lvid_bh) { 646 if (lvidiu) {
634 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); 647 int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
635 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) 648 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
636 return -EACCES; 649 return -EACCES;
637 } 650 }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
1905 1918
1906 if (!bh) 1919 if (!bh)
1907 return; 1920 return;
1908
1909 mutex_lock(&sbi->s_alloc_mutex);
1910 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 1921 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1911 lvidiu = udf_sb_lvidiu(sbi); 1922 lvidiu = udf_sb_lvidiu(sb);
1923 if (!lvidiu)
1924 return;
1912 1925
1926 mutex_lock(&sbi->s_alloc_mutex);
1913 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1927 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1914 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1928 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1915 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, 1929 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
1937 1951
1938 if (!bh) 1952 if (!bh)
1939 return; 1953 return;
1954 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1955 lvidiu = udf_sb_lvidiu(sb);
1956 if (!lvidiu)
1957 return;
1940 1958
1941 mutex_lock(&sbi->s_alloc_mutex); 1959 mutex_lock(&sbi->s_alloc_mutex);
1942 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1943 lvidiu = udf_sb_lvidiu(sbi);
1944 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1960 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1945 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1961 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1946 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); 1962 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2093 2109
2094 if (sbi->s_lvid_bh) { 2110 if (sbi->s_lvid_bh) {
2095 struct logicalVolIntegrityDescImpUse *lvidiu = 2111 struct logicalVolIntegrityDescImpUse *lvidiu =
2096 udf_sb_lvidiu(sbi); 2112 udf_sb_lvidiu(sb);
2097 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); 2113 uint16_t minUDFReadRev;
2098 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); 2114 uint16_t minUDFWriteRev;
2099 /* uint16_t maxUDFWriteRev =
2100 le16_to_cpu(lvidiu->maxUDFWriteRev); */
2101 2115
2116 if (!lvidiu) {
2117 ret = -EINVAL;
2118 goto error_out;
2119 }
2120 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2121 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2102 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 2122 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2103 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2123 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2104 le16_to_cpu(lvidiu->minUDFReadRev), 2124 minUDFReadRev,
2105 UDF_MAX_READ_VERSION); 2125 UDF_MAX_READ_VERSION);
2106 ret = -EINVAL; 2126 ret = -EINVAL;
2107 goto error_out; 2127 goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2265 struct logicalVolIntegrityDescImpUse *lvidiu; 2285 struct logicalVolIntegrityDescImpUse *lvidiu;
2266 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2286 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2267 2287
2268 if (sbi->s_lvid_bh != NULL) 2288 lvidiu = udf_sb_lvidiu(sb);
2269 lvidiu = udf_sb_lvidiu(sbi);
2270 else
2271 lvidiu = NULL;
2272
2273 buf->f_type = UDF_SUPER_MAGIC; 2289 buf->f_type = UDF_SUPER_MAGIC;
2274 buf->f_bsize = sb->s_blocksize; 2290 buf->f_bsize = sb->s_blocksize;
2275 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; 2291 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index ed401e94aa8c..1f32c7bd9f57 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
162 return sb->s_fs_info; 162 return sb->s_fs_info;
163} 163}
164 164
165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); 165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
166 166
167int udf_compute_nr_groups(struct super_block *sb, u32 partition); 167int udf_compute_nr_groups(struct super_block *sb, u32 partition);
168 168
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 88c5ea75ebf6..f1d85cfc0a54 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
628 else if (aborted) { 628 else if (aborted) {
629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); 629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
630 if (lip->li_flags & XFS_LI_IN_AIL) { 630 if (lip->li_flags & XFS_LI_IN_AIL) {
631 spin_lock(&lip->li_ailp->xa_lock);
631 xfs_trans_ail_delete(lip->li_ailp, lip, 632 xfs_trans_ail_delete(lip->li_ailp, lip,
632 SHUTDOWN_LOG_IO_ERROR); 633 SHUTDOWN_LOG_IO_ERROR);
633 } 634 }
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 069537c845e5..20bf8e8002d6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
1224 /* start with smaller blk num */ 1224 /* start with smaller blk num */
1225 forward = nodehdr.forw < nodehdr.back; 1225 forward = nodehdr.forw < nodehdr.back;
1226 for (i = 0; i < 2; forward = !forward, i++) { 1226 for (i = 0; i < 2; forward = !forward, i++) {
1227 struct xfs_da3_icnode_hdr thdr;
1227 if (forward) 1228 if (forward)
1228 blkno = nodehdr.forw; 1229 blkno = nodehdr.forw;
1229 else 1230 else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
1236 return(error); 1237 return(error);
1237 1238
1238 node = bp->b_addr; 1239 node = bp->b_addr;
1239 xfs_da3_node_hdr_from_disk(&nodehdr, node); 1240 xfs_da3_node_hdr_from_disk(&thdr, node);
1240 xfs_trans_brelse(state->args->trans, bp); 1241 xfs_trans_brelse(state->args->trans, bp);
1241 1242
1242 if (count - nodehdr.count >= 0) 1243 if (count - thdr.count >= 0)
1243 break; /* fits with at least 25% to spare */ 1244 break; /* fits with at least 25% to spare */
1244 } 1245 }
1245 if (i >= 2) { 1246 if (i >= 2) {
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 0957aa98b6c0..12dad188939d 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -1158,7 +1158,7 @@ xfs_dir2_sf_to_block(
1158 /* 1158 /*
1159 * Create entry for . 1159 * Create entry for .
1160 */ 1160 */
1161 dep = xfs_dir3_data_dot_entry_p(hdr); 1161 dep = xfs_dir3_data_dot_entry_p(mp, hdr);
1162 dep->inumber = cpu_to_be64(dp->i_ino); 1162 dep->inumber = cpu_to_be64(dp->i_ino);
1163 dep->namelen = 1; 1163 dep->namelen = 1;
1164 dep->name[0] = '.'; 1164 dep->name[0] = '.';
@@ -1172,7 +1172,7 @@ xfs_dir2_sf_to_block(
1172 /* 1172 /*
1173 * Create entry for .. 1173 * Create entry for ..
1174 */ 1174 */
1175 dep = xfs_dir3_data_dotdot_entry_p(hdr); 1175 dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp)); 1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
1177 dep->namelen = 2; 1177 dep->namelen = 2;
1178 dep->name[0] = dep->name[1] = '.'; 1178 dep->name[0] = dep->name[1] = '.';
@@ -1183,7 +1183,7 @@ xfs_dir2_sf_to_block(
1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); 1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1185 (char *)dep - (char *)hdr)); 1185 (char *)dep - (char *)hdr));
1186 offset = xfs_dir3_data_first_offset(hdr); 1186 offset = xfs_dir3_data_first_offset(mp);
1187 /* 1187 /*
1188 * Loop over existing entries, stuff them in. 1188 * Loop over existing entries, stuff them in.
1189 */ 1189 */
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index a0961a61ac1a..9cf67381adf6 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -497,69 +497,58 @@ xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
497/* 497/*
498 * Offsets of . and .. in data space (always block 0) 498 * Offsets of . and .. in data space (always block 0)
499 * 499 *
500 * The macros are used for shortform directories as they have no headers to read
501 * the magic number out of. Shortform directories need to know the size of the
502 * data block header because the sfe embeds the block offset of the entry into
503 * it so that it doesn't change when format conversion occurs. Bad Things Happen
504 * if we don't follow this rule.
505 *
506 * XXX: there is scope for significant optimisation of the logic here. Right 500 * XXX: there is scope for significant optimisation of the logic here. Right
507 * now we are checking for "dir3 format" over and over again. Ideally we should 501 * now we are checking for "dir3 format" over and over again. Ideally we should
508 * only do it once for each operation. 502 * only do it once for each operation.
509 */ 503 */
510#define XFS_DIR3_DATA_DOT_OFFSET(mp) \
511 xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&(mp)->m_sb))
512#define XFS_DIR3_DATA_DOTDOT_OFFSET(mp) \
513 (XFS_DIR3_DATA_DOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 1))
514#define XFS_DIR3_DATA_FIRST_OFFSET(mp) \
515 (XFS_DIR3_DATA_DOTDOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 2))
516
517static inline xfs_dir2_data_aoff_t 504static inline xfs_dir2_data_aoff_t
518xfs_dir3_data_dot_offset(struct xfs_dir2_data_hdr *hdr) 505xfs_dir3_data_dot_offset(struct xfs_mount *mp)
519{ 506{
520 return xfs_dir3_data_entry_offset(hdr); 507 return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
521} 508}
522 509
523static inline xfs_dir2_data_aoff_t 510static inline xfs_dir2_data_aoff_t
524xfs_dir3_data_dotdot_offset(struct xfs_dir2_data_hdr *hdr) 511xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
525{ 512{
526 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 513 return xfs_dir3_data_dot_offset(mp) +
527 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 514 xfs_dir3_data_entsize(mp, 1);
528 return xfs_dir3_data_dot_offset(hdr) +
529 __xfs_dir3_data_entsize(dir3, 1);
530} 515}
531 516
532static inline xfs_dir2_data_aoff_t 517static inline xfs_dir2_data_aoff_t
533xfs_dir3_data_first_offset(struct xfs_dir2_data_hdr *hdr) 518xfs_dir3_data_first_offset(struct xfs_mount *mp)
534{ 519{
535 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 520 return xfs_dir3_data_dotdot_offset(mp) +
536 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 521 xfs_dir3_data_entsize(mp, 2);
537 return xfs_dir3_data_dotdot_offset(hdr) +
538 __xfs_dir3_data_entsize(dir3, 2);
539} 522}
540 523
541/* 524/*
542 * location of . and .. in data space (always block 0) 525 * location of . and .. in data space (always block 0)
543 */ 526 */
544static inline struct xfs_dir2_data_entry * 527static inline struct xfs_dir2_data_entry *
545xfs_dir3_data_dot_entry_p(struct xfs_dir2_data_hdr *hdr) 528xfs_dir3_data_dot_entry_p(
529 struct xfs_mount *mp,
530 struct xfs_dir2_data_hdr *hdr)
546{ 531{
547 return (struct xfs_dir2_data_entry *) 532 return (struct xfs_dir2_data_entry *)
548 ((char *)hdr + xfs_dir3_data_dot_offset(hdr)); 533 ((char *)hdr + xfs_dir3_data_dot_offset(mp));
549} 534}
550 535
551static inline struct xfs_dir2_data_entry * 536static inline struct xfs_dir2_data_entry *
552xfs_dir3_data_dotdot_entry_p(struct xfs_dir2_data_hdr *hdr) 537xfs_dir3_data_dotdot_entry_p(
538 struct xfs_mount *mp,
539 struct xfs_dir2_data_hdr *hdr)
553{ 540{
554 return (struct xfs_dir2_data_entry *) 541 return (struct xfs_dir2_data_entry *)
555 ((char *)hdr + xfs_dir3_data_dotdot_offset(hdr)); 542 ((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
556} 543}
557 544
558static inline struct xfs_dir2_data_entry * 545static inline struct xfs_dir2_data_entry *
559xfs_dir3_data_first_entry_p(struct xfs_dir2_data_hdr *hdr) 546xfs_dir3_data_first_entry_p(
547 struct xfs_mount *mp,
548 struct xfs_dir2_data_hdr *hdr)
560{ 549{
561 return (struct xfs_dir2_data_entry *) 550 return (struct xfs_dir2_data_entry *)
562 ((char *)hdr + xfs_dir3_data_first_offset(hdr)); 551 ((char *)hdr + xfs_dir3_data_first_offset(mp));
563} 552}
564 553
565/* 554/*
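[Editor's note] The offset chain introduced above is pure arithmetic: "." starts right after the data block header, ".." follows it by the size of a one-byte-name entry, and the first real entry follows by the size of a two-byte-name entry, all derived from the mount rather than from a block header that shortform directories do not have. A minimal standalone sketch of that layout calculation, using made-up header and entry sizes rather than the real xfs_dir3_data_hdr_size()/xfs_dir3_data_entsize() helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real header/entry-size helpers. */
static uint32_t data_hdr_size(bool has_crc)
{
	return has_crc ? 64 : 16;	/* illustrative sizes only */
}

static uint32_t data_entsize(unsigned namelen)
{
	/* 8-byte inumber + 1-byte namelen + name + 2-byte tag, rounded to 8 */
	uint32_t raw = 8 + 1 + namelen + 2;
	return (raw + 7) & ~7u;
}

int main(void)
{
	bool has_crc = true;	/* "dir3" format, i.e. a CRC-enabled filesystem */

	uint32_t dot    = data_hdr_size(has_crc);	/* "."  */
	uint32_t dotdot = dot + data_entsize(1);	/* ".." */
	uint32_t first  = dotdot + data_entsize(2);	/* first user entry */

	printf("dot=%u dotdot=%u first=%u\n", dot, dotdot, first);
	return 0;
}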
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8993ec17452c..8f84153e98a8 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -119,9 +119,9 @@ xfs_dir2_sf_getdents(
119 * mp->m_dirdatablk. 119 * mp->m_dirdatablk.
120 */ 120 */
121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
122 XFS_DIR3_DATA_DOT_OFFSET(mp)); 122 xfs_dir3_data_dot_offset(mp));
123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
124 XFS_DIR3_DATA_DOTDOT_OFFSET(mp)); 124 xfs_dir3_data_dotdot_offset(mp));
125 125
126 /* 126 /*
127 * Put . entry unless we're starting past it. 127 * Put . entry unless we're starting past it.
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index bb6e2848f473..3ef6d402084c 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -557,7 +557,7 @@ xfs_dir2_sf_addname_hard(
557 * to insert the new entry. 557 * to insert the new entry.
558 * If it's going to end up at the end then oldsfep will point there. 558 * If it's going to end up at the end then oldsfep will point there.
559 */ 559 */
560 for (offset = XFS_DIR3_DATA_FIRST_OFFSET(mp), 560 for (offset = xfs_dir3_data_first_offset(mp),
561 oldsfep = xfs_dir2_sf_firstentry(oldsfp), 561 oldsfep = xfs_dir2_sf_firstentry(oldsfp),
562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen), 562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
563 eof = (char *)oldsfep == &buf[old_isize]; 563 eof = (char *)oldsfep == &buf[old_isize];
@@ -640,7 +640,7 @@ xfs_dir2_sf_addname_pick(
640 640
641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
642 size = xfs_dir3_data_entsize(mp, args->namelen); 642 size = xfs_dir3_data_entsize(mp, args->namelen);
643 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 643 offset = xfs_dir3_data_first_offset(mp);
644 sfep = xfs_dir2_sf_firstentry(sfp); 644 sfep = xfs_dir2_sf_firstentry(sfp);
645 holefit = 0; 645 holefit = 0;
646 /* 646 /*
@@ -713,7 +713,7 @@ xfs_dir2_sf_check(
713 mp = dp->i_mount; 713 mp = dp->i_mount;
714 714
715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
716 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 716 offset = xfs_dir3_data_first_offset(mp);
717 ino = xfs_dir2_sf_get_parent_ino(sfp); 717 ino = xfs_dir2_sf_get_parent_ino(sfp);
718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
719 719
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 71520e6e5d65..1ee776d477c3 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -64,7 +64,8 @@ int xfs_dqerror_mod = 33;
64struct kmem_zone *xfs_qm_dqtrxzone; 64struct kmem_zone *xfs_qm_dqtrxzone;
65static struct kmem_zone *xfs_qm_dqzone; 65static struct kmem_zone *xfs_qm_dqzone;
66 66
67static struct lock_class_key xfs_dquot_other_class; 67static struct lock_class_key xfs_dquot_group_class;
68static struct lock_class_key xfs_dquot_project_class;
68 69
69/* 70/*
70 * This is called to free all the memory associated with a dquot 71 * This is called to free all the memory associated with a dquot
@@ -703,8 +704,20 @@ xfs_qm_dqread(
703 * Make sure group quotas have a different lock class than user 704 * Make sure group quotas have a different lock class than user
704 * quotas. 705 * quotas.
705 */ 706 */
706 if (!(type & XFS_DQ_USER)) 707 switch (type) {
707 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); 708 case XFS_DQ_USER:
709 /* uses the default lock class */
710 break;
711 case XFS_DQ_GROUP:
712 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
713 break;
714 case XFS_DQ_PROJ:
715 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
716 break;
717 default:
718 ASSERT(0);
719 break;
720 }
708 721
709 XFS_STATS_INC(xs_qm_dquot); 722 XFS_STATS_INC(xs_qm_dquot);
710 723
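[Editor's note] The hunk above replaces the single "other" lockdep key with one key per quota type so that user, group and project dquot locks are distinguishable when they nest. A rough standalone illustration of the pattern (one class selected per type in a single switch, with the first type keeping the default); this is not the kernel lockdep API:

#include <assert.h>
#include <stdio.h>

enum dq_type { DQ_USER, DQ_GROUP, DQ_PROJ };

struct toy_dquot {
	enum dq_type type;
	const char *lock_class;	/* stand-in for a struct lock_class_key */
};

static void toy_dquot_init_lock_class(struct toy_dquot *dq)
{
	switch (dq->type) {
	case DQ_USER:
		dq->lock_class = "dquot-user";		/* keeps the default class */
		break;
	case DQ_GROUP:
		dq->lock_class = "dquot-group";
		break;
	case DQ_PROJ:
		dq->lock_class = "dquot-project";
		break;
	default:
		assert(0);
	}
}

int main(void)
{
	struct toy_dquot g = { .type = DQ_GROUP };

	toy_dquot_init_lock_class(&g);
	printf("group dquot lock class: %s\n", g.lock_class);
	return 0;
}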
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1edb5cc3e5f4..18272c766a50 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ 515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) 516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) 517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks) 518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
519 519
520/* 520/*
521 * ioctl commands that replace IRIX syssgi()'s 521 * ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 193206ba4358..474807a401c8 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -119,11 +119,6 @@ xfs_inode_free(
119 ip->i_itemp = NULL; 119 ip->i_itemp = NULL;
120 } 120 }
121 121
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 ASSERT(!spin_is_locked(&ip->i_flags_lock));
125 ASSERT(!xfs_isiflocked(ip));
126
127 /* 122 /*
128 * Because we use RCU freeing we need to ensure the inode always 123 * Because we use RCU freeing we need to ensure the inode always
129 * appears to be reclaimed with an invalid inode number when in the 124 * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
135 ip->i_ino = 0; 130 ip->i_ino = 0;
136 spin_unlock(&ip->i_flags_lock); 131 spin_unlock(&ip->i_flags_lock);
137 132
133 /* asserts to verify all state is correct here */
134 ASSERT(atomic_read(&ip->i_pincount) == 0);
135 ASSERT(!xfs_isiflocked(ip));
136
138 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 137 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
139} 138}
140 139
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dabda9521b4b..39797490a1f1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1585,6 +1585,7 @@ xlog_recover_add_to_trans(
1585 "bad number of regions (%d) in inode log format", 1585 "bad number of regions (%d) in inode log format",
1586 in_f->ilf_size); 1586 in_f->ilf_size);
1587 ASSERT(0); 1587 ASSERT(0);
1588 kmem_free(ptr);
1588 return XFS_ERROR(EIO); 1589 return XFS_ERROR(EIO);
1589 } 1590 }
1590 1591
@@ -1970,6 +1971,13 @@ xlog_recover_do_inode_buffer(
1970 * magic number. If we don't recognise the magic number in the buffer, then 1971 * magic number. If we don't recognise the magic number in the buffer, then
1971 * return a LSN of -1 so that the caller knows it was an unrecognised block and 1972 * return a LSN of -1 so that the caller knows it was an unrecognised block and
1972 * so can recover the buffer. 1973 * so can recover the buffer.
1974 *
1975 * Note: we cannot rely solely on magic number matches to determine that the
1976 * buffer has a valid LSN - we also need to verify that it belongs to this
1977 * filesystem, so we need to extract the object's LSN and compare it to that
1978 * which we read from the superblock. If the UUIDs don't match, then we've got a
1979 * stale metadata block from an old filesystem instance that we need to recover
1980 * over the top of.
1973 */ 1981 */
1974static xfs_lsn_t 1982static xfs_lsn_t
1975xlog_recover_get_buf_lsn( 1983xlog_recover_get_buf_lsn(
@@ -1980,6 +1988,8 @@ xlog_recover_get_buf_lsn(
1980 __uint16_t magic16; 1988 __uint16_t magic16;
1981 __uint16_t magicda; 1989 __uint16_t magicda;
1982 void *blk = bp->b_addr; 1990 void *blk = bp->b_addr;
1991 uuid_t *uuid;
1992 xfs_lsn_t lsn = -1;
1983 1993
1984 /* v4 filesystems always recover immediately */ 1994 /* v4 filesystems always recover immediately */
1985 if (!xfs_sb_version_hascrc(&mp->m_sb)) 1995 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2002,79 @@ xlog_recover_get_buf_lsn(
1992 case XFS_ABTB_MAGIC: 2002 case XFS_ABTB_MAGIC:
1993 case XFS_ABTC_MAGIC: 2003 case XFS_ABTC_MAGIC:
1994 case XFS_IBT_CRC_MAGIC: 2004 case XFS_IBT_CRC_MAGIC:
1995 case XFS_IBT_MAGIC: 2005 case XFS_IBT_MAGIC: {
1996 return be64_to_cpu( 2006 struct xfs_btree_block *btb = blk;
1997 ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn); 2007
2008 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2009 uuid = &btb->bb_u.s.bb_uuid;
2010 break;
2011 }
1998 case XFS_BMAP_CRC_MAGIC: 2012 case XFS_BMAP_CRC_MAGIC:
1999 case XFS_BMAP_MAGIC: 2013 case XFS_BMAP_MAGIC: {
2000 return be64_to_cpu( 2014 struct xfs_btree_block *btb = blk;
2001 ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn); 2015
2016 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2017 uuid = &btb->bb_u.l.bb_uuid;
2018 break;
2019 }
2002 case XFS_AGF_MAGIC: 2020 case XFS_AGF_MAGIC:
2003 return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2021 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2022 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2023 break;
2004 case XFS_AGFL_MAGIC: 2024 case XFS_AGFL_MAGIC:
2005 return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2025 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2026 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2027 break;
2006 case XFS_AGI_MAGIC: 2028 case XFS_AGI_MAGIC:
2007 return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2029 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2030 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2031 break;
2008 case XFS_SYMLINK_MAGIC: 2032 case XFS_SYMLINK_MAGIC:
2009 return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2033 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2034 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2035 break;
2010 case XFS_DIR3_BLOCK_MAGIC: 2036 case XFS_DIR3_BLOCK_MAGIC:
2011 case XFS_DIR3_DATA_MAGIC: 2037 case XFS_DIR3_DATA_MAGIC:
2012 case XFS_DIR3_FREE_MAGIC: 2038 case XFS_DIR3_FREE_MAGIC:
2013 return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2039 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2040 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2041 break;
2014 case XFS_ATTR3_RMT_MAGIC: 2042 case XFS_ATTR3_RMT_MAGIC:
2015 return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 2043 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2044 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2045 break;
2016 case XFS_SB_MAGIC: 2046 case XFS_SB_MAGIC:
2017 return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2047 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2048 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2049 break;
2018 default: 2050 default:
2019 break; 2051 break;
2020 } 2052 }
2021 2053
2054 if (lsn != (xfs_lsn_t)-1) {
2055 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2056 goto recover_immediately;
2057 return lsn;
2058 }
2059
2022 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2060 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2023 switch (magicda) { 2061 switch (magicda) {
2024 case XFS_DIR3_LEAF1_MAGIC: 2062 case XFS_DIR3_LEAF1_MAGIC:
2025 case XFS_DIR3_LEAFN_MAGIC: 2063 case XFS_DIR3_LEAFN_MAGIC:
2026 case XFS_DA3_NODE_MAGIC: 2064 case XFS_DA3_NODE_MAGIC:
2027 return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2065 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2066 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2067 break;
2028 default: 2068 default:
2029 break; 2069 break;
2030 } 2070 }
2031 2071
2072 if (lsn != (xfs_lsn_t)-1) {
2073 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2074 goto recover_immediately;
2075 return lsn;
2076 }
2077
2032 /* 2078 /*
2033 * We do individual object checks on dquot and inode buffers as they 2079 * We do individual object checks on dquot and inode buffers as they
2034 * have their own individual LSN records. Also, we could have a stale 2080 * have their own individual LSN records. Also, we could have a stale
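[Editor's note] Each case above now extracts an (lsn, uuid) pair from the on-disk header and only trusts the LSN when the block's UUID matches the superblock UUID; anything else is treated as stale metadata from a previous filesystem and recovered unconditionally. A compressed standalone sketch of that decision, using toy structures rather than the real XFS headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_uuid { unsigned char b[16]; };

struct toy_hdr {
	uint32_t magic;
	uint64_t lsn;
	struct toy_uuid uuid;
};

#define TOY_MAGIC_A	0x41424344u
#define TOY_MAGIC_B	0x45464748u
#define LSN_RECOVER_NOW	((uint64_t)-1)

static bool toy_uuid_equal(const struct toy_uuid *a, const struct toy_uuid *b)
{
	return memcmp(a->b, b->b, sizeof(a->b)) == 0;
}

/*
 * Return the block's LSN if it carries one and belongs to this filesystem;
 * otherwise return LSN_RECOVER_NOW so the caller replays the buffer.
 */
static uint64_t toy_get_buf_lsn(const struct toy_hdr *blk,
				const struct toy_uuid *sb_uuid)
{
	uint64_t lsn = LSN_RECOVER_NOW;
	const struct toy_uuid *uuid = NULL;

	switch (blk->magic) {
	case TOY_MAGIC_A:
	case TOY_MAGIC_B:
		lsn = blk->lsn;
		uuid = &blk->uuid;
		break;
	default:
		break;			/* unrecognised: recover immediately */
	}

	if (lsn != LSN_RECOVER_NOW && !toy_uuid_equal(sb_uuid, uuid))
		return LSN_RECOVER_NOW;	/* stale block from another filesystem */
	return lsn;
}

int main(void)
{
	struct toy_uuid fs = { .b = { 1, 2, 3 } };
	struct toy_hdr good = { TOY_MAGIC_A, 100, fs };
	struct toy_hdr stale = { TOY_MAGIC_A, 100, { .b = { 9 } } };

	printf("good: %llu, stale: %llu\n",
	       (unsigned long long)toy_get_buf_lsn(&good, &fs),
	       (unsigned long long)toy_get_buf_lsn(&stale, &fs));
	return 0;
}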
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d06079c774a0..99b490b4d05a 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
6 return mk_pte(page, pgprot); 6 return mk_pte(page, pgprot);
7} 7}
8 8
9static inline int huge_pte_write(pte_t pte) 9static inline unsigned long huge_pte_write(pte_t pte)
10{ 10{
11 return pte_write(pte); 11 return pte_write(pte);
12} 12}
13 13
14static inline int huge_pte_dirty(pte_t pte) 14static inline unsigned long huge_pte_dirty(pte_t pte)
15{ 15{
16 return pte_dirty(pte); 16 return pte_dirty(pte);
17} 17}
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
index e69de29bb2d1..b1a49677fe25 100644
--- a/include/asm-generic/vtime.h
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
/* no content, but patch(1) dislikes empty files */
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index edbd250809cb..bed35e36fd27 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -23,7 +23,7 @@
23#define PULL_UP (1 << 4) 23#define PULL_UP (1 << 4)
24#define ALTELECTRICALSEL (1 << 5) 24#define ALTELECTRICALSEL (1 << 5)
25 25
26/* 34xx specific mux bit defines */ 26/* omap3/4/5 specific mux bit defines */
27#define INPUT_EN (1 << 8) 27#define INPUT_EN (1 << 8)
28#define OFF_EN (1 << 9) 28#define OFF_EN (1 << 9)
29#define OFFOUT_EN (1 << 10) 29#define OFFOUT_EN (1 << 10)
@@ -31,8 +31,6 @@
31#define OFF_PULL_EN (1 << 12) 31#define OFF_PULL_EN (1 << 12)
32#define OFF_PULL_UP (1 << 13) 32#define OFF_PULL_UP (1 << 13)
33#define WAKEUP_EN (1 << 14) 33#define WAKEUP_EN (1 << 14)
34
35/* 44xx specific mux bit defines */
36#define WAKEUP_EVENT (1 << 15) 34#define WAKEUP_EVENT (1 << 15)
37 35
38/* Active pin states */ 36/* Active pin states */
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f7f1d7169b11..089743ade734 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -159,6 +159,26 @@ static inline bool balloon_page_movable(struct page *page)
159} 159}
160 160
161/* 161/*
162 * isolated_balloon_page - identify an isolated balloon page on private
163 * compaction/migration page lists.
164 *
165 * After a compaction thread isolates a balloon page for migration, it raises
166 * the page refcount to prevent concurrent compaction threads from re-isolating
167 * the same page. For that reason putback_movable_pages(), or other routines
168 * that need to identify isolated balloon pages on private pagelists, cannot
169 * rely on balloon_page_movable() to accomplish the task.
170 */
171static inline bool isolated_balloon_page(struct page *page)
172{
173 /* Already isolated balloon pages, by default, have a raised refcount */
174 if (page_flags_cleared(page) && !page_mapped(page) &&
175 page_count(page) >= 2)
176 return __is_movable_balloon_page(page);
177
178 return false;
179}
180
181/*
162 * balloon_page_insert - insert a page into the balloon's page list and make 182 * balloon_page_insert - insert a page into the balloon's page list and make
163 * the page->mapping assignment accordingly. 183 * the page->mapping assignment accordingly.
164 * @page : page to be assigned as a 'balloon page' 184 * @page : page to be assigned as a 'balloon page'
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
243 return false; 263 return false;
244} 264}
245 265
266static inline bool isolated_balloon_page(struct page *page)
267{
268 return false;
269}
270
246static inline bool balloon_page_isolate(struct page *page) 271static inline bool balloon_page_isolate(struct page *page)
247{ 272{
248 return false; 273 return false;
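[Editor's note] The isolated_balloon_page() helper added above identifies an isolated balloon page by elimination: no interesting page flags set, not mapped, and carrying the extra reference that isolation takes, so a refcount of at least two is the distinguishing mark versus a resident balloon page's single reference. A toy model of that predicate over a fake page structure; none of the real page-flag helpers are used and this is purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page, tracking only what the predicates look at. */
struct toy_page {
	unsigned long flags;	/* 0 == "all interesting flags cleared" */
	int mapcount;		/* > 0 == mapped into some address space */
	int refcount;
	bool is_balloon;	/* stand-in for the page->mapping check */
};

static bool toy_balloon_page_movable(const struct toy_page *p)
{
	/* A resident balloon page holds exactly one reference. */
	return p->flags == 0 && p->mapcount == 0 &&
	       p->refcount == 1 && p->is_balloon;
}

static bool toy_isolated_balloon_page(const struct toy_page *p)
{
	/* Isolation raises the refcount, so look for >= 2 instead of == 1. */
	if (p->flags == 0 && p->mapcount == 0 && p->refcount >= 2)
		return p->is_balloon;
	return false;
}

int main(void)
{
	struct toy_page resident = { 0, 0, 1, true };
	struct toy_page isolated = { 0, 0, 2, true };

	printf("resident: movable=%d isolated=%d\n",
	       toy_balloon_page_movable(&resident),
	       toy_isolated_balloon_page(&resident));
	printf("isolated: movable=%d isolated=%d\n",
	       toy_balloon_page_movable(&isolated),
	       toy_isolated_balloon_page(&isolated));
	return 0;
}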
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index d66033f418c9..0333e605ea0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
242 struct bcma_device *core, bool enable); 242 struct bcma_device *core, bool enable);
243extern void bcma_core_pci_up(struct bcma_bus *bus); 243extern void bcma_core_pci_up(struct bcma_bus *bus);
244extern void bcma_core_pci_down(struct bcma_bus *bus); 244extern void bcma_core_pci_down(struct bcma_bus *bus);
245extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
245 246
246extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 247extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
247extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); 248extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 842de225055f..ded429966c1f 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -65,6 +65,21 @@
65#define __visible __attribute__((externally_visible)) 65#define __visible __attribute__((externally_visible))
66#endif 66#endif
67 67
68/*
69 * GCC 'asm goto' miscompiles certain code sequences:
70 *
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 *
73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
74 * Fixed in GCC 4.8.2 and later versions.
75 *
76 * (asm goto is automatically volatile - the naming reflects this.)
77 */
78#if GCC_VERSION <= 40801
79# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
80#else
81# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
82#endif
68 83
69#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP 84#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
70#if GCC_VERSION >= 40400 85#if GCC_VERSION >= 40400
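[Editor's note] The workaround above appends an empty asm statement after every asm goto on affected GCC releases, acting as a compiler barrier that avoids the miscompilation tracked in GCC PR58670. A minimal userspace illustration of how such a wrapper macro is used; the version check is collapsed to a plain GCC test, the assembly is x86-specific, and this is not the kernel header:

#include <stdio.h>

/*
 * Sketch of the wrapper: follow the asm goto with an empty asm("")
 * barrier, the same shape as the quirked kernel macro.
 */
#if defined(__GNUC__) && !defined(__clang__)
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#else
# error "this sketch relies on GCC's asm goto extension"
#endif

static int has_feature(void)
{
	/* Jump to 'missing' through the asm label operand; fall through otherwise. */
	asm_volatile_goto("test %0, %0\n\t"
			  "jz %l[missing]\n\t"
			  : : "r" (1) : "cc" : missing);
	return 1;
missing:
	return 0;
}

int main(void)
{
	printf("feature present: %d\n", has_feature());
	return 0;
}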
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 653073de09e3..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
406union map_info *dm_get_mapinfo(struct bio *bio); 406union map_info *dm_get_mapinfo(struct bio *bio);
407union map_info *dm_get_rq_mapinfo(struct request *rq); 407union map_info *dm_get_rq_mapinfo(struct request *rq);
408 408
409struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
410
409/* 411/*
410 * Geometry functions. 412 * Geometry functions.
411 */ 413 */
412int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); 414int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
413int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); 415int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
414 416
415
416/*----------------------------------------------------------------- 417/*-----------------------------------------------------------------
417 * Functions for manipulating device-mapper tables. 418 * Functions for manipulating device-mapper tables.
418 *---------------------------------------------------------------*/ 419 *---------------------------------------------------------------*/
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..d98503bde7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
30/* 30/*
31 * Framework version for util services. 31 * Framework version for util services.
32 */ 32 */
33#define UTIL_FW_MINOR 0
34
35#define UTIL_WS2K8_FW_MAJOR 1
36#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
33 37
34#define UTIL_FW_MAJOR 3 38#define UTIL_FW_MAJOR 3
35#define UTIL_FW_MINOR 0 39#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
36#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
37 40
38 41
39/* 42/*
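[Editor's note] The framework version is packed as (major << 16 | minor), so the new UTIL_WS2K8_FW_VERSION and UTIL_FW_VERSION values are simply two different majors combined with the shared minor. A quick standalone check of that packing and the matching unpacking; MAKE_FW_VERSION/FW_MAJOR/FW_MINOR are local helpers for this sketch, not part of the header:

#include <stdio.h>

#define UTIL_FW_MINOR		0
#define UTIL_WS2K8_FW_MAJOR	1
#define UTIL_FW_MAJOR		3

#define MAKE_FW_VERSION(major)	((major) << 16 | UTIL_FW_MINOR)
#define FW_MAJOR(ver)		((ver) >> 16)
#define FW_MINOR(ver)		((ver) & 0xffff)

int main(void)
{
	unsigned int ws2k8 = MAKE_FW_VERSION(UTIL_WS2K8_FW_MAJOR);
	unsigned int cur   = MAKE_FW_VERSION(UTIL_FW_MAJOR);

	printf("ws2k8:   0x%x (%u.%u)\n", ws2k8, FW_MAJOR(ws2k8), FW_MINOR(ws2k8));
	printf("current: 0x%x (%u.%u)\n", cur, FW_MAJOR(cur), FW_MINOR(cur));
	return 0;
}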
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 78e2ada50cd5..d380c5e68008 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -55,7 +55,7 @@
55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ 55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ 56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ 57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
58#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ 58#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ 59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
60 60
61#define OFFSET_STRIDE (9) 61#define OFFSET_STRIDE (9)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 482ad2d84a32..672ddc4de4af 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
439 return buf; 439 return buf;
440} 440}
441 441
442extern const char hex_asc_upper[];
443#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
444#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
445
446static inline char *hex_byte_pack_upper(char *buf, u8 byte)
447{
448 *buf++ = hex_asc_upper_hi(byte);
449 *buf++ = hex_asc_upper_lo(byte);
450 return buf;
451}
452
442static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) 453static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
443{ 454{
444 return hex_byte_pack(buf, byte); 455 return hex_byte_pack(buf, byte);
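[Editor's note] hex_byte_pack_upper() mirrors hex_byte_pack() but indexes an upper-case digit table, emitting the high nibble first and then the low nibble. A standalone userspace re-implementation for illustration; the kernel's hex_asc_upper[] table is declared extern above and defined elsewhere in lib/, so it is re-declared locally here:

#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

static char *hex_byte_pack_upper(char *buf, unsigned char byte)
{
	*buf++ = hex_asc_upper_hi(byte);	/* high nibble first */
	*buf++ = hex_asc_upper_lo(byte);
	return buf;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char out[sizeof(mac) * 3];	/* 2 hex chars + separator per byte */
	char *p = out;

	for (unsigned i = 0; i < sizeof(mac); i++) {
		p = hex_byte_pack_upper(p, mac[i]);
		*p++ = (i + 1 < sizeof(mac)) ? ':' : '\0';
	}
	printf("%s\n", out);	/* prints 00:1A:2B:3C:4D:5E */
	return 0;
}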
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..ecc82b37c4cc 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
53 unsigned int generation; 53 unsigned int generation;
54}; 54};
55 55
56enum mem_cgroup_filter_t {
57 VISIT, /* visit current node */
58 SKIP, /* skip the current node and continue traversal */
59 SKIP_TREE, /* skip the whole subtree and continue traversal */
60};
61
62/*
63 * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
64 * iterate through the hierarchy tree. Each tree element is checked by the
65 * predicate before it is returned by the iterator. If a filter returns
66 * SKIP or SKIP_TREE then the iterator code continues traversal (with the
67 * next node down the hierarchy or the next node that doesn't belong under the
68 * memcg's subtree).
69 */
70typedef enum mem_cgroup_filter_t
71(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
72
73#ifdef CONFIG_MEMCG 56#ifdef CONFIG_MEMCG
74/* 57/*
75 * All "charge" functions with gfp_mask should use GFP_KERNEL or 58 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
137extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, 120extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
138 struct page *oldpage, struct page *newpage, bool migration_ok); 121 struct page *oldpage, struct page *newpage, bool migration_ok);
139 122
140struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 123struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
141 struct mem_cgroup *prev, 124 struct mem_cgroup *,
142 struct mem_cgroup_reclaim_cookie *reclaim, 125 struct mem_cgroup_reclaim_cookie *);
143 mem_cgroup_iter_filter cond);
144
145static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
146 struct mem_cgroup *prev,
147 struct mem_cgroup_reclaim_cookie *reclaim)
148{
149 return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
150}
151
152void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 126void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
153 127
154/* 128/*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
260 mem_cgroup_update_page_stat(page, idx, -1); 234 mem_cgroup_update_page_stat(page, idx, -1);
261} 235}
262 236
263enum mem_cgroup_filter_t 237unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
264mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 238 gfp_t gfp_mask,
265 struct mem_cgroup *root); 239 unsigned long *total_scanned);
266 240
267void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); 241void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
268static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 242static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
376 struct page *oldpage, struct page *newpage, bool migration_ok) 350 struct page *oldpage, struct page *newpage, bool migration_ok)
377{ 351{
378} 352}
379static inline struct mem_cgroup *
380mem_cgroup_iter_cond(struct mem_cgroup *root,
381 struct mem_cgroup *prev,
382 struct mem_cgroup_reclaim_cookie *reclaim,
383 mem_cgroup_iter_filter cond)
384{
385 /* first call must return non-NULL, second return NULL */
386 return (struct mem_cgroup *)(unsigned long)!prev;
387}
388 353
389static inline struct mem_cgroup * 354static inline struct mem_cgroup *
390mem_cgroup_iter(struct mem_cgroup *root, 355mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
471} 436}
472 437
473static inline 438static inline
474enum mem_cgroup_filter_t 439unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
475mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 440 gfp_t gfp_mask,
476 struct mem_cgroup *root) 441 unsigned long *total_scanned)
477{ 442{
478 return VISIT; 443 return 0;
479} 444}
480 445
481static inline void mem_cgroup_split_huge_fixup(struct page *head) 446static inline void mem_cgroup_split_huge_fixup(struct page *head)
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 09c2300ddb37..cb358355ef43 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -45,6 +45,7 @@
45#define MAPPER_CTRL_MINOR 236 45#define MAPPER_CTRL_MINOR 236
46#define LOOP_CTRL_MINOR 237 46#define LOOP_CTRL_MINOR 237
47#define VHOST_NET_MINOR 238 47#define VHOST_NET_MINOR 238
48#define UHID_MINOR 239
48#define MISC_DYNAMIC_MINOR 255 49#define MISC_DYNAMIC_MINOR 255
49 50
50struct device; 51struct device;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..bab49da8a0f0 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18
19#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h>
20 20
21/* 21/*
22 * Simple, straightforward mutexes with strict semantics: 22 * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
175 175
176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
177 177
178#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX 178#ifndef arch_mutex_cpu_relax
179#define arch_mutex_cpu_relax() cpu_relax() 179# define arch_mutex_cpu_relax() cpu_relax()
180#endif 180#endif
181 181
182#endif 182#endif
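[Editor's note] The mutex.h change switches the override from a Kconfig symbol to a plain "#ifndef arch_mutex_cpu_relax", so an architecture only has to define the macro (via the newly included asm/processor.h) to replace the default cpu_relax(). A small userspace sketch of the same default-with-override idiom in a spin-wait loop; the "pause" instruction stands in for cpu_relax() and the x86 check is an assumption of this sketch:

#include <stdatomic.h>
#include <stdio.h>

/* Override point: define arch_mutex_cpu_relax elsewhere to replace the default. */
#ifndef arch_mutex_cpu_relax
# if defined(__x86_64__) || defined(__i386__)
#  define arch_mutex_cpu_relax()	__asm__ __volatile__("pause")
# else
#  define arch_mutex_cpu_relax()	((void)0)
# endif
#endif

static atomic_int ready;

static void spin_until_ready(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		arch_mutex_cpu_relax();	/* be polite to the sibling hyperthread */
}

int main(void)
{
	atomic_store_explicit(&ready, 1, memory_order_release);
	spin_until_ready();	/* returns immediately: ready was just set */
	printf("done\n");
	return 0;
}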
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 01fd84b566f7..49f52c8f4422 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
1455 struct inode * (*open_context) (struct inode *dir, 1455 struct inode * (*open_context) (struct inode *dir,
1456 struct nfs_open_context *ctx, 1456 struct nfs_open_context *ctx,
1457 int open_flags, 1457 int open_flags,
1458 struct iattr *iattr); 1458 struct iattr *iattr,
1459 int *);
1459 int (*have_delegation)(struct inode *, fmode_t); 1460 int (*have_delegation)(struct inode *, fmode_t);
1460 int (*return_delegation)(struct inode *); 1461 int (*return_delegation)(struct inode *);
1461 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); 1462 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..fcd63baee5f2 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
1#ifndef __OF_IRQ_H 1#ifndef __OF_IRQ_H
2#define __OF_IRQ_H 2#define __OF_IRQ_H
3 3
4#if defined(CONFIG_OF)
5struct of_irq;
6#include <linux/types.h> 4#include <linux/types.h>
7#include <linux/errno.h> 5#include <linux/errno.h>
8#include <linux/irq.h> 6#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
10#include <linux/ioport.h> 8#include <linux/ioport.h>
11#include <linux/of.h> 9#include <linux/of.h>
12 10
13/*
14 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
15 * implements it differently. However, the prototype is the same for all,
16 * so declare it here regardless of the CONFIG_OF_IRQ setting.
17 */
18extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
19
20#if defined(CONFIG_OF_IRQ)
21/** 11/**
22 * of_irq - container for device_node/irq_specifier pair for an irq controller 12 * of_irq - container for device_node/irq_specifier pair for an irq controller
23 * @controller: pointer to interrupt controller device tree node 13 * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
71extern int of_irq_count(struct device_node *dev); 61extern int of_irq_count(struct device_node *dev);
72extern int of_irq_to_resource_table(struct device_node *dev, 62extern int of_irq_to_resource_table(struct device_node *dev,
73 struct resource *res, int nr_irqs); 63 struct resource *res, int nr_irqs);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
75 64
76extern void of_irq_init(const struct of_device_id *matches); 65extern void of_irq_init(const struct of_device_id *matches);
77 66
78#endif /* CONFIG_OF_IRQ */ 67#if defined(CONFIG_OF)
68/*
69 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
70 * implements it differently. However, the prototype is the same for all,
71 * so declare it here regardless of the CONFIG_OF_IRQ setting.
72 */
73extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
79 75
80#else /* !CONFIG_OF */ 76#else /* !CONFIG_OF */
81static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 77static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 866e85c5eb94..c8ba627c1d60 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -294,9 +294,31 @@ struct ring_buffer;
294 */ 294 */
295struct perf_event { 295struct perf_event {
296#ifdef CONFIG_PERF_EVENTS 296#ifdef CONFIG_PERF_EVENTS
297 struct list_head group_entry; 297 /*
298 * entry onto perf_event_context::event_list;
299 * modifications require ctx->lock
300 * RCU safe iterations.
301 */
298 struct list_head event_entry; 302 struct list_head event_entry;
303
304 /*
305 * XXX: group_entry and sibling_list should be mutually exclusive;
306 * either you're a sibling on a group, or you're the group leader.
307 * Rework the code to always use the same list element.
308 *
309 * Locked for modification by both ctx->mutex and ctx->lock; holding
 310 * either suffices for read.
311 */
312 struct list_head group_entry;
299 struct list_head sibling_list; 313 struct list_head sibling_list;
314
315 /*
316 * We need storage to track the entries in perf_pmu_migrate_context; we
317 * cannot use the event_entry because of RCU and we want to keep the
 318 * group intact, which avoids using the other two entries.
319 */
320 struct list_head migrate_entry;
321
300 struct hlist_node hlist_entry; 322 struct hlist_node hlist_entry;
301 int nr_siblings; 323 int nr_siblings;
302 int group_flags; 324 int group_flags;
diff --git a/include/linux/random.h b/include/linux/random.h
index 3b9377d6b7a5..6312dd9ba449 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
17extern void get_random_bytes(void *buf, int nbytes); 17extern void get_random_bytes(void *buf, int nbytes);
18extern void get_random_bytes_arch(void *buf, int nbytes); 18extern void get_random_bytes_arch(void *buf, int nbytes);
19void generate_random_uuid(unsigned char uuid_out[16]); 19void generate_random_uuid(unsigned char uuid_out[16]);
20extern int random_int_secret_init(void);
20 21
21#ifndef MODULE 22#ifndef MODULE
22extern const struct file_operations random_fops, urandom_fops; 23extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 67e13aa5a478..9bdad43ad228 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,8 @@ enum regulator_status {
40}; 40};
41 41
42/** 42/**
43 * struct regulator_linear_range - specify linear voltage ranges
44 *
43 * Specify a range of voltages for regulator_map_linar_range() and 45 * Specify a range of voltages for regulator_map_linar_range() and
44 * regulator_list_linear_range(). 46 * regulator_list_linear_range().
45 * 47 *
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2ddb48d9312c..c2d89335f637 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -498,7 +498,7 @@ struct sk_buff {
498 * headers if needed 498 * headers if needed
499 */ 499 */
500 __u8 encapsulation:1; 500 __u8 encapsulation:1;
501 /* 7/9 bit hole (depending on ndisc_nodetype presence) */ 501 /* 6/8 bit hole (depending on ndisc_nodetype presence) */
502 kmemcheck_bitfield_end(flags2); 502 kmemcheck_bitfield_end(flags2);
503 503
504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL 504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfb7ca094b38..731f5237d5f4 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 155
156static inline void kick_all_cpus_sync(void) { } 156static inline void kick_all_cpus_sync(void) { }
157 157
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
158#endif /* !SMP */ 164#endif /* !SMP */
159 165
160/* 166/*
diff --git a/include/linux/timex.h b/include/linux/timex.h
index dd3edd7dfc94..9d3f1a5b6178 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -64,6 +64,20 @@
64 64
65#include <asm/timex.h> 65#include <asm/timex.h>
66 66
67#ifndef random_get_entropy
68/*
69 * The random_get_entropy() function is used by the /dev/random driver
70 * in order to extract entropy via the relative unpredictability of
 71 * when an interrupt takes place versus a high-speed, fine-grained
 72 * timing source or cycle counter. Since it is called on every
 73 * single interrupt, it must have a very low cost/overhead.
74 *
75 * By default we use get_cycles() for this purpose, but individual
76 * architectures may override this in their asm/timex.h header file.
77 */
78#define random_get_entropy() get_cycles()
79#endif
80
67/* 81/*
68 * SHIFT_PLL is used as a dampening factor to define how much we 82 * SHIFT_PLL is used as a dampening factor to define how much we
69 * adjust the frequency correction for a given offset in PLL mode. 83 * adjust the frequency correction for a given offset in PLL mode.
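[Editor's note] random_get_entropy() defaults to get_cycles() so the interrupt-timing entropy source gets an essentially free, fine-grained timestamp, while architectures without a cycle counter can override the macro in asm/timex.h. A standalone userspace sketch of the same idea, with an x86 TSC read as the fast path and clock_gettime() as the fallback; both choices are assumptions of this sketch, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>
/* Fast path: the time-stamp counter, one instruction, no syscall. */
#define random_get_entropy()	((unsigned long)__rdtsc())
#else
/* Fallback: a coarser but universally available clock. */
static unsigned long random_get_entropy(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_nsec;
}
#endif

int main(void)
{
	/* Two back-to-back samples already differ, which is the point:
	 * the low bits are hard to predict from the outside. */
	printf("%lu\n%lu\n", random_get_entropy(), random_get_entropy());
	return 0;
}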
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9cb2fe8ca944..e303eef94dd5 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,6 +42,7 @@ struct usbnet {
42 struct usb_host_endpoint *status; 42 struct usb_host_endpoint *status;
43 unsigned maxpacket; 43 unsigned maxpacket;
44 struct timer_list delay; 44 struct timer_list delay;
45 const char *padding_pkt;
45 46
46 /* protocol/interface state */ 47 /* protocol/interface state */
47 struct net_device *net; 48 struct net_device *net;
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 80cf8173a65b..2c02f3a8d2ba 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,15 +65,8 @@ struct pci_dev;
65 * out of the arbitration process (and can be safe to take 65 * out of the arbitration process (and can be safe to take
66 * interrupts at any time. 66 * interrupts at any time.
67 */ 67 */
68#if defined(CONFIG_VGA_ARB)
69extern void vga_set_legacy_decoding(struct pci_dev *pdev, 68extern void vga_set_legacy_decoding(struct pci_dev *pdev,
70 unsigned int decodes); 69 unsigned int decodes);
71#else
72static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
73 unsigned int decodes)
74{
75}
76#endif
77 70
78/** 71/**
79 * vga_get - acquire & locks VGA resources 72 * vga_get - acquire & locks VGA resources
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index fb314de2b61b..86505bfa5d2c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); 67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
68#endif 68#endif
69 69
70bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
71 const unsigned int prefix_len,
72 struct net_device *dev);
73
70int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); 74int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
71 75
72struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, 76struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaeaf0938ec0..15f10841e2b5 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -104,6 +104,7 @@ enum {
104enum { 104enum {
105 HCI_SETUP, 105 HCI_SETUP,
106 HCI_AUTO_OFF, 106 HCI_AUTO_OFF,
107 HCI_RFKILLED,
107 HCI_MGMT, 108 HCI_MGMT,
108 HCI_PAIRABLE, 109 HCI_PAIRABLE,
109 HCI_SERVICE_CACHE, 110 HCI_SERVICE_CACHE,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index f0d70f066f3d..9c4d37ec45a1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
723 struct rcu_head rcu_head; 723 struct rcu_head rcu_head;
724}; 724};
725 725
726/* In grace period after removing */
727#define IP_VS_DEST_STATE_REMOVING 0x01
728/* 726/*
729 * The real server destination forwarding entry 727 * The real server destination forwarding entry
730 * with ip address, port number, and so on. 728 * with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
742 740
743 atomic_t refcnt; /* reference counter */ 741 atomic_t refcnt; /* reference counter */
744 struct ip_vs_stats stats; /* statistics */ 742 struct ip_vs_stats stats; /* statistics */
745 unsigned long state; /* state flags */ 743 unsigned long idle_start; /* start time, jiffies */
746 744
747 /* connection counters and thresholds */ 745 /* connection counters and thresholds */
748 atomic_t activeconns; /* active connections */ 746 atomic_t activeconns; /* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
756 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ 754 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
757 755
758 /* for virtual service */ 756 /* for virtual service */
759 struct ip_vs_service *svc; /* service it belongs to */ 757 struct ip_vs_service __rcu *svc; /* service it belongs to */
760 __u16 protocol; /* which protocol (TCP/UDP) */ 758 __u16 protocol; /* which protocol (TCP/UDP) */
761 __be16 vport; /* virtual port number */ 759 __be16 vport; /* virtual port number */
762 union nf_inet_addr vaddr; /* virtual IP address */ 760 union nf_inet_addr vaddr; /* virtual IP address */
763 __u32 vfwmark; /* firewall mark of service */ 761 __u32 vfwmark; /* firewall mark of service */
764 762
765 struct list_head t_list; /* in dest_trash */ 763 struct list_head t_list; /* in dest_trash */
766 struct rcu_head rcu_head;
767 unsigned int in_rs_table:1; /* we are in rs_table */ 764 unsigned int in_rs_table:1; /* we are in rs_table */
768}; 765};
769 766
@@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1649/* CONFIG_IP_VS_NFCT */ 1646/* CONFIG_IP_VS_NFCT */
1650#endif 1647#endif
1651 1648
1652static inline unsigned int 1649static inline int
1653ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1650ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1654{ 1651{
1655 /* 1652 /*
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 4fbf02aa2ec1..0f7558b638ae 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -112,6 +112,7 @@ struct mrp_applicant {
112 struct mrp_application *app; 112 struct mrp_application *app;
113 struct net_device *dev; 113 struct net_device *dev;
114 struct timer_list join_timer; 114 struct timer_list join_timer;
115 struct timer_list periodic_timer;
115 116
116 spinlock_t lock; 117 spinlock_t lock;
117 struct sk_buff_head queue; 118 struct sk_buff_head queue;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1313456a0994..9d22f08896c6 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -74,6 +74,7 @@ struct net {
74 struct hlist_head *dev_index_head; 74 struct hlist_head *dev_index_head;
75 unsigned int dev_base_seq; /* protected by rtnl_mutex */ 75 unsigned int dev_base_seq; /* protected by rtnl_mutex */
76 int ifindex; 76 int ifindex;
77 unsigned int dev_unreg_count;
77 78
78 /* core fib_rules */ 79 /* core fib_rules */
79 struct list_head rules_ops; 80 struct list_head rules_ops;
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 806f54a290d6..f572f313d6f1 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -56,7 +56,7 @@ struct synproxy_options {
56 56
57struct tcphdr; 57struct tcphdr;
58struct xt_synproxy_info; 58struct xt_synproxy_info;
59extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 59extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
60 const struct tcphdr *th, 60 const struct tcphdr *th,
61 struct synproxy_options *opts); 61 struct synproxy_options *opts);
62extern unsigned int synproxy_options_size(const struct synproxy_options *opts); 62extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 6ca975bebd37..c2e542b27a5a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6extern void net_secret_init(void);
7extern __u32 secure_ip_id(__be32 daddr); 6extern __u32 secure_ip_id(__be32 daddr);
8extern __u32 secure_ipv6_id(const __be32 daddr[4]); 7extern __u32 secure_ipv6_id(const __be32 daddr[4]);
9extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 8extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 6ba2e7b0e2b1..1d37a8086bed 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -409,6 +409,11 @@ struct sock {
409 void (*sk_destruct)(struct sock *sk); 409 void (*sk_destruct)(struct sock *sk);
410}; 410};
411 411
412#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
413
414#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
415#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
416
412/* 417/*
413 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK 418 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
414 * or not whether his port will be reused by someone else. SK_FORCE_REUSE 419 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
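[Editor's note] The new macros wrap sk_user_data in rcu_dereference()/rcu_assign_pointer() so readers and the writer agree on publication ordering. Outside the kernel the same shape can be approximated with C11 atomics, which is what the sketch below does; acquire/release stand in for the RCU primitives and the struct is a toy, so treat this as an analogy rather than the kernel's RCU:

#include <stdatomic.h>
#include <stdio.h>

struct toy_sock {
	/* ... other fields ... */
	void *_Atomic user_data;
};

/* Publication: release-store so the pointee's initialisation is visible first. */
#define toy_assign_user_data(sk, ptr) \
	atomic_store_explicit(&(sk)->user_data, (ptr), memory_order_release)

/* Consumption: acquire-load pairs with the release-store above. */
#define toy_dereference_user_data(sk) \
	atomic_load_explicit(&(sk)->user_data, memory_order_acquire)

struct my_ctx { int id; };

int main(void)
{
	static struct my_ctx ctx = { .id = 42 };
	struct toy_sock sk = { 0 };

	toy_assign_user_data(&sk, &ctx);

	struct my_ctx *seen = toy_dereference_user_data(&sk);
	printf("ctx id: %d\n", seen ? seen->id : -1);
	return 0;
}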
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index fe66533e9b7a..fb0a312bcb81 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -68,6 +68,7 @@ struct rsnd_scu_platform_info {
68 * 68 *
69 * A : generation 69 * A : generation
70 */ 70 */
71#define RSND_GEN_MASK (0xF << 0)
71#define RSND_GEN1 (1 << 0) /* fixme */ 72#define RSND_GEN1 (1 << 0) /* fixme */
72#define RSND_GEN2 (2 << 0) /* fixme */ 73#define RSND_GEN2 (2 << 0) /* fixme */
73 74
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fa8b3adf9ffb..46d41e8b0dcc 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1009 1009
1010#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1011
1010#endif 1012#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 40a1fb807396..009a655a5d35 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
380 union { 380 union {
381 __u64 capabilities; 381 __u64 capabilities;
382 struct { 382 struct {
383 __u64 cap_usr_time : 1, 383 __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
384 cap_usr_rdpmc : 1, 384 cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
385 cap_usr_time_zero : 1, 385
386 cap_____res : 61; 386 cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
387 cap_user_time : 1, /* The time_* fields are used */
388 cap_user_time_zero : 1, /* The time_zero field is used */
389 cap_____res : 59;
387 }; 390 };
388 }; 391 };
389 392
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
442 * ((rem * time_mult) >> time_shift); 445 * ((rem * time_mult) >> time_shift);
443 */ 446 */
444 __u64 time_zero; 447 __u64 time_zero;
448 __u32 size; /* Header size up to __reserved[] fields. */
445 449
446 /* 450 /*
447 * Hole for extension of the self monitor capabilities 451 * Hole for extension of the self monitor capabilities
448 */ 452 */
449 453
450 __u64 __reserved[119]; /* align to 1k */ 454 __u8 __reserved[118*8+4]; /* align to 1k. */
451 455
452 /* 456 /*
453 * Control data for the mmap() data buffer. 457 * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
528 * u64 len; 532 * u64 len;
529 * u64 pgoff; 533 * u64 pgoff;
530 * char filename[]; 534 * char filename[];
535 * struct sample_id sample_id;
531 * }; 536 * };
532 */ 537 */
533 PERF_RECORD_MMAP = 1, 538 PERF_RECORD_MMAP = 1,
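[Editor's note] The time_zero field added above, together with time_mult and time_shift, lets user space turn a raw cycle count into the kernel's clock: split the cycle value at time_shift, scale both halves by time_mult, and add the base. A standalone arithmetic sketch of a conversion of that shape, with made-up calibration values and no perf_event_open() call:

#include <stdint.h>
#include <stdio.h>

/*
 * Convert a cycle count to nanoseconds using a mult/shift/zero triple.
 * Splitting the value at time_shift keeps the 64-bit multiplication
 * from overflowing for large cycle counts.
 */
static uint64_t cycles_to_ns(uint64_t cyc, uint32_t time_mult,
			     uint16_t time_shift, uint64_t time_zero)
{
	uint64_t quot = cyc >> time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

	return time_zero + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}

int main(void)
{
	/* Made-up calibration: roughly a 3 GHz counter, so ~1/3 ns per cycle. */
	uint32_t time_mult  = 178956971;	/* (1/3) ns scaled by 2^29 */
	uint16_t time_shift = 29;
	uint64_t time_zero  = 0;

	printf("%llu cycles -> %llu ns\n",
	       3000000000ULL,
	       (unsigned long long)cycles_to_ns(3000000000ULL, time_mult,
						time_shift, time_zero));
	return 0;
}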
diff --git a/init/main.c b/init/main.c
index af310afbef28..63d3e8f2970c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -76,6 +76,7 @@
76#include <linux/elevator.h> 76#include <linux/elevator.h>
77#include <linux/sched_clock.h> 77#include <linux/sched_clock.h>
78#include <linux/context_tracking.h> 78#include <linux/context_tracking.h>
79#include <linux/random.h>
79 80
80#include <asm/io.h> 81#include <asm/io.h>
81#include <asm/bugs.h> 82#include <asm/bugs.h>
@@ -780,6 +781,7 @@ static void __init do_basic_setup(void)
780 do_ctors(); 781 do_ctors();
781 usermodehelper_enable(); 782 usermodehelper_enable();
782 do_initcalls(); 783 do_initcalls();
784 random_int_secret_init();
783} 785}
784 786
785static void __init do_pre_smp_initcalls(void) 787static void __init do_pre_smp_initcalls(void)
diff --git a/ipc/msg.c b/ipc/msg.c
index b0d541d42677..558aa91186b6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
165 ipc_rmid(&msg_ids(ns), &s->q_perm); 165 ipc_rmid(&msg_ids(ns), &s->q_perm);
166} 166}
167 167
168static void msg_rcu_free(struct rcu_head *head)
169{
170 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
171 struct msg_queue *msq = ipc_rcu_to_struct(p);
172
173 security_msg_queue_free(msq);
174 ipc_rcu_free(head);
175}
176
168/** 177/**
169 * newque - Create a new msg queue 178 * newque - Create a new msg queue
170 * @ns: namespace 179 * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
189 msq->q_perm.security = NULL; 198 msq->q_perm.security = NULL;
190 retval = security_msg_queue_alloc(msq); 199 retval = security_msg_queue_alloc(msq);
191 if (retval) { 200 if (retval) {
192 ipc_rcu_putref(msq); 201 ipc_rcu_putref(msq, ipc_rcu_free);
193 return retval; 202 return retval;
194 } 203 }
195 204
196 /* ipc_addid() locks msq upon success. */ 205 /* ipc_addid() locks msq upon success. */
197 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 206 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
198 if (id < 0) { 207 if (id < 0) {
199 security_msg_queue_free(msq); 208 ipc_rcu_putref(msq, msg_rcu_free);
200 ipc_rcu_putref(msq);
201 return id; 209 return id;
202 } 210 }
203 211
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
276 free_msg(msg); 284 free_msg(msg);
277 } 285 }
278 atomic_sub(msq->q_cbytes, &ns->msg_bytes); 286 atomic_sub(msq->q_cbytes, &ns->msg_bytes);
279 security_msg_queue_free(msq); 287 ipc_rcu_putref(msq, msg_rcu_free);
280 ipc_rcu_putref(msq);
281} 288}
282 289
283/* 290/*
@@ -688,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
688 if (ipcperms(ns, &msq->q_perm, S_IWUGO)) 695 if (ipcperms(ns, &msq->q_perm, S_IWUGO))
689 goto out_unlock0; 696 goto out_unlock0;
690 697
698 /* raced with RMID? */
699 if (msq->q_perm.deleted) {
700 err = -EIDRM;
701 goto out_unlock0;
702 }
703
691 err = security_msg_queue_msgsnd(msq, msg, msgflg); 704 err = security_msg_queue_msgsnd(msq, msg, msgflg);
692 if (err) 705 if (err)
693 goto out_unlock0; 706 goto out_unlock0;
@@ -717,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
717 rcu_read_lock(); 730 rcu_read_lock();
718 ipc_lock_object(&msq->q_perm); 731 ipc_lock_object(&msq->q_perm);
719 732
720 ipc_rcu_putref(msq); 733 ipc_rcu_putref(msq, ipc_rcu_free);
721 if (msq->q_perm.deleted) { 734 if (msq->q_perm.deleted) {
722 err = -EIDRM; 735 err = -EIDRM;
723 goto out_unlock0; 736 goto out_unlock0;
@@ -894,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
894 goto out_unlock1; 907 goto out_unlock1;
895 908
896 ipc_lock_object(&msq->q_perm); 909 ipc_lock_object(&msq->q_perm);
910
911 /* raced with RMID? */
912 if (msq->q_perm.deleted) {
913 msg = ERR_PTR(-EIDRM);
914 goto out_unlock0;
915 }
916
897 msg = find_msg(msq, &msgtyp, mode); 917 msg = find_msg(msq, &msgtyp, mode);
898 if (!IS_ERR(msg)) { 918 if (!IS_ERR(msg)) {
899 /* 919 /*
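[Editor's note] Both new checks above follow the same discipline: after (re)taking the object lock, verify that the queue was not torn down by IPC_RMID while the lock was dropped, and bail out with EIDRM if it was. A compressed pthread sketch of that re-check pattern on a toy object, not the ipc code itself:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	pthread_mutex_t lock;
	bool deleted;		/* set by the RMID path under 'lock' */
	int messages;
};

static int toy_send(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);

	/* Raced with removal while we were unlocked or sleeping? */
	if (q->deleted) {
		pthread_mutex_unlock(&q->lock);
		return -EIDRM;
	}

	q->messages++;
	pthread_mutex_unlock(&q->lock);
	return 0;
}

static void toy_rmid(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->deleted = true;	/* later senders must now see -EIDRM */
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	printf("send before rmid: %d\n", toy_send(&q));
	toy_rmid(&q);
	printf("send after rmid:  %d\n", toy_send(&q));
	return 0;
}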
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..8c4f59b0204a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,71 +243,122 @@ static void merge_queues(struct sem_array *sma)
243 } 243 }
244} 244}
245 245
246static void sem_rcu_free(struct rcu_head *head)
247{
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253}
254
255/*
256 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check
259 * that sem_perm.lock is free.
260 * that a) sem_perm.lock is free and b) complex_count is 0.
261 */
262static void sem_wait_array(struct sem_array *sma)
263{
264 int i;
265 struct sem *sem;
266
267 if (sma->complex_count) {
268 /* The thread that increased sma->complex_count waited on
269 * all sem->lock locks. Thus we don't need to wait again.
270 */
271 return;
272 }
273
274 for (i = 0; i < sma->sem_nsems; i++) {
275 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock);
277 }
278}
279
246/* 280/*
247 * If the request contains only one semaphore operation, and there are 281 * If the request contains only one semaphore operation, and there are
248 * no complex transactions pending, lock only the semaphore involved. 282 * no complex transactions pending, lock only the semaphore involved.
249 * Otherwise, lock the entire semaphore array, since we either have 283 * Otherwise, lock the entire semaphore array, since we either have
250 * multiple semaphores in our own semops, or we need to look at 284 * multiple semaphores in our own semops, or we need to look at
251 * semaphores from other pending complex operations. 285 * semaphores from other pending complex operations.
252 *
253 * Carefully guard against sma->complex_count changing between zero
254 * and non-zero while we are spinning for the lock. The value of
255 * sma->complex_count cannot change while we are holding the lock,
256 * so sem_unlock should be fine.
257 *
258 * The global lock path checks that all the local locks have been released,
259 * checking each local lock once. This means that the local lock paths
260 * cannot start their critical sections while the global lock is held.
261 */ 286 */
262static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, 287static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
263 int nsops) 288 int nsops)
264{ 289{
265 int locknum; 290 struct sem *sem;
266 again:
267 if (nsops == 1 && !sma->complex_count) {
268 struct sem *sem = sma->sem_base + sops->sem_num;
269 291
270 /* Lock just the semaphore we are interested in. */ 292 if (nsops != 1) {
271 spin_lock(&sem->lock); 293 /* Complex operation - acquire a full lock */
294 ipc_lock_object(&sma->sem_perm);
272 295
273 /* 296 /* And wait until all simple ops that are processed
274 * If sma->complex_count was set while we were spinning, 297 * right now have dropped their locks.
275 * we may need to look at things we did not lock here.
276 */ 298 */
277 if (unlikely(sma->complex_count)) { 299 sem_wait_array(sma);
278 spin_unlock(&sem->lock); 300 return -1;
279 goto lock_array; 301 }
280 } 302
303 /*
304 * Only one semaphore affected - try to optimize locking.
305 * The rules are:
306 * - optimized locking is possible if no complex operation
307 * is either enqueued or processed right now.
308 * - The test for enqueued complex ops is simple:
309 * sma->complex_count != 0
310 * - Testing for complex ops that are processed right now is
311 * a bit more difficult. Complex ops acquire the full lock
312 * and first wait until the running simple ops have completed.
313 * (see above)
314 * Thus: If we own a simple lock and the global lock is free
315 * and complex_count is now 0, then it will stay 0 and
316 * thus just locking sem->lock is sufficient.
317 */
318 sem = sma->sem_base + sops->sem_num;
281 319
320 if (sma->complex_count == 0) {
282 /* 321 /*
283 * Another process is holding the global lock on the 322 * It appears that no complex operation is around.
284 * sem_array; we cannot enter our critical section, 323 * Acquire the per-semaphore lock.
285 * but have to wait for the global lock to be released.
286 */ 324 */
287 if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { 325 spin_lock(&sem->lock);
288 spin_unlock(&sem->lock); 326
289 spin_unlock_wait(&sma->sem_perm.lock); 327 /* Then check that the global lock is free */
290 goto again; 328 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* spin_is_locked() is not a memory barrier */
330 smp_mb();
331
332 /* Now repeat the test of complex_count:
333 * It can't change anymore until we drop sem->lock.
334 * Thus: if it is now 0, then it will stay 0.
335 */
336 if (sma->complex_count == 0) {
337 /* fast path successful! */
338 return sops->sem_num;
339 }
291 } 340 }
341 spin_unlock(&sem->lock);
342 }
292 343
293 locknum = sops->sem_num; 344 /* slow path: acquire the full lock */
345 ipc_lock_object(&sma->sem_perm);
346
347 if (sma->complex_count == 0) {
348 /* False alarm:
349 * There is no complex operation, thus we can switch
350 * back to the fast path.
351 */
352 spin_lock(&sem->lock);
353 ipc_unlock_object(&sma->sem_perm);
354 return sops->sem_num;
294 } else { 355 } else {
295 int i; 356 /* Not a false alarm, thus complete the sequence for a
296 /* 357 * full lock.
297 * Lock the semaphore array, and wait for all of the
298 * individual semaphore locks to go away. The code
299 * above ensures no new single-lock holders will enter
300 * their critical section while the array lock is held.
301 */ 358 */
302 lock_array: 359 sem_wait_array(sma);
303 ipc_lock_object(&sma->sem_perm); 360 return -1;
304 for (i = 0; i < sma->sem_nsems; i++) {
305 struct sem *sem = sma->sem_base + i;
306 spin_unlock_wait(&sem->lock);
307 }
308 locknum = -1;
309 } 361 }
310 return locknum;
311} 362}
312 363
313static inline void sem_unlock(struct sem_array *sma, int locknum) 364static inline void sem_unlock(struct sem_array *sma, int locknum)
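
The rewritten sem_lock() above layers a per-semaphore fast path under a global slow path, with sem_wait_array() draining in-flight simple operations. The pthread model below keeps only that two-level shape: it drops the kernel's lockless peek at the global spinlock and its explicit memory barriers, and every identifier in it is invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NSEMS 4

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t item_lock[NSEMS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static atomic_int complex_active;      /* stands in for sma->complex_count */
static int value[NSEMS];

/* Single-item operation: try the per-item lock, fall back to the global one. */
static void single_op(int i)
{
    pthread_mutex_lock(&item_lock[i]);
    if (!atomic_load(&complex_active)) {
        value[i]++;                    /* fast path, item lock only */
        pthread_mutex_unlock(&item_lock[i]);
        return;
    }
    pthread_mutex_unlock(&item_lock[i]);

    pthread_mutex_lock(&global_lock);  /* slow path */
    value[i]++;
    pthread_mutex_unlock(&global_lock);
}

/* Complex operation: global lock, then wait out in-flight single ops. */
static void complex_op(void)
{
    int i;

    pthread_mutex_lock(&global_lock);
    atomic_store(&complex_active, 1);

    /* The analogue of sem_wait_array(): drain every per-item lock once. */
    for (i = 0; i < NSEMS; i++) {
        pthread_mutex_lock(&item_lock[i]);
        pthread_mutex_unlock(&item_lock[i]);
    }

    for (i = 0; i < NSEMS; i++)        /* touches all items */
        value[i] += 10;

    atomic_store(&complex_active, 0);
    pthread_mutex_unlock(&global_lock);
}

int main(void)
{
    single_op(1);
    complex_op();
    single_op(1);
    printf("value[1] = %d\n", value[1]);   /* prints 12 */
    return 0;
}

The wait loop in complex_op() plays the role of spin_unlock_wait(): locking and immediately unlocking each per-item mutex guarantees that any single operation already inside its critical section has finished before the complex work starts.
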
@@ -374,12 +425,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
374static inline void sem_lock_and_putref(struct sem_array *sma) 425static inline void sem_lock_and_putref(struct sem_array *sma)
375{ 426{
376 sem_lock(sma, NULL, -1); 427 sem_lock(sma, NULL, -1);
377 ipc_rcu_putref(sma); 428 ipc_rcu_putref(sma, ipc_rcu_free);
378}
379
380static inline void sem_putref(struct sem_array *sma)
381{
382 ipc_rcu_putref(sma);
383} 429}
384 430
385static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 431static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +504,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
458 sma->sem_perm.security = NULL; 504 sma->sem_perm.security = NULL;
459 retval = security_sem_alloc(sma); 505 retval = security_sem_alloc(sma);
460 if (retval) { 506 if (retval) {
461 ipc_rcu_putref(sma); 507 ipc_rcu_putref(sma, ipc_rcu_free);
462 return retval; 508 return retval;
463 } 509 }
464 510
465 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 511 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
466 if (id < 0) { 512 if (id < 0) {
467 security_sem_free(sma); 513 ipc_rcu_putref(sma, sem_rcu_free);
468 ipc_rcu_putref(sma);
469 return id; 514 return id;
470 } 515 }
471 ns->used_sems += nsems; 516 ns->used_sems += nsems;
@@ -873,6 +918,24 @@ again:
873} 918}
874 919
875/** 920/**
921 * set_semotime(sma, sops) - set sem_otime
922 * @sma: semaphore array
923 * @sops: operations that modified the array, may be NULL
924 *
925 * sem_otime is replicated to avoid cache line thrashing.
926 * This function sets one instance to the current time.
927 */
928static void set_semotime(struct sem_array *sma, struct sembuf *sops)
929{
930 if (sops == NULL) {
931 sma->sem_base[0].sem_otime = get_seconds();
932 } else {
933 sma->sem_base[sops[0].sem_num].sem_otime =
934 get_seconds();
935 }
936}
937
938/**
876 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue 939 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
877 * @sma: semaphore array 940 * @sma: semaphore array
878 * @sops: operations that were performed 941 * @sops: operations that were performed
@@ -922,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
922 } 985 }
923 } 986 }
924 } 987 }
925 if (otime) { 988 if (otime)
926 if (sops == NULL) { 989 set_semotime(sma, sops);
927 sma->sem_base[0].sem_otime = get_seconds();
928 } else {
929 sma->sem_base[sops[0].sem_num].sem_otime =
930 get_seconds();
931 }
932 }
933} 990}
934 991
935
936/* The following counts are associated to each semaphore: 992/* The following counts are associated to each semaphore:
937 * semncnt number of tasks waiting on semval being nonzero 993 * semncnt number of tasks waiting on semval being nonzero
938 * semzcnt number of tasks waiting on semval being zero 994 * semzcnt number of tasks waiting on semval being zero
@@ -1047,8 +1103,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1047 1103
1048 wake_up_sem_queue_do(&tasks); 1104 wake_up_sem_queue_do(&tasks);
1049 ns->used_sems -= sma->sem_nsems; 1105 ns->used_sems -= sma->sem_nsems;
1050 security_sem_free(sma); 1106 ipc_rcu_putref(sma, sem_rcu_free);
1051 ipc_rcu_putref(sma);
1052} 1107}
1053 1108
1054static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1109static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1347,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1292 rcu_read_unlock(); 1347 rcu_read_unlock();
1293 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1348 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1294 if(sem_io == NULL) { 1349 if(sem_io == NULL) {
1295 sem_putref(sma); 1350 ipc_rcu_putref(sma, ipc_rcu_free);
1296 return -ENOMEM; 1351 return -ENOMEM;
1297 } 1352 }
1298 1353
@@ -1328,20 +1383,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1328 if(nsems > SEMMSL_FAST) { 1383 if(nsems > SEMMSL_FAST) {
1329 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1384 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1330 if(sem_io == NULL) { 1385 if(sem_io == NULL) {
1331 sem_putref(sma); 1386 ipc_rcu_putref(sma, ipc_rcu_free);
1332 return -ENOMEM; 1387 return -ENOMEM;
1333 } 1388 }
1334 } 1389 }
1335 1390
1336 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { 1391 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1337 sem_putref(sma); 1392 ipc_rcu_putref(sma, ipc_rcu_free);
1338 err = -EFAULT; 1393 err = -EFAULT;
1339 goto out_free; 1394 goto out_free;
1340 } 1395 }
1341 1396
1342 for (i = 0; i < nsems; i++) { 1397 for (i = 0; i < nsems; i++) {
1343 if (sem_io[i] > SEMVMX) { 1398 if (sem_io[i] > SEMVMX) {
1344 sem_putref(sma); 1399 ipc_rcu_putref(sma, ipc_rcu_free);
1345 err = -ERANGE; 1400 err = -ERANGE;
1346 goto out_free; 1401 goto out_free;
1347 } 1402 }
@@ -1629,7 +1684,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1629 /* step 2: allocate new undo structure */ 1684 /* step 2: allocate new undo structure */
1630 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1685 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1631 if (!new) { 1686 if (!new) {
1632 sem_putref(sma); 1687 ipc_rcu_putref(sma, ipc_rcu_free);
1633 return ERR_PTR(-ENOMEM); 1688 return ERR_PTR(-ENOMEM);
1634 } 1689 }
1635 1690
@@ -1795,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1795 1850
1796 error = perform_atomic_semop(sma, sops, nsops, un, 1851 error = perform_atomic_semop(sma, sops, nsops, un,
1797 task_tgid_vnr(current)); 1852 task_tgid_vnr(current));
1798 if (error <= 0) { 1853 if (error == 0) {
1799 if (alter && error == 0) 1854 /* If the operation was successful, then do
1855 * the required updates.
1856 */
1857 if (alter)
1800 do_smart_update(sma, sops, nsops, 1, &tasks); 1858 do_smart_update(sma, sops, nsops, 1, &tasks);
1801 1859 else
1802 goto out_unlock_free; 1860 set_semotime(sma, sops);
1803 } 1861 }
1862 if (error <= 0)
1863 goto out_unlock_free;
1804 1864
1805 /* We need to sleep on this operation, so we put the current 1865 /* We need to sleep on this operation, so we put the current
1806 * task into the pending queue and go to sleep. 1866 * task into the pending queue and go to sleep.
@@ -2059,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2059 struct sem_array *sma = it; 2119 struct sem_array *sma = it;
2060 time_t sem_otime; 2120 time_t sem_otime;
2061 2121
2122 /*
2123 * The proc interface isn't aware of sem_lock(), it calls
2124 * ipc_lock_object() directly (in sysvipc_find_ipc).
2125 * In order to stay compatible with sem_lock(), we must wait until
2126 * all simple semop() calls have left their critical regions.
2127 */
2128 sem_wait_array(sma);
2129
2062 sem_otime = get_semotime(sma); 2130 sem_otime = get_semotime(sma);
2063 2131
2064 return seq_printf(s, 2132 return seq_printf(s,
diff --git a/ipc/shm.c b/ipc/shm.c
index 2821cdf93adb..d69739610fd4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
167 ipc_lock_object(&ipcp->shm_perm); 167 ipc_lock_object(&ipcp->shm_perm);
168} 168}
169 169
170static void shm_rcu_free(struct rcu_head *head)
171{
172 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
173 struct shmid_kernel *shp = ipc_rcu_to_struct(p);
174
175 security_shm_free(shp);
176 ipc_rcu_free(head);
177}
178
170static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) 179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
171{ 180{
172 ipc_rmid(&shm_ids(ns), &s->shm_perm); 181 ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
208 user_shm_unlock(file_inode(shp->shm_file)->i_size, 217 user_shm_unlock(file_inode(shp->shm_file)->i_size,
209 shp->mlock_user); 218 shp->mlock_user);
210 fput (shp->shm_file); 219 fput (shp->shm_file);
211 security_shm_free(shp); 220 ipc_rcu_putref(shp, shm_rcu_free);
212 ipc_rcu_putref(shp);
213} 221}
214 222
215/* 223/*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
497 shp->shm_perm.security = NULL; 505 shp->shm_perm.security = NULL;
498 error = security_shm_alloc(shp); 506 error = security_shm_alloc(shp);
499 if (error) { 507 if (error) {
500 ipc_rcu_putref(shp); 508 ipc_rcu_putref(shp, ipc_rcu_free);
501 return error; 509 return error;
502 } 510 }
503 511
@@ -566,8 +574,7 @@ no_id:
566 user_shm_unlock(size, shp->mlock_user); 574 user_shm_unlock(size, shp->mlock_user);
567 fput(file); 575 fput(file);
568no_file: 576no_file:
569 security_shm_free(shp); 577 ipc_rcu_putref(shp, shm_rcu_free);
570 ipc_rcu_putref(shp);
571 return error; 578 return error;
572} 579}
573 580
diff --git a/ipc/util.c b/ipc/util.c
index e829da9ed01f..fdb8ae740775 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
474 kfree(ptr); 474 kfree(ptr);
475} 475}
476 476
477struct ipc_rcu {
478 struct rcu_head rcu;
479 atomic_t refcount;
480} ____cacheline_aligned_in_smp;
481
482/** 477/**
483 * ipc_rcu_alloc - allocate ipc and rcu space 478 * ipc_rcu_alloc - allocate ipc and rcu space
484 * @size: size desired 479 * @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
505 return atomic_inc_not_zero(&p->refcount); 500 return atomic_inc_not_zero(&p->refcount);
506} 501}
507 502
508/** 503void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
509 * ipc_schedule_free - free ipc + rcu space
510 * @head: RCU callback structure for queued work
511 */
512static void ipc_schedule_free(struct rcu_head *head)
513{
514 vfree(container_of(head, struct ipc_rcu, rcu));
515}
516
517void ipc_rcu_putref(void *ptr)
518{ 504{
519 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; 505 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
520 506
521 if (!atomic_dec_and_test(&p->refcount)) 507 if (!atomic_dec_and_test(&p->refcount))
522 return; 508 return;
523 509
524 if (is_vmalloc_addr(ptr)) { 510 call_rcu(&p->rcu, func);
525 call_rcu(&p->rcu, ipc_schedule_free); 511}
526 } else { 512
527 kfree_rcu(p, rcu); 513void ipc_rcu_free(struct rcu_head *head)
528 } 514{
515 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
516
517 if (is_vmalloc_addr(p))
518 vfree(p);
519 else
520 kfree(p);
529} 521}
530 522
531/** 523/**
diff --git a/ipc/util.h b/ipc/util.h
index c5f3338ba1fa..f2f5036f2eed 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
47static inline void shm_exit_ns(struct ipc_namespace *ns) { } 47static inline void shm_exit_ns(struct ipc_namespace *ns) { }
48#endif 48#endif
49 49
50struct ipc_rcu {
51 struct rcu_head rcu;
52 atomic_t refcount;
53} ____cacheline_aligned_in_smp;
54
55#define ipc_rcu_to_struct(p) ((void *)(p+1))
56
50/* 57/*
51 * Structure that holds the parameters needed by the ipc operations 58 * Structure that holds the parameters needed by the ipc operations
52 * (see after) 59 * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
120 */ 127 */
121void* ipc_rcu_alloc(int size); 128void* ipc_rcu_alloc(int size);
122int ipc_rcu_getref(void *ptr); 129int ipc_rcu_getref(void *ptr);
123void ipc_rcu_putref(void *ptr); 130void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
131void ipc_rcu_free(struct rcu_head *head);
124 132
125struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 133struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
126struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 134struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
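
Across sem.c, shm.c and util.c the series replaces the open-coded security_*_free() plus ipc_rcu_putref() pairs with a single ipc_rcu_putref(ptr, func) that names the release callback. The sketch below shows the same refcount-plus-callback shape in plain C; the RCU grace period is collapsed into a direct call and all names are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ref_head {
    atomic_int refcount;
};

struct widget {
    struct ref_head head;      /* common header, first member on purpose */
    int payload;
};

/* Per-type release: free type-specific resources, then the object itself. */
static void widget_release(struct ref_head *head)
{
    struct widget *w = (struct widget *)head;

    printf("widget released (payload %d)\n", w->payload);
    free(w);
}

static void get_ref(struct ref_head *head)
{
    atomic_fetch_add(&head->refcount, 1);
}

/* The analogue of ipc_rcu_putref(ptr, func): the caller names the releaser. */
static void put_ref(struct ref_head *head, void (*release)(struct ref_head *))
{
    if (atomic_fetch_sub(&head->refcount, 1) == 1)
        release(head);         /* the kernel defers this via call_rcu() */
}

int main(void)
{
    struct widget *w = calloc(1, sizeof(*w));

    atomic_init(&w->head.refcount, 1);
    w->payload = 42;

    get_ref(&w->head);
    put_ref(&w->head, widget_release); /* still one reference left */
    put_ref(&w->head, widget_release); /* last reference: released */
    return 0;
}
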
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1117 1117
1118 sleep_time = timeout_start + audit_backlog_wait_time - 1118 sleep_time = timeout_start + audit_backlog_wait_time -
1119 jiffies; 1119 jiffies;
1120 if ((long)sleep_time > 0) 1120 if ((long)sleep_time > 0) {
1121 wait_for_auditd(sleep_time); 1121 wait_for_auditd(sleep_time);
1122 continue; 1122 continue;
1123 }
1123 } 1124 }
1124 if (audit_rate_check() && printk_ratelimit()) 1125 if (audit_rate_check() && printk_ratelimit())
1125 printk(KERN_WARNING 1126 printk(KERN_WARNING
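
The audit.c change is purely about statement scope: without the added braces the continue ran on every pass, not only after sleeping. A self-contained loop with the same shape (the numbers are arbitrary):

#include <stdio.h>

int main(void)
{
    int budget = 3;
    int attempt;

    for (attempt = 0; attempt < 5; attempt++) {
        if (budget > 0) {      /* only retry while we may still wait */
            budget--;          /* stands in for the sleep */
            continue;
        }
        /*
         * Had the braces been missing, "continue" would have bound to
         * the loop unconditionally and this fallback would never run.
         */
        printf("fallback path reached on attempt %d\n", attempt);
        break;
    }
    return 0;
}
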
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
51 unsigned long flags; 51 unsigned long flags;
52 52
53 /* 53 /*
54 * Repeat the user_enter() check here because some archs may be calling
55 * this from asm and if no CPU needs context tracking, they shouldn't
56 * go further. Repeat the check here until they support the static key
57 * check.
58 */
59 if (!static_key_false(&context_tracking_enabled))
60 return;
61
62 /*
54 * Some contexts may involve an exception occurring in an irq, 63
55 * leading to that nesting: 64 * leading to that nesting:
56 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 65 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
151{ 160{
152 unsigned long flags; 161 unsigned long flags;
153 162
163 if (!static_key_false(&context_tracking_enabled))
164 return;
165
154 if (in_interrupt()) 166 if (in_interrupt())
155 return; 167 return;
156 168
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..d49a9d29334c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
3660 *running = ctx_time - event->tstamp_running; 3660 *running = ctx_time - event->tstamp_running;
3661} 3661}
3662 3662
3663static void perf_event_init_userpage(struct perf_event *event)
3664{
3665 struct perf_event_mmap_page *userpg;
3666 struct ring_buffer *rb;
3667
3668 rcu_read_lock();
3669 rb = rcu_dereference(event->rb);
3670 if (!rb)
3671 goto unlock;
3672
3673 userpg = rb->user_page;
3674
3675 /* Allow new userspace to detect that bit 0 is deprecated */
3676 userpg->cap_bit0_is_deprecated = 1;
3677 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3678
3679unlock:
3680 rcu_read_unlock();
3681}
3682
3663void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3683void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3664{ 3684{
3665} 3685}
@@ -4044,6 +4064,7 @@ again:
4044 ring_buffer_attach(event, rb); 4064 ring_buffer_attach(event, rb);
4045 rcu_assign_pointer(event->rb, rb); 4065 rcu_assign_pointer(event->rb, rb);
4046 4066
4067 perf_event_init_userpage(event);
4047 perf_event_update_userpage(event); 4068 perf_event_update_userpage(event);
4048 4069
4049unlock: 4070unlock:
@@ -7213,15 +7234,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7213 perf_remove_from_context(event); 7234 perf_remove_from_context(event);
7214 unaccount_event_cpu(event, src_cpu); 7235 unaccount_event_cpu(event, src_cpu);
7215 put_ctx(src_ctx); 7236 put_ctx(src_ctx);
7216 list_add(&event->event_entry, &events); 7237 list_add(&event->migrate_entry, &events);
7217 } 7238 }
7218 mutex_unlock(&src_ctx->mutex); 7239 mutex_unlock(&src_ctx->mutex);
7219 7240
7220 synchronize_rcu(); 7241 synchronize_rcu();
7221 7242
7222 mutex_lock(&dst_ctx->mutex); 7243 mutex_lock(&dst_ctx->mutex);
7223 list_for_each_entry_safe(event, tmp, &events, event_entry) { 7244 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7224 list_del(&event->event_entry); 7245 list_del(&event->migrate_entry);
7225 if (event->state >= PERF_EVENT_STATE_OFF) 7246 if (event->state >= PERF_EVENT_STATE_OFF)
7226 event->state = PERF_EVENT_STATE_INACTIVE; 7247 event->state = PERF_EVENT_STATE_INACTIVE;
7227 account_event_cpu(event, dst_cpu); 7248 account_event_cpu(event, dst_cpu);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
571 DECLARE_COMPLETION_ONSTACK(done); 571 DECLARE_COMPLETION_ONSTACK(done);
572 int retval = 0; 572 int retval = 0;
573 573
574 if (!sub_info->path) {
575 call_usermodehelper_freeinfo(sub_info);
576 return -EINVAL;
577 }
574 helper_lock(); 578 helper_lock();
575 if (!khelper_wq || usermodehelper_disabled) { 579 if (!khelper_wq || usermodehelper_disabled) {
576 retval = -EBUSY; 580 retval = -EBUSY;
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
254 254
255 255
256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul); 256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul); 257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul); 258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul); 259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul); 260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul); 261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul); 262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
263 263
264int param_set_charp(const char *val, const struct kernel_param *kp) 264int param_set_charp(const char *val, const struct kernel_param *kp)
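
The params.c hunk switches the signed parameter types from kstrtoul to kstrtol, since the unsigned helper rejects a leading minus sign and negative values could never be set. A userspace sketch of the difference, with parse_unsigned() standing in for that reject-the-sign behaviour:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Models the unsigned parser: a leading sign is refused outright. */
static int parse_unsigned(const char *s, unsigned long *out)
{
    char *end;

    if (*s == '-')
        return -EINVAL;
    *out = strtoul(s, &end, 0);
    return *end ? -EINVAL : 0;
}

/* Models the signed parser. */
static int parse_signed(const char *s, long *out)
{
    char *end;

    *out = strtol(s, &end, 0);
    return *end ? -EINVAL : 0;
}

int main(void)
{
    unsigned long u;
    long l;

    printf("unsigned parser on \"-1\": %d\n", parse_unsigned("-1", &u));
    printf("signed parser on \"-1\":   %d (value %ld)\n",
           parse_signed("-1", &l), l);
    return 0;
}
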
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
273 */ 273 */
274 wake_up_process(ns->child_reaper); 274 wake_up_process(ns->child_reaper);
275 break; 275 break;
276 case PIDNS_HASH_ADDING:
277 /* Handle a fork failure of the first process */
278 WARN_ON(ns->child_reaper);
279 ns->nr_hashed = 0;
280 /* fall through */
276 case 0: 281 case 0:
277 schedule_work(&ns->proc_work); 282 schedule_work(&ns->proc_work);
278 break; 283 break;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
743 struct memory_bitmap *bm1, *bm2; 743 struct memory_bitmap *bm1, *bm2;
744 int error = 0; 744 int error = 0;
745 745
746 BUG_ON(forbidden_pages_map || free_pages_map); 746 if (forbidden_pages_map && free_pages_map)
747 return 0;
748 else
749 BUG_ON(forbidden_pages_map || free_pages_map);
747 750
748 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 751 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
749 if (!bm1) 752 if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
39 char frozen; 39 char frozen;
40 char ready; 40 char ready;
41 char platform_support; 41 char platform_support;
42 bool free_bitmaps;
42} snapshot_state; 43} snapshot_state;
43 44
44atomic_t snapshot_device_available = ATOMIC_INIT(1); 45atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
82 data->swap = -1; 83 data->swap = -1;
83 data->mode = O_WRONLY; 84 data->mode = O_WRONLY;
84 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 85 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
86 if (!error) {
87 error = create_basic_memory_bitmaps();
88 data->free_bitmaps = !error;
89 }
85 if (error) 90 if (error)
86 pm_notifier_call_chain(PM_POST_RESTORE); 91 pm_notifier_call_chain(PM_POST_RESTORE);
87 } 92 }
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
111 pm_restore_gfp_mask(); 116 pm_restore_gfp_mask();
112 free_basic_memory_bitmaps(); 117 free_basic_memory_bitmaps();
113 thaw_processes(); 118 thaw_processes();
119 } else if (data->free_bitmaps) {
120 free_basic_memory_bitmaps();
114 } 121 }
115 pm_notifier_call_chain(data->mode == O_RDONLY ? 122 pm_notifier_call_chain(data->mode == O_RDONLY ?
116 PM_POST_HIBERNATION : PM_POST_RESTORE); 123 PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
231 break; 238 break;
232 pm_restore_gfp_mask(); 239 pm_restore_gfp_mask();
233 free_basic_memory_bitmaps(); 240 free_basic_memory_bitmaps();
241 data->free_bitmaps = false;
234 thaw_processes(); 242 thaw_processes();
235 data->frozen = 0; 243 data->frozen = 0;
236 break; 244 break;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
32#endif 32#endif
33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; 33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
34 34
35int reboot_default; 35/*
36 * This variable is used privately to keep track of whether or not
37 * reboot_type is still set to its default value (i.e., reboot= hasn't
38 * been set on the command line). This is needed so that we can
39 * suppress DMI scanning for reboot quirks. Without it, it's
40 * impossible to override a faulty reboot quirk without recompiling.
41 */
42int reboot_default = 1;
36int reboot_cpu; 43int reboot_cpu;
37enum reboot_type reboot_type = BOOT_ACPI; 44enum reboot_type reboot_type = BOOT_ACPI;
38int reboot_force; 45int reboot_force;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
4242 } 4242 }
4243 4243
4244 if (!se) { 4244 if (!se) {
4245 cfs_rq->h_load = rq->avg.load_avg_contrib; 4245 cfs_rq->h_load = cfs_rq->runnable_load_avg;
4246 cfs_rq->last_h_load_update = now; 4246 cfs_rq->last_h_load_update = now;
4247 } 4247 }
4248 4248
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4823 (busiest->load_per_task * SCHED_POWER_SCALE) / 4823 (busiest->load_per_task * SCHED_POWER_SCALE) /
4824 busiest->group_power; 4824 busiest->group_power;
4825 4825
4826 if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >= 4826 if (busiest->avg_load + scaled_busy_load_per_task >=
4827 (scaled_busy_load_per_task * imbn)) { 4827 local->avg_load + (scaled_busy_load_per_task * imbn)) {
4828 env->imbalance = busiest->load_per_task; 4828 env->imbalance = busiest->load_per_task;
4829 return; 4829 return;
4830 } 4830 }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4896 * max load less than avg load(as we skip the groups at or below 4896 * max load less than avg load(as we skip the groups at or below
4897 * its cpu_power, while calculating max_load..) 4897 * its cpu_power, while calculating max_load..)
4898 */ 4898 */
4899 if (busiest->avg_load < sds->avg_load) { 4899 if (busiest->avg_load <= sds->avg_load ||
4900 local->avg_load >= sds->avg_load) {
4900 env->imbalance = 0; 4901 env->imbalance = 0;
4901 return fix_small_imbalance(env, sds); 4902 return fix_small_imbalance(env, sds);
4902 } 4903 }
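
Both scheduler hunks rearrange comparisons so that no unsigned subtraction can wrap when the local load exceeds the busiest load. The small program below reproduces the failure mode with arbitrary numbers, not scheduler data:

#include <stdio.h>

int main(void)
{
    unsigned long busiest_load = 100, local_load = 300, per_task = 50;
    unsigned long imbn = 2;

    /* Old form: busiest - local wraps to a huge value when local is bigger. */
    if (busiest_load - local_load + per_task >= per_task * imbn)
        printf("old test: taken (wrapped value %lu)\n",
               busiest_load - local_load + per_task);

    /* New form: the same inequality with everything moved to addition. */
    if (busiest_load + per_task >= local_load + per_task * imbn)
        printf("new test: taken\n");
    else
        printf("new test: not taken\n");
    return 0;
}
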
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b8..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
328 328
329static inline void invoke_softirq(void) 329static inline void invoke_softirq(void)
330{ 330{
331 if (!force_irqthreads) 331 if (!force_irqthreads) {
332 __do_softirq(); 332 /*
333 else 333 * We can safely execute softirq on the current stack if
334 * it is the irq stack, because it should be near empty
335 * at this stage. But we have no way to know if the arch
336 * calls irq_exit() on the irq stack. So call softirq
337 * in its own stack to prevent any overrun on top
338 * of a potentially deep task stack.
339 */
340 do_softirq();
341 } else {
334 wakeup_softirqd(); 342 wakeup_softirqd();
343 }
335} 344}
336 345
337static inline void tick_irq_exit(void) 346static inline void tick_irq_exit(void)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
486 .unpark = watchdog_enable, 486 .unpark = watchdog_enable,
487}; 487};
488 488
489static int watchdog_enable_all_cpus(void) 489static void restart_watchdog_hrtimer(void *info)
490{
491 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
492 int ret;
493
494 /*
495 * No need to cancel and restart hrtimer if it is currently executing
496 * because it will reprogram itself with the new period now.
497 * We should never see it unqueued here because we are running per-cpu
498 * with interrupts disabled.
499 */
500 ret = hrtimer_try_to_cancel(hrtimer);
501 if (ret == 1)
502 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
503 HRTIMER_MODE_REL_PINNED);
504}
505
506static void update_timers(int cpu)
507{
508 struct call_single_data data = {.func = restart_watchdog_hrtimer};
509 /*
510 * Make sure that perf event counter will adapt to a new
511 * sampling period. Updating the sampling period directly would
512 * be much nicer but we do not have an API for that now so
513 * let's use a big hammer.
514 * Hrtimer will adopt the new period on the next tick but this
515 * might be late already so we have to restart the timer as well.
516 */
517 watchdog_nmi_disable(cpu);
518 __smp_call_function_single(cpu, &data, 1);
519 watchdog_nmi_enable(cpu);
520}
521
522static void update_timers_all_cpus(void)
523{
524 int cpu;
525
526 get_online_cpus();
527 preempt_disable();
528 for_each_online_cpu(cpu)
529 update_timers(cpu);
530 preempt_enable();
531 put_online_cpus();
532}
533
534static int watchdog_enable_all_cpus(bool sample_period_changed)
490{ 535{
491 int err = 0; 536 int err = 0;
492 537
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
496 pr_err("Failed to create watchdog threads, disabled\n"); 541 pr_err("Failed to create watchdog threads, disabled\n");
497 else 542 else
498 watchdog_running = 1; 543 watchdog_running = 1;
544 } else if (sample_period_changed) {
545 update_timers_all_cpus();
499 } 546 }
500 547
501 return err; 548 return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
520 void __user *buffer, size_t *lenp, loff_t *ppos) 567 void __user *buffer, size_t *lenp, loff_t *ppos)
521{ 568{
522 int err, old_thresh, old_enabled; 569 int err, old_thresh, old_enabled;
570 static DEFINE_MUTEX(watchdog_proc_mutex);
523 571
572 mutex_lock(&watchdog_proc_mutex);
524 old_thresh = ACCESS_ONCE(watchdog_thresh); 573 old_thresh = ACCESS_ONCE(watchdog_thresh);
525 old_enabled = ACCESS_ONCE(watchdog_user_enabled); 574 old_enabled = ACCESS_ONCE(watchdog_user_enabled);
526 575
527 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 576 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528 if (err || !write) 577 if (err || !write)
529 return err; 578 goto out;
530 579
531 set_sample_period(); 580 set_sample_period();
532 /* 581 /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
535 * watchdog_*_all_cpus() function takes care of this. 584 * watchdog_*_all_cpus() function takes care of this.
536 */ 585 */
537 if (watchdog_user_enabled && watchdog_thresh) 586 if (watchdog_user_enabled && watchdog_thresh)
538 err = watchdog_enable_all_cpus(); 587 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
539 else 588 else
540 watchdog_disable_all_cpus(); 589 watchdog_disable_all_cpus();
541 590
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
544 watchdog_thresh = old_thresh; 593 watchdog_thresh = old_thresh;
545 watchdog_user_enabled = old_enabled; 594 watchdog_user_enabled = old_enabled;
546 } 595 }
547 596out:
597 mutex_unlock(&watchdog_proc_mutex);
548 return err; 598 return err;
549} 599}
550#endif /* CONFIG_SYSCTL */ 600#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
554 set_sample_period(); 604 set_sample_period();
555 605
556 if (watchdog_user_enabled) 606 if (watchdog_user_enabled)
557 watchdog_enable_all_cpus(); 607 watchdog_enable_all_cpus(false);
558} 608}
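
proc_dowatchdog() now serializes concurrent writers with a function-local mutex and funnels every exit through a single unlock. A compact sketch of that handler pattern; the body is a stand-in, not watchdog code:

#include <pthread.h>
#include <stdio.h>

static int threshold = 10;

static int update_threshold(int write, int new_value)
{
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    int err = 0, old;

    pthread_mutex_lock(&lock);
    old = threshold;

    if (!write)                 /* read-only access: nothing to apply */
        goto out;

    if (new_value <= 0) {       /* reject and restore the old value */
        threshold = old;
        err = -1;
        goto out;
    }

    threshold = new_value;
    if (old != new_value)       /* only reconfigure when it actually changed */
        printf("threshold changed %d -> %d\n", old, new_value);
out:
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    update_threshold(1, 20);
    update_threshold(1, 0);
    update_threshold(0, 0);
    return 0;
}
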
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
14 14
15const char hex_asc[] = "0123456789abcdef"; 15const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 16EXPORT_SYMBOL(hex_asc);
17const char hex_asc_upper[] = "0123456789ABCDEF";
18EXPORT_SYMBOL(hex_asc_upper);
17 19
18/** 20/**
19 * hex_to_bin - convert a hex digit to its real value 21 * hex_to_bin - convert a hex digit to its real value
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..084f7b18d0c0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
592{ 592{
593 struct kobject *kobj = container_of(kref, struct kobject, kref); 593 struct kobject *kobj = container_of(kref, struct kobject, kref);
594#ifdef CONFIG_DEBUG_KOBJECT_RELEASE 594#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
595 pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n", 595 pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
596 kobject_name(kobj), kobj, __func__, kobj->parent); 596 kobject_name(kobj), kobj, __func__, kobj->parent);
597 INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); 597 INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
598 schedule_delayed_work(&kobj->release, HZ); 598 schedule_delayed_work(&kobj->release, HZ);
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
933 933
934bool kobj_ns_current_may_mount(enum kobj_ns_type type) 934bool kobj_ns_current_may_mount(enum kobj_ns_type type)
935{ 935{
936 bool may_mount = false; 936 bool may_mount = true;
937
938 if (type == KOBJ_NS_TYPE_NONE)
939 return true;
940 937
941 spin_lock(&kobj_ns_type_lock); 938 spin_lock(&kobj_ns_type_lock);
942 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 939 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
4#ifdef CONFIG_CMPXCHG_LOCKREF 4#ifdef CONFIG_CMPXCHG_LOCKREF
5 5
6/* 6/*
7 * Allow weakly-ordered memory architectures to provide barrier-less
8 * cmpxchg semantics for lockref updates.
9 */
10#ifndef cmpxchg64_relaxed
11# define cmpxchg64_relaxed cmpxchg64
12#endif
13
14/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
7 * Note that the "cmpxchg()" reloads the "old" value for the 23 * Note that the "cmpxchg()" reloads the "old" value for the
8 * failure case. 24 * failure case.
9 */ 25 */
@@ -14,12 +30,13 @@
14 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 30 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
15 struct lockref new = old, prev = old; \ 31 struct lockref new = old, prev = old; \
16 CODE \ 32 CODE \
17 old.lock_count = cmpxchg64(&lockref->lock_count, \ 33 old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
18 old.lock_count, new.lock_count); \ 34 old.lock_count, \
35 new.lock_count); \
19 if (likely(old.lock_count == prev.lock_count)) { \ 36 if (likely(old.lock_count == prev.lock_count)) { \
20 SUCCESS; \ 37 SUCCESS; \
21 } \ 38 } \
22 cpu_relax(); \ 39 arch_mutex_cpu_relax(); \
23 } \ 40 } \
24} while (0) 41} while (0)
25 42
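
The lockref change lets architectures supply a barrier-less cmpxchg64_relaxed() for the CMPXCHG_LOOP retry loop. The sketch below models that loop with a C11 relaxed compare-exchange on a 64-bit word that packs a lock half and a count half; the layout and names are illustrative only.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_MASK  0xffffffffu      /* low 32 bits: spinlock state */
#define COUNT_UNIT (1ull << 32)     /* high 32 bits: reference count */

static _Atomic uint64_t lock_count; /* starts unlocked, count 0 */

static bool lockref_get_not_locked(void)
{
    uint64_t old = atomic_load_explicit(&lock_count, memory_order_relaxed);

    while ((old & LOCK_MASK) == 0) {    /* lock half must read as free */
        uint64_t next = old + COUNT_UNIT;

        if (atomic_compare_exchange_weak_explicit(&lock_count, &old, next,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed))
            return true;
        /* CAS failure reloads "old"; spin again, like cpu_relax(). */
    }
    return false;                   /* someone holds the lock: slow path */
}

int main(void)
{
    lockref_get_not_locked();
    lockref_get_not_locked();
    printf("count = %" PRIu64 "\n", atomic_load(&lock_count) >> 32);
    return 0;
}
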
diff --git a/mm/Kconfig b/mm/Kconfig
index 026771a9b097..394838f489eb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
183config MEMORY_HOTREMOVE 183config MEMORY_HOTREMOVE
184 bool "Allow for memory hot remove" 184 bool "Allow for memory hot remove"
185 select MEMORY_ISOLATION 185 select MEMORY_ISOLATION
186 select HAVE_BOOTMEM_INFO_NODE if X86_64 186 select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE 187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
188 depends on MIGRATION 188 depends on MIGRATION
189 189
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a4339a7d..5a7d58fb883b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
204 struct bio_vec *to, *from; 204 struct bio_vec *to, *from;
205 unsigned i; 205 unsigned i;
206 206
207 if (force)
208 goto bounce;
207 bio_for_each_segment(from, *bio_orig, i) 209 bio_for_each_segment(from, *bio_orig, i)
208 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) 210 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
209 goto bounce; 211 goto bounce;
diff --git a/mm/compaction.c b/mm/compaction.c
index c43789388cd8..b5326b141a25 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
677 pfn -= pageblock_nr_pages) { 677 pfn -= pageblock_nr_pages) {
678 unsigned long isolated; 678 unsigned long isolated;
679 679
680 /*
681 * This can iterate a massively long zone without finding any
682 * suitable migration targets, so periodically check if we need
683 * to schedule.
684 */
685 cond_resched();
686
680 if (!pfn_valid(pfn)) 687 if (!pfn_valid(pfn))
681 continue; 688 continue;
682 689
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index afc2daa91c60..4c84678371eb 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
20 if (!capable(CAP_SYS_ADMIN)) 20 if (!capable(CAP_SYS_ADMIN))
21 return -EPERM; 21 return -EPERM;
22 22
23 if (!hwpoison_filter_enable)
24 goto inject;
25 if (!pfn_valid(pfn)) 23 if (!pfn_valid(pfn))
26 return -ENXIO; 24 return -ENXIO;
27 25
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
33 if (!get_page_unless_zero(hpage)) 31 if (!get_page_unless_zero(hpage))
34 return 0; 32 return 0;
35 33
34 if (!hwpoison_filter_enable)
35 goto inject;
36
36 if (!PageLRU(p) && !PageHuge(p)) 37 if (!PageLRU(p) && !PageHuge(p))
37 shake_page(p, 0); 38 shake_page(p, 0);
38 /* 39 /*
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
343 */ 343 */
344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) 344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
345{ 345{
346 struct page *p;
346 if (!capable(CAP_SYS_ADMIN)) 347 if (!capable(CAP_SYS_ADMIN))
347 return -EPERM; 348 return -EPERM;
348 for (; start < end; start += PAGE_SIZE) { 349 for (; start < end; start += PAGE_SIZE <<
349 struct page *p; 350 compound_order(compound_head(p))) {
350 int ret; 351 int ret;
351 352
352 ret = get_user_pages_fast(start, 1, 0, &p); 353 ret = get_user_pages_fast(start, 1, 0, &p);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5ff3ce13029..1c52ddbc839b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
39#include <linux/limits.h> 39#include <linux/limits.h>
40#include <linux/export.h> 40#include <linux/export.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/rbtree.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/swap.h> 44#include <linux/swap.h>
44#include <linux/swapops.h> 45#include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
160 161
161 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 162 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
162 163
164 struct rb_node tree_node; /* RB tree node */
165 unsigned long long usage_in_excess;/* Set to the value by which */
166 /* the soft limit is exceeded*/
167 bool on_tree;
163 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 168 struct mem_cgroup *memcg; /* Back pointer, we cannot */
164 /* use container_of */ 169 /* use container_of */
165}; 170};
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
168 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 173 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
169}; 174};
170 175
176/*
177 * Cgroups above their limits are maintained in a RB-Tree, independent of
178 * their hierarchy representation
179 */
180
181struct mem_cgroup_tree_per_zone {
182 struct rb_root rb_root;
183 spinlock_t lock;
184};
185
186struct mem_cgroup_tree_per_node {
187 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
188};
189
190struct mem_cgroup_tree {
191 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
192};
193
194static struct mem_cgroup_tree soft_limit_tree __read_mostly;
195
171struct mem_cgroup_threshold { 196struct mem_cgroup_threshold {
172 struct eventfd_ctx *eventfd; 197 struct eventfd_ctx *eventfd;
173 u64 threshold; 198 u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
303 atomic_t numainfo_events; 328 atomic_t numainfo_events;
304 atomic_t numainfo_updating; 329 atomic_t numainfo_updating;
305#endif 330#endif
306 /*
307 * Protects soft_contributed transitions.
308 * See mem_cgroup_update_soft_limit
309 */
310 spinlock_t soft_lock;
311
312 /*
313 * If true then this group has increased parents' children_in_excess
314 * when it got over the soft limit.
315 * When a group falls bellow the soft limit, parents' children_in_excess
316 * is decreased and soft_contributed changed to false.
317 */
318 bool soft_contributed;
319
320 /* Number of children that are in soft limit excess */
321 atomic_t children_in_excess;
322 331
323 struct mem_cgroup_per_node *nodeinfo[0]; 332 struct mem_cgroup_per_node *nodeinfo[0];
324 /* WARNING: nodeinfo must be the last member here */ 333 /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
422 * limit reclaim to prevent infinite loops, if they ever occur. 431 * limit reclaim to prevent infinite loops, if they ever occur.
423 */ 432 */
424#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 433#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
434#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
425 435
426enum charge_type { 436enum charge_type {
427 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 437 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
648 return mem_cgroup_zoneinfo(memcg, nid, zid); 658 return mem_cgroup_zoneinfo(memcg, nid, zid);
649} 659}
650 660
661static struct mem_cgroup_tree_per_zone *
662soft_limit_tree_node_zone(int nid, int zid)
663{
664 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
665}
666
667static struct mem_cgroup_tree_per_zone *
668soft_limit_tree_from_page(struct page *page)
669{
670 int nid = page_to_nid(page);
671 int zid = page_zonenum(page);
672
673 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
674}
675
676static void
677__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
678 struct mem_cgroup_per_zone *mz,
679 struct mem_cgroup_tree_per_zone *mctz,
680 unsigned long long new_usage_in_excess)
681{
682 struct rb_node **p = &mctz->rb_root.rb_node;
683 struct rb_node *parent = NULL;
684 struct mem_cgroup_per_zone *mz_node;
685
686 if (mz->on_tree)
687 return;
688
689 mz->usage_in_excess = new_usage_in_excess;
690 if (!mz->usage_in_excess)
691 return;
692 while (*p) {
693 parent = *p;
694 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
695 tree_node);
696 if (mz->usage_in_excess < mz_node->usage_in_excess)
697 p = &(*p)->rb_left;
698 /*
699 * We can't avoid mem cgroups that are over their soft
700 * limit by the same amount
701 */
702 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
703 p = &(*p)->rb_right;
704 }
705 rb_link_node(&mz->tree_node, parent, p);
706 rb_insert_color(&mz->tree_node, &mctz->rb_root);
707 mz->on_tree = true;
708}
709
710static void
711__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
712 struct mem_cgroup_per_zone *mz,
713 struct mem_cgroup_tree_per_zone *mctz)
714{
715 if (!mz->on_tree)
716 return;
717 rb_erase(&mz->tree_node, &mctz->rb_root);
718 mz->on_tree = false;
719}
720
721static void
722mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
723 struct mem_cgroup_per_zone *mz,
724 struct mem_cgroup_tree_per_zone *mctz)
725{
726 spin_lock(&mctz->lock);
727 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
728 spin_unlock(&mctz->lock);
729}
730
731
732static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
733{
734 unsigned long long excess;
735 struct mem_cgroup_per_zone *mz;
736 struct mem_cgroup_tree_per_zone *mctz;
737 int nid = page_to_nid(page);
738 int zid = page_zonenum(page);
739 mctz = soft_limit_tree_from_page(page);
740
741 /*
742 * Necessary to update all ancestors when hierarchy is used.
743 * because their event counter is not touched.
744 */
745 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
746 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
747 excess = res_counter_soft_limit_excess(&memcg->res);
748 /*
749 * We have to update the tree if mz is on RB-tree or
750 * mem is over its softlimit.
751 */
752 if (excess || mz->on_tree) {
753 spin_lock(&mctz->lock);
754 /* if on-tree, remove it */
755 if (mz->on_tree)
756 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
757 /*
758 * Insert again. mz->usage_in_excess will be updated.
759 * If excess is 0, no tree ops.
760 */
761 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
762 spin_unlock(&mctz->lock);
763 }
764 }
765}
766
767static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
768{
769 int node, zone;
770 struct mem_cgroup_per_zone *mz;
771 struct mem_cgroup_tree_per_zone *mctz;
772
773 for_each_node(node) {
774 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
775 mz = mem_cgroup_zoneinfo(memcg, node, zone);
776 mctz = soft_limit_tree_node_zone(node, zone);
777 mem_cgroup_remove_exceeded(memcg, mz, mctz);
778 }
779 }
780}
781
782static struct mem_cgroup_per_zone *
783__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
784{
785 struct rb_node *rightmost = NULL;
786 struct mem_cgroup_per_zone *mz;
787
788retry:
789 mz = NULL;
790 rightmost = rb_last(&mctz->rb_root);
791 if (!rightmost)
792 goto done; /* Nothing to reclaim from */
793
794 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
795 /*
796 * Remove the node now but someone else can add it back,
797 * we will add it back at the end of reclaim to its correct
798 * position in the tree.
799 */
800 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
801 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
802 !css_tryget(&mz->memcg->css))
803 goto retry;
804done:
805 return mz;
806}
807
808static struct mem_cgroup_per_zone *
809mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
810{
811 struct mem_cgroup_per_zone *mz;
812
813 spin_lock(&mctz->lock);
814 mz = __mem_cgroup_largest_soft_limit_node(mctz);
815 spin_unlock(&mctz->lock);
816 return mz;
817}
818
651/* 819/*
652 * Implementation Note: reading percpu statistics for memcg. 820 * Implementation Note: reading percpu statistics for memcg.
653 * 821 *
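
The memcontrol hunk above reintroduces the per-zone soft-limit tree: groups over their soft limit are kept ordered by how far they exceed it, and reclaim starts from the rightmost (worst) entry. The model below uses a plain binary search tree instead of an rbtree to show just that ordering and the pop-the-largest step; node and field names are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

struct excess_node {
    unsigned long long excess;
    struct excess_node *left, *right;
};

/* Insert keyed on excess; equal keys go to the right, as in the hunk above. */
static void insert_excess(struct excess_node **root, struct excess_node *node)
{
    while (*root) {
        if (node->excess < (*root)->excess)
            root = &(*root)->left;
        else
            root = &(*root)->right;
    }
    node->left = node->right = NULL;
    *root = node;
}

/* Detach and return the rightmost node: the group most over its limit. */
static struct excess_node *pop_largest(struct excess_node **root)
{
    struct excess_node *node;

    while (*root && (*root)->right)
        root = &(*root)->right;
    node = *root;
    if (node)
        *root = node->left;
    return node;
}

int main(void)
{
    struct excess_node nodes[] = { { 40 }, { 5 }, { 120 }, { 40 } };
    struct excess_node *root = NULL, *victim;
    size_t i;

    for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
        insert_excess(&root, &nodes[i]);

    while ((victim = pop_largest(&root)))
        printf("reclaim from group with excess %llu\n", victim->excess);
    return 0;
}
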
@@ -822,48 +990,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
822} 990}
823 991
824/* 992/*
825 * Called from rate-limited memcg_check_events when enough
826 * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
827 * that all the parents up the hierarchy will be notified that this group
828 * is in excess or that it is not in excess anymore. mmecg->soft_contributed
829 * makes the transition a single action whenever the state flips from one to
830 * the other.
831 */
832static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
833{
834 unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
835 struct mem_cgroup *parent = memcg;
836 int delta = 0;
837
838 spin_lock(&memcg->soft_lock);
839 if (excess) {
840 if (!memcg->soft_contributed) {
841 delta = 1;
842 memcg->soft_contributed = true;
843 }
844 } else {
845 if (memcg->soft_contributed) {
846 delta = -1;
847 memcg->soft_contributed = false;
848 }
849 }
850
851 /*
852 * Necessary to update all ancestors when hierarchy is used
853 * because their event counter is not touched.
854 * We track children even outside the hierarchy for the root
855 * cgroup because tree walk starting at root should visit
856 * all cgroups and we want to prevent from pointless tree
857 * walk if no children is below the limit.
858 */
859 while (delta && (parent = parent_mem_cgroup(parent)))
860 atomic_add(delta, &parent->children_in_excess);
861 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
862 atomic_add(delta, &root_mem_cgroup->children_in_excess);
863 spin_unlock(&memcg->soft_lock);
864}
865
866/*
867 * Check events in order. 993 * Check events in order.
868 * 994 *
869 */ 995 */
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
886 1012
887 mem_cgroup_threshold(memcg); 1013 mem_cgroup_threshold(memcg);
888 if (unlikely(do_softlimit)) 1014 if (unlikely(do_softlimit))
889 mem_cgroup_update_soft_limit(memcg); 1015 mem_cgroup_update_tree(memcg, page);
890#if MAX_NUMNODES > 1 1016#if MAX_NUMNODES > 1
891 if (unlikely(do_numainfo)) 1017 if (unlikely(do_numainfo))
892 atomic_inc(&memcg->numainfo_events); 1018 atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
929 return memcg; 1055 return memcg;
930} 1056}
931 1057
932static enum mem_cgroup_filter_t
933mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
934 mem_cgroup_iter_filter cond)
935{
936 if (!cond)
937 return VISIT;
938 return cond(memcg, root);
939}
940
941/* 1058/*
942 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1059 * Returns a next (in a pre-order walk) alive memcg (with elevated css
943 * ref. count) or NULL if the whole root's subtree has been visited. 1060 * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
945 * helper function to be used by mem_cgroup_iter 1062 * helper function to be used by mem_cgroup_iter
946 */ 1063 */
947static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1064static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
948 struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond) 1065 struct mem_cgroup *last_visited)
949{ 1066{
950 struct cgroup_subsys_state *prev_css, *next_css; 1067 struct cgroup_subsys_state *prev_css, *next_css;
951 1068
@@ -963,31 +1080,11 @@ skip_node:
963 if (next_css) { 1080 if (next_css) {
964 struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1081 struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
965 1082
966 switch (mem_cgroup_filter(mem, root, cond)) { 1083 if (css_tryget(&mem->css))
967 case SKIP: 1084 return mem;
1085 else {
968 prev_css = next_css; 1086 prev_css = next_css;
969 goto skip_node; 1087 goto skip_node;
970 case SKIP_TREE:
971 if (mem == root)
972 return NULL;
973 /*
974 * css_rightmost_descendant is not an optimal way to
975 * skip through a subtree (especially for imbalanced
976 * trees leaning to right) but that's what we have right
977 * now. More effective solution would be traversing
978 * right-up for first non-NULL without calling
979 * css_next_descendant_pre afterwards.
980 */
981 prev_css = css_rightmost_descendant(next_css);
982 goto skip_node;
983 case VISIT:
984 if (css_tryget(&mem->css))
985 return mem;
986 else {
987 prev_css = next_css;
988 goto skip_node;
989 }
990 break;
991 } 1088 }
992 } 1089 }
993 1090
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1051 * @root: hierarchy root 1148 * @root: hierarchy root
1052 * @prev: previously returned memcg, NULL on first invocation 1149 * @prev: previously returned memcg, NULL on first invocation
1053 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1150 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1054 * @cond: filter for visited nodes, NULL for no filter
1055 * 1151 *
1056 * Returns references to children of the hierarchy below @root, or 1152 * Returns references to children of the hierarchy below @root, or
1057 * @root itself, or %NULL after a full round-trip. 1153 * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1064 * divide up the memcgs in the hierarchy among all concurrent 1160 * divide up the memcgs in the hierarchy among all concurrent
1065 * reclaimers operating on the same zone and priority. 1161 * reclaimers operating on the same zone and priority.
1066 */ 1162 */
1067struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 1163struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1068 struct mem_cgroup *prev, 1164 struct mem_cgroup *prev,
1069 struct mem_cgroup_reclaim_cookie *reclaim, 1165 struct mem_cgroup_reclaim_cookie *reclaim)
1070 mem_cgroup_iter_filter cond)
1071{ 1166{
1072 struct mem_cgroup *memcg = NULL; 1167 struct mem_cgroup *memcg = NULL;
1073 struct mem_cgroup *last_visited = NULL; 1168 struct mem_cgroup *last_visited = NULL;
1074 1169
1075 if (mem_cgroup_disabled()) { 1170 if (mem_cgroup_disabled())
1076 /* first call must return non-NULL, second return NULL */ 1171 return NULL;
1077 return (struct mem_cgroup *)(unsigned long)!prev;
1078 }
1079 1172
1080 if (!root) 1173 if (!root)
1081 root = root_mem_cgroup; 1174 root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1086 if (!root->use_hierarchy && root != root_mem_cgroup) { 1179 if (!root->use_hierarchy && root != root_mem_cgroup) {
1087 if (prev) 1180 if (prev)
1088 goto out_css_put; 1181 goto out_css_put;
1089 if (mem_cgroup_filter(root, root, cond) == VISIT) 1182 return root;
1090 return root;
1091 return NULL;
1092 } 1183 }
1093 1184
1094 rcu_read_lock(); 1185 rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1111 last_visited = mem_cgroup_iter_load(iter, root, &seq); 1202 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1112 } 1203 }
1113 1204
1114 memcg = __mem_cgroup_iter_next(root, last_visited, cond); 1205 memcg = __mem_cgroup_iter_next(root, last_visited);
1115 1206
1116 if (reclaim) { 1207 if (reclaim) {
1117 mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1208 mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1122 reclaim->generation = iter->generation; 1213 reclaim->generation = iter->generation;
1123 } 1214 }
1124 1215
1125 /* 1216 if (prev && !memcg)
1126 * We have finished the whole tree walk or no group has been
1127 * visited because filter told us to skip the root node.
1128 */
1129 if (!memcg && (prev || (cond && !last_visited)))
1130 goto out_unlock; 1217 goto out_unlock;
1131 } 1218 }
1132out_unlock: 1219out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1767 return total; 1854 return total;
1768} 1855}
1769 1856
1770#if MAX_NUMNODES > 1
1771/** 1857/**
1772 * test_mem_cgroup_node_reclaimable 1858 * test_mem_cgroup_node_reclaimable
1773 * @memcg: the target memcg 1859 * @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1790 return false; 1876 return false;
1791 1877
1792} 1878}
1879#if MAX_NUMNODES > 1
1793 1880
1794/* 1881/*
1795 * Always updating the nodemask is not very good - even if we have an empty 1882 * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1857 return node; 1944 return node;
1858} 1945}
1859 1946
1947/*
1948 * Check all nodes whether they contain reclaimable pages or not.
1949 * For quick scan, we make use of scan_nodes. This will allow us to skip
1950 * unused nodes. But scan_nodes is lazily updated and may not contain
1951 * enough new information. We need to double check.
1952 */
1953static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1954{
1955 int nid;
1956
1957 /*
1958 * quick check...making use of scan_nodes.
1959 * We can skip unused nodes.
1960 */
1961 if (!nodes_empty(memcg->scan_nodes)) {
1962 for (nid = first_node(memcg->scan_nodes);
1963 nid < MAX_NUMNODES;
1964 nid = next_node(nid, memcg->scan_nodes)) {
1965
1966 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1967 return true;
1968 }
1969 }
1970 /*
 1971 * Check the rest of the nodes.
1972 */
1973 for_each_node_state(nid, N_MEMORY) {
1974 if (node_isset(nid, memcg->scan_nodes))
1975 continue;
1976 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1977 return true;
1978 }
1979 return false;
1980}
1981
1860#else 1982#else
1861int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1983int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1862{ 1984{
1863 return 0; 1985 return 0;
1864} 1986}
1865 1987
1866#endif 1988static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1867
1868/*
1869 * A group is eligible for the soft limit reclaim under the given root
1870 * hierarchy if
1871 * a) it is over its soft limit
1872 * b) any parent up the hierarchy is over its soft limit
1873 *
1874 * If the given group doesn't have any children over the limit then it
1875 * doesn't make any sense to iterate its subtree.
1876 */
1877enum mem_cgroup_filter_t
1878mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
1879 struct mem_cgroup *root)
1880{ 1989{
1881 struct mem_cgroup *parent; 1990 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1882 1991}
1883 if (!memcg) 1992#endif
1884 memcg = root_mem_cgroup;
1885 parent = memcg;
1886
1887 if (res_counter_soft_limit_excess(&memcg->res))
1888 return VISIT;
1889 1993
1890 /* 1994static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1891 * If any parent up to the root in the hierarchy is over its soft limit 1995 struct zone *zone,
1892 * then we have to obey and reclaim from this group as well. 1996 gfp_t gfp_mask,
1893 */ 1997 unsigned long *total_scanned)
1894 while ((parent = parent_mem_cgroup(parent))) { 1998{
1895 if (res_counter_soft_limit_excess(&parent->res)) 1999 struct mem_cgroup *victim = NULL;
1896 return VISIT; 2000 int total = 0;
1897 if (parent == root) 2001 int loop = 0;
2002 unsigned long excess;
2003 unsigned long nr_scanned;
2004 struct mem_cgroup_reclaim_cookie reclaim = {
2005 .zone = zone,
2006 .priority = 0,
2007 };
2008
2009 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2010
2011 while (1) {
2012 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2013 if (!victim) {
2014 loop++;
2015 if (loop >= 2) {
2016 /*
2017 * If we have not been able to reclaim
 2018 * anything, it might be because there are
 2019 * no reclaimable pages under this hierarchy.
2020 */
2021 if (!total)
2022 break;
2023 /*
2024 * We want to do more targeted reclaim.
 2025 * excess >> 2 is not too large, so we do not
 2026 * reclaim too much, nor too small, so we do not keep
 2027 * coming back to reclaim from this cgroup.
2028 */
2029 if (total >= (excess >> 2) ||
2030 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2031 break;
2032 }
2033 continue;
2034 }
2035 if (!mem_cgroup_reclaimable(victim, false))
2036 continue;
2037 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2038 zone, &nr_scanned);
2039 *total_scanned += nr_scanned;
2040 if (!res_counter_soft_limit_excess(&root_memcg->res))
1898 break; 2041 break;
1899 } 2042 }
1900 2043 mem_cgroup_iter_break(root_memcg, victim);
1901 if (!atomic_read(&memcg->children_in_excess)) 2044 return total;
1902 return SKIP_TREE;
1903 return SKIP;
1904} 2045}
1905 2046
1906static DEFINE_SPINLOCK(memcg_oom_lock); 2047static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2812 unlock_page_cgroup(pc); 2953 unlock_page_cgroup(pc);
2813 2954
2814 /* 2955 /*
2815 * "charge_statistics" updated event counter. 2956 * "charge_statistics" updated event counter. Then, check it.
 2957 * Insert the ancestor (and the ancestor's ancestors) into the
 2958 * softlimit RB-tree if they exceed the softlimit.
2816 */ 2959 */
2817 memcg_check_events(memcg, page); 2960 memcg_check_events(memcg, page);
2818} 2961}
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4647 return ret; 4790 return ret;
4648} 4791}
4649 4792
4793unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4794 gfp_t gfp_mask,
4795 unsigned long *total_scanned)
4796{
4797 unsigned long nr_reclaimed = 0;
4798 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4799 unsigned long reclaimed;
4800 int loop = 0;
4801 struct mem_cgroup_tree_per_zone *mctz;
4802 unsigned long long excess;
4803 unsigned long nr_scanned;
4804
4805 if (order > 0)
4806 return 0;
4807
4808 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4809 /*
 4810 * This loop can run for a while, especially if mem_cgroups continuously
 4811 * keep exceeding their soft limit and putting the system under
 4812 * pressure.
4813 */
4814 do {
4815 if (next_mz)
4816 mz = next_mz;
4817 else
4818 mz = mem_cgroup_largest_soft_limit_node(mctz);
4819 if (!mz)
4820 break;
4821
4822 nr_scanned = 0;
4823 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4824 gfp_mask, &nr_scanned);
4825 nr_reclaimed += reclaimed;
4826 *total_scanned += nr_scanned;
4827 spin_lock(&mctz->lock);
4828
4829 /*
4830 * If we failed to reclaim anything from this memory cgroup
4831 * it is time to move on to the next cgroup
4832 */
4833 next_mz = NULL;
4834 if (!reclaimed) {
4835 do {
4836 /*
4837 * Loop until we find yet another one.
4838 *
4839 * By the time we get the soft_limit lock
 4840 * again, someone might have added the
 4841 * group back on the RB tree. Iterate to
 4842 * make sure we get a different memcg.
 4843 * mem_cgroup_largest_soft_limit_node returns
 4844 * NULL if no other cgroup is present on
 4845 * the tree.
4846 */
4847 next_mz =
4848 __mem_cgroup_largest_soft_limit_node(mctz);
4849 if (next_mz == mz)
4850 css_put(&next_mz->memcg->css);
4851 else /* next_mz == NULL or other memcg */
4852 break;
4853 } while (1);
4854 }
4855 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4856 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4857 /*
4858 * One school of thought says that we should not add
4859 * back the node to the tree if reclaim returns 0.
 4860 * But our reclaim could return 0 simply because, due
 4861 * to the priority, we are exposing a smaller subset of
4862 * memory to reclaim from. Consider this as a longer
4863 * term TODO.
4864 */
4865 /* If excess == 0, no tree ops */
4866 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4867 spin_unlock(&mctz->lock);
4868 css_put(&mz->memcg->css);
4869 loop++;
4870 /*
 4871 * We could not reclaim anything and there are no more
 4872 * mem cgroups to try, or we seem to be looping without
 4873 * reclaiming anything.
4874 */
4875 if (!nr_reclaimed &&
4876 (next_mz == NULL ||
4877 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4878 break;
4879 } while (!nr_reclaimed);
4880 if (next_mz)
4881 css_put(&next_mz->memcg->css);
4882 return nr_reclaimed;
4883}
4884
4650/** 4885/**
4651 * mem_cgroup_force_empty_list - clears LRU of a group 4886 * mem_cgroup_force_empty_list - clears LRU of a group
4652 * @memcg: group to clear 4887 * @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5911 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6146 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5912 mz = &pn->zoneinfo[zone]; 6147 mz = &pn->zoneinfo[zone];
5913 lruvec_init(&mz->lruvec); 6148 lruvec_init(&mz->lruvec);
6149 mz->usage_in_excess = 0;
6150 mz->on_tree = false;
5914 mz->memcg = memcg; 6151 mz->memcg = memcg;
5915 } 6152 }
5916 memcg->nodeinfo[node] = pn; 6153 memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5966 int node; 6203 int node;
5967 size_t size = memcg_size(); 6204 size_t size = memcg_size();
5968 6205
6206 mem_cgroup_remove_from_trees(memcg);
5969 free_css_id(&mem_cgroup_subsys, &memcg->css); 6207 free_css_id(&mem_cgroup_subsys, &memcg->css);
5970 6208
5971 for_each_node(node) 6209 for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6002} 6240}
6003EXPORT_SYMBOL(parent_mem_cgroup); 6241EXPORT_SYMBOL(parent_mem_cgroup);
6004 6242
6243static void __init mem_cgroup_soft_limit_tree_init(void)
6244{
6245 struct mem_cgroup_tree_per_node *rtpn;
6246 struct mem_cgroup_tree_per_zone *rtpz;
6247 int tmp, node, zone;
6248
6249 for_each_node(node) {
6250 tmp = node;
6251 if (!node_state(node, N_NORMAL_MEMORY))
6252 tmp = -1;
6253 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6254 BUG_ON(!rtpn);
6255
6256 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6257
6258 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6259 rtpz = &rtpn->rb_tree_per_zone[zone];
6260 rtpz->rb_root = RB_ROOT;
6261 spin_lock_init(&rtpz->lock);
6262 }
6263 }
6264}
6265
6005static struct cgroup_subsys_state * __ref 6266static struct cgroup_subsys_state * __ref
6006mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6267mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6007{ 6268{
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6031 mutex_init(&memcg->thresholds_lock); 6292 mutex_init(&memcg->thresholds_lock);
6032 spin_lock_init(&memcg->move_lock); 6293 spin_lock_init(&memcg->move_lock);
6033 vmpressure_init(&memcg->vmpressure); 6294 vmpressure_init(&memcg->vmpressure);
6034 spin_lock_init(&memcg->soft_lock);
6035 6295
6036 return &memcg->css; 6296 return &memcg->css;
6037 6297
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6109 6369
6110 mem_cgroup_invalidate_reclaim_iterators(memcg); 6370 mem_cgroup_invalidate_reclaim_iterators(memcg);
6111 mem_cgroup_reparent_charges(memcg); 6371 mem_cgroup_reparent_charges(memcg);
6112 if (memcg->soft_contributed) {
6113 while ((memcg = parent_mem_cgroup(memcg)))
6114 atomic_dec(&memcg->children_in_excess);
6115
6116 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
6117 atomic_dec(&root_mem_cgroup->children_in_excess);
6118 }
6119 mem_cgroup_destroy_all_caches(memcg); 6372 mem_cgroup_destroy_all_caches(memcg);
6120 vmpressure_cleanup(&memcg->vmpressure); 6373 vmpressure_cleanup(&memcg->vmpressure);
6121} 6374}
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
6790{ 7043{
6791 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7044 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6792 enable_swap_cgroup(); 7045 enable_swap_cgroup();
7046 mem_cgroup_soft_limit_tree_init();
6793 memcg_stock_init(); 7047 memcg_stock_init();
6794 return 0; 7048 return 0;
6795} 7049}
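
The mem_cgroup_soft_limit_reclaim() hunk above reinstates the per-zone soft-limit RB-tree walk: pick the memcg with the largest soft-limit excess, reclaim from it, re-insert it with its updated excess, and stop once something was reclaimed or the loop limits are hit. Below is a rough userspace model of that select-largest/reclaim loop (plain C; the array, the fixed "reclaim chunk" and the names are stand-ins for the kernel's RB-tree and shrinker, not the real API):

    #include <stdio.h>

    #define MAX_LOOPS 8   /* stands in for MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS */

    struct group { const char *name; long excess; };

    /* Like mem_cgroup_largest_soft_limit_node(): the group most over its limit. */
    static struct group *largest_excess(struct group *g, int n)
    {
        struct group *best = NULL;

        for (int i = 0; i < n; i++)
            if (g[i].excess > 0 && (!best || g[i].excess > best->excess))
                best = &g[i];
        return best;
    }

    int main(void)
    {
        struct group groups[] = { { "A", 300 }, { "B", 120 }, { "C", 0 } };
        long nr_reclaimed = 0;
        int loop = 0;

        do {
            struct group *mz = largest_excess(groups, 3);

            if (!mz)
                break;
            /* "Reclaim" a fixed chunk; the kernel calls mem_cgroup_soft_reclaim(). */
            long got = mz->excess < 64 ? mz->excess : 64;
            mz->excess -= got;
            nr_reclaimed += got;
            printf("reclaimed %ld from %s, excess now %ld\n", got, mz->name, mz->excess);
        } while (!nr_reclaimed && ++loop < MAX_LOOPS);

        printf("total reclaimed: %ld\n", nr_reclaimed);
        return 0;
    }

The real code additionally re-inserts the victim with its new excess under mctz->lock and drops css references; the sketch only keeps the selection and termination shape.
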
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 947ed5413279..bf3351b5115e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1114 * shake_page could have turned it free. 1114 * shake_page could have turned it free.
1115 */ 1115 */
1116 if (is_free_buddy_page(p)) { 1116 if (is_free_buddy_page(p)) {
1117 action_result(pfn, "free buddy, 2nd try", 1117 if (flags & MF_COUNT_INCREASED)
1118 DELAYED); 1118 action_result(pfn, "free buddy", DELAYED);
1119 else
1120 action_result(pfn, "free buddy, 2nd try", DELAYED);
1119 return 0; 1121 return 0;
1120 } 1122 }
1121 action_result(pfn, "non LRU", IGNORED); 1123 action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
1349 * worked by memory_failure() and the page lock is not held yet. 1351 * worked by memory_failure() and the page lock is not held yet.
1350 * In such case, we yield to memory_failure() and make unpoison fail. 1352 * In such case, we yield to memory_failure() and make unpoison fail.
1351 */ 1353 */
1352 if (PageTransHuge(page)) { 1354 if (!PageHuge(page) && PageTransHuge(page)) {
1353 pr_info("MCE: Memory failure is now running on %#lx\n", pfn); 1355 pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
1354 return 0; 1356 return 0;
1355 } 1357 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 9c8d5f59d30b..a26bccd44ccb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
107 list_del(&page->lru); 107 list_del(&page->lru);
108 dec_zone_page_state(page, NR_ISOLATED_ANON + 108 dec_zone_page_state(page, NR_ISOLATED_ANON +
109 page_is_file_cache(page)); 109 page_is_file_cache(page));
110 if (unlikely(balloon_page_movable(page))) 110 if (unlikely(isolated_balloon_page(page)))
111 balloon_page_putback(page); 111 balloon_page_putback(page);
112 else 112 else
113 putback_lru_page(page); 113 putback_lru_page(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..d480cd6fc475 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
379 379
380 /* 380 /*
381 * Initialize pte walk starting at the already pinned page where we 381 * Initialize pte walk starting at the already pinned page where we
382 * are sure that there is a pte. 382 * are sure that there is a pte, as it was pinned under the same
383 * mmap_sem write op.
383 */ 384 */
384 pte = get_locked_pte(vma->vm_mm, start, &ptl); 385 pte = get_locked_pte(vma->vm_mm, start, &ptl);
385 end = min(end, pmd_addr_end(start, end)); 386 /* Make sure we do not cross the page table boundary */
387 end = pgd_addr_end(start, end);
388 end = pud_addr_end(start, end);
389 end = pmd_addr_end(start, end);
386 390
387 /* The page next to the pinned page is the first we will try to get */ 391 /* The page next to the pinned page is the first we will try to get */
388 start += PAGE_SIZE; 392 start += PAGE_SIZE;
@@ -736,6 +740,7 @@ static int do_mlockall(int flags)
736 740
737 /* Ignore errors */ 741 /* Ignore errors */
738 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 742 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
743 cond_resched();
739 } 744 }
740out: 745out:
741 return 0; 746 return 0;
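
The __munlock_pagevec_fill() hunk above clamps end at every level (pgd, pud, pmd) instead of only at the PMD, so the pte walk that follows never crosses a page-table page. Each pXd_addr_end(start, end) is "the next boundary of that level after start, or end if that comes first". A simplified model of the rounding (plain C; x86-64 4-level shift values assumed, wraparound handling omitted):

    #include <stdio.h>

    /* Simplified pXd_addr_end(): next level-aligned boundary after addr,
     * clamped to end (the kernel also guards against address wraparound). */
    static unsigned long long addr_end(unsigned long long addr,
                                       unsigned long long end,
                                       unsigned long long level_size)
    {
        unsigned long long boundary = (addr + level_size) & ~(level_size - 1);

        return boundary < end ? boundary : end;
    }

    int main(void)
    {
        unsigned long long start = 0x7f0000001000ULL;
        unsigned long long end   = 0x7f0000400000ULL;

        end = addr_end(start, end, 1ULL << 39);   /* pgd_addr_end, 512 GB */
        end = addr_end(start, end, 1ULL << 30);   /* pud_addr_end, 1 GB   */
        end = addr_end(start, end, 1ULL << 21);   /* pmd_addr_end, 2 MB   */

        printf("pte walk clamped to [%#llx, %#llx)\n", start, end);
        return 0;
    }
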
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ee638f76ebe..dd886fac451a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6366 list_del(&page->lru); 6366 list_del(&page->lru);
6367 rmv_page_order(page); 6367 rmv_page_order(page);
6368 zone->free_area[order].nr_free--; 6368 zone->free_area[order].nr_free--;
6369#ifdef CONFIG_HIGHMEM
6370 if (PageHighMem(page))
6371 totalhigh_pages -= 1 << order;
6372#endif
6373 for (i = 0; i < (1 << order); i++) 6369 for (i = 0; i < (1 << order); i++)
6374 SetPageReserved((page+i)); 6370 SetPageReserved((page+i));
6375 pfn += (1 << order); 6371 pfn += (1 << order);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..53f2f82f83ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
48#include <asm/div64.h> 48#include <asm/div64.h>
49 49
50#include <linux/swapops.h> 50#include <linux/swapops.h>
51#include <linux/balloon_compaction.h>
51 52
52#include "internal.h" 53#include "internal.h"
53 54
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
139{ 140{
140 return !sc->target_mem_cgroup; 141 return !sc->target_mem_cgroup;
141} 142}
142
143static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
144{
145 struct mem_cgroup *root = sc->target_mem_cgroup;
146 return !mem_cgroup_disabled() &&
147 mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
148}
149#else 143#else
150static bool global_reclaim(struct scan_control *sc) 144static bool global_reclaim(struct scan_control *sc)
151{ 145{
152 return true; 146 return true;
153} 147}
154
155static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
156{
157 return false;
158}
159#endif 148#endif
160 149
161unsigned long zone_reclaimable_pages(struct zone *zone) 150unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -1125,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1125 LIST_HEAD(clean_pages); 1114 LIST_HEAD(clean_pages);
1126 1115
1127 list_for_each_entry_safe(page, next, page_list, lru) { 1116 list_for_each_entry_safe(page, next, page_list, lru) {
1128 if (page_is_file_cache(page) && !PageDirty(page)) { 1117 if (page_is_file_cache(page) && !PageDirty(page) &&
1118 !isolated_balloon_page(page)) {
1129 ClearPageActive(page); 1119 ClearPageActive(page);
1130 list_move(&page->lru, &clean_pages); 1120 list_move(&page->lru, &clean_pages);
1131 } 1121 }
@@ -2176,11 +2166,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
2176 } 2166 }
2177} 2167}
2178 2168
2179static int 2169static void shrink_zone(struct zone *zone, struct scan_control *sc)
2180__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2181{ 2170{
2182 unsigned long nr_reclaimed, nr_scanned; 2171 unsigned long nr_reclaimed, nr_scanned;
2183 int groups_scanned = 0;
2184 2172
2185 do { 2173 do {
2186 struct mem_cgroup *root = sc->target_mem_cgroup; 2174 struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2176,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2188 .zone = zone, 2176 .zone = zone,
2189 .priority = sc->priority, 2177 .priority = sc->priority,
2190 }; 2178 };
2191 struct mem_cgroup *memcg = NULL; 2179 struct mem_cgroup *memcg;
2192 mem_cgroup_iter_filter filter = (soft_reclaim) ?
2193 mem_cgroup_soft_reclaim_eligible : NULL;
2194 2180
2195 nr_reclaimed = sc->nr_reclaimed; 2181 nr_reclaimed = sc->nr_reclaimed;
2196 nr_scanned = sc->nr_scanned; 2182 nr_scanned = sc->nr_scanned;
2197 2183
2198 while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) { 2184 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2185 do {
2199 struct lruvec *lruvec; 2186 struct lruvec *lruvec;
2200 2187
2201 groups_scanned++;
2202 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 2188 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2203 2189
2204 shrink_lruvec(lruvec, sc); 2190 shrink_lruvec(lruvec, sc);
@@ -2218,7 +2204,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2218 mem_cgroup_iter_break(root, memcg); 2204 mem_cgroup_iter_break(root, memcg);
2219 break; 2205 break;
2220 } 2206 }
2221 } 2207 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2208 } while (memcg);
2222 2209
2223 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, 2210 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2224 sc->nr_scanned - nr_scanned, 2211 sc->nr_scanned - nr_scanned,
@@ -2226,37 +2213,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2226 2213
2227 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, 2214 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2228 sc->nr_scanned - nr_scanned, sc)); 2215 sc->nr_scanned - nr_scanned, sc));
2229
2230 return groups_scanned;
2231}
2232
2233
2234static void shrink_zone(struct zone *zone, struct scan_control *sc)
2235{
2236 bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
2237 unsigned long nr_scanned = sc->nr_scanned;
2238 int scanned_groups;
2239
2240 scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
2241 /*
2242 * memcg iterator might race with other reclaimer or start from
2243 * a incomplete tree walk so the tree walk in __shrink_zone
2244 * might have missed groups that are above the soft limit. Try
2245 * another loop to catch up with others. Do it just once to
2246 * prevent from reclaim latencies when other reclaimers always
2247 * preempt this one.
2248 */
2249 if (do_soft_reclaim && !scanned_groups)
2250 __shrink_zone(zone, sc, do_soft_reclaim);
2251
2252 /*
2253 * No group is over the soft limit or those that are do not have
2254 * pages in the zone we are reclaiming so we have to reclaim everybody
2255 */
2256 if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
2257 __shrink_zone(zone, sc, false);
2258 return;
2259 }
2260} 2216}
2261 2217
2262/* Returns true if compaction should go ahead for a high-order request */ 2218/* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2276,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2320{ 2276{
2321 struct zoneref *z; 2277 struct zoneref *z;
2322 struct zone *zone; 2278 struct zone *zone;
2279 unsigned long nr_soft_reclaimed;
2280 unsigned long nr_soft_scanned;
2323 bool aborted_reclaim = false; 2281 bool aborted_reclaim = false;
2324 2282
2325 /* 2283 /*
@@ -2359,6 +2317,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2359 continue; 2317 continue;
2360 } 2318 }
2361 } 2319 }
2320 /*
 2321 * This steals pages from memory cgroups over their soft limit
2322 * and returns the number of reclaimed pages and
2323 * scanned pages. This works for global memory pressure
2324 * and balancing, not for a memcg's limit.
2325 */
2326 nr_soft_scanned = 0;
2327 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2328 sc->order, sc->gfp_mask,
2329 &nr_soft_scanned);
2330 sc->nr_reclaimed += nr_soft_reclaimed;
2331 sc->nr_scanned += nr_soft_scanned;
2362 /* need some check for avoid more shrink_zone() */ 2332 /* need some check for avoid more shrink_zone() */
2363 } 2333 }
2364 2334
@@ -2952,6 +2922,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2952{ 2922{
2953 int i; 2923 int i;
2954 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2924 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2925 unsigned long nr_soft_reclaimed;
2926 unsigned long nr_soft_scanned;
2955 struct scan_control sc = { 2927 struct scan_control sc = {
2956 .gfp_mask = GFP_KERNEL, 2928 .gfp_mask = GFP_KERNEL,
2957 .priority = DEF_PRIORITY, 2929 .priority = DEF_PRIORITY,
@@ -3066,6 +3038,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
3066 3038
3067 sc.nr_scanned = 0; 3039 sc.nr_scanned = 0;
3068 3040
3041 nr_soft_scanned = 0;
3042 /*
3043 * Call soft limit reclaim before calling shrink_zone.
3044 */
3045 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3046 order, sc.gfp_mask,
3047 &nr_soft_scanned);
3048 sc.nr_reclaimed += nr_soft_reclaimed;
3049
3069 /* 3050 /*
3070 * There should be no need to raise the scanning 3051 * There should be no need to raise the scanning
3071 * priority if enough pages are already being scanned 3052 * priority if enough pages are already being scanned
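
The shrink_zone() hunk above goes back to the plain mem_cgroup_iter() walk: start with a NULL position, shrink each group's lruvec, and stop when the iterator returns NULL (or break out early via mem_cgroup_iter_break()). A minimal model of that cursor-style loop (plain C; the linked list and names stand in for the cgroup hierarchy, not the kernel API):

    #include <stdio.h>
    #include <stddef.h>

    struct group { const char *name; struct group *next; };

    /* Stand-in for mem_cgroup_iter(root, prev, ...): a NULL prev starts the
     * walk at the root; the call after the last group returns NULL. */
    static struct group *group_iter(struct group *root, struct group *prev)
    {
        return prev ? prev->next : root;
    }

    int main(void)
    {
        struct group c = { "C", NULL }, b = { "B", &c }, root = { "root", &b };
        struct group *memcg = group_iter(&root, NULL);

        do {
            printf("shrink lruvec of %s\n", memcg->name);   /* shrink_lruvec() */
            memcg = group_iter(&root, memcg);
        } while (memcg);

        return 0;
    }
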
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 1eb05d80b07b..3ed616215870 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -24,6 +24,11 @@
24static unsigned int mrp_join_time __read_mostly = 200; 24static unsigned int mrp_join_time __read_mostly = 200;
25module_param(mrp_join_time, uint, 0644); 25module_param(mrp_join_time, uint, 0644);
26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)"); 26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
27
28static unsigned int mrp_periodic_time __read_mostly = 1000;
29module_param(mrp_periodic_time, uint, 0644);
30MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
31
27MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
28 33
29static const u8 34static const u8
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data)
595 mrp_join_timer_arm(app); 600 mrp_join_timer_arm(app);
596} 601}
597 602
603static void mrp_periodic_timer_arm(struct mrp_applicant *app)
604{
605 mod_timer(&app->periodic_timer,
606 jiffies + msecs_to_jiffies(mrp_periodic_time));
607}
608
609static void mrp_periodic_timer(unsigned long data)
610{
611 struct mrp_applicant *app = (struct mrp_applicant *)data;
612
613 spin_lock(&app->lock);
614 mrp_mad_event(app, MRP_EVENT_PERIODIC);
615 mrp_pdu_queue(app);
616 spin_unlock(&app->lock);
617
618 mrp_periodic_timer_arm(app);
619}
620
598static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) 621static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
599{ 622{
600 __be16 endmark; 623 __be16 endmark;
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
845 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); 868 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
846 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app); 869 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
847 mrp_join_timer_arm(app); 870 mrp_join_timer_arm(app);
871 setup_timer(&app->periodic_timer, mrp_periodic_timer,
872 (unsigned long)app);
873 mrp_periodic_timer_arm(app);
848 return 0; 874 return 0;
849 875
850err3: 876err3:
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
870 * all pending messages before the applicant is gone. 896 * all pending messages before the applicant is gone.
871 */ 897 */
872 del_timer_sync(&app->join_timer); 898 del_timer_sync(&app->join_timer);
899 del_timer_sync(&app->periodic_timer);
873 900
874 spin_lock_bh(&app->lock); 901 spin_lock_bh(&app->lock);
875 mrp_mad_event(app, MRP_EVENT_TX); 902 mrp_mad_event(app, MRP_EVENT_TX);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 634debab4d54..fb7356fcfe51 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1146,7 +1146,11 @@ int hci_dev_open(__u16 dev)
1146 goto done; 1146 goto done;
1147 } 1147 }
1148 1148
1149 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 1149 /* Check for rfkill but allow the HCI setup stage to proceed
1150 * (which in itself doesn't cause any RF activity).
1151 */
1152 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1153 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1150 ret = -ERFKILL; 1154 ret = -ERFKILL;
1151 goto done; 1155 goto done;
1152 } 1156 }
@@ -1566,10 +1570,13 @@ static int hci_rfkill_set_block(void *data, bool blocked)
1566 1570
1567 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 1571 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1568 1572
1569 if (!blocked) 1573 if (blocked) {
1570 return 0; 1574 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1571 1575 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1572 hci_dev_do_close(hdev); 1576 hci_dev_do_close(hdev);
1577 } else {
1578 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1579 }
1573 1580
1574 return 0; 1581 return 0;
1575} 1582}
@@ -1591,9 +1598,13 @@ static void hci_power_on(struct work_struct *work)
1591 return; 1598 return;
1592 } 1599 }
1593 1600
1594 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1601 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1602 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1603 hci_dev_do_close(hdev);
1604 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1595 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 1605 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1596 HCI_AUTO_OFF_TIMEOUT); 1606 HCI_AUTO_OFF_TIMEOUT);
1607 }
1597 1608
1598 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1609 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1599 mgmt_index_added(hdev); 1610 mgmt_index_added(hdev);
@@ -2209,6 +2220,9 @@ int hci_register_dev(struct hci_dev *hdev)
2209 } 2220 }
2210 } 2221 }
2211 2222
2223 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2225
2212 set_bit(HCI_SETUP, &hdev->dev_flags); 2226 set_bit(HCI_SETUP, &hdev->dev_flags);
2213 2227
2214 if (hdev->dev_type != HCI_AMP) 2228 if (hdev->dev_type != HCI_AMP)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94aab73f89d4..8db3e89fae35 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3557,7 +3557,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3557 cp.handle = cpu_to_le16(conn->handle); 3557 cp.handle = cpu_to_le16(conn->handle);
3558 3558
3559 if (ltk->authenticated) 3559 if (ltk->authenticated)
3560 conn->sec_level = BT_SECURITY_HIGH; 3560 conn->pending_sec_level = BT_SECURITY_HIGH;
3561 else
3562 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3563
3564 conn->enc_key_size = ltk->enc_size;
3561 3565
3562 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3566 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3563 3567
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bb7bca8e60..63fa11109a1c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3755,6 +3755,13 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3755 3755
3756 sk = chan->sk; 3756 sk = chan->sk;
3757 3757
 3758 /* For certain devices (e.g. a HID mouse), support for authentication,
 3759 * pairing and bonding is optional. For such devices, in order to avoid
 3760 * keeping the ACL alive for too long after L2CAP disconnection, reset
 3761 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3762 */
3763 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3764
3758 bacpy(&bt_sk(sk)->src, conn->src); 3765 bacpy(&bt_sk(sk)->src, conn->src);
3759 bacpy(&bt_sk(sk)->dst, conn->dst); 3766 bacpy(&bt_sk(sk)->dst, conn->dst);
3760 chan->psm = psm; 3767 chan->psm = psm;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d126faf145f..84fcf9fff3ea 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -569,7 +569,6 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) 569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
570{ 570{
571 struct rfcomm_dev *dev = dlc->owner; 571 struct rfcomm_dev *dev = dlc->owner;
572 struct tty_struct *tty;
573 if (!dev) 572 if (!dev)
574 return; 573 return;
575 574
@@ -581,38 +580,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
581 DPM_ORDER_DEV_AFTER_PARENT); 580 DPM_ORDER_DEV_AFTER_PARENT);
582 581
583 wake_up_interruptible(&dev->port.open_wait); 582 wake_up_interruptible(&dev->port.open_wait);
584 } else if (dlc->state == BT_CLOSED) { 583 } else if (dlc->state == BT_CLOSED)
585 tty = tty_port_tty_get(&dev->port); 584 tty_port_tty_hangup(&dev->port, false);
586 if (!tty) {
587 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
588 /* Drop DLC lock here to avoid deadlock
589 * 1. rfcomm_dev_get will take rfcomm_dev_lock
590 * but in rfcomm_dev_add there's lock order:
591 * rfcomm_dev_lock -> dlc lock
592 * 2. tty_port_put will deadlock if it's
593 * the last reference
594 *
595 * FIXME: when we release the lock anything
596 * could happen to dev, even its destruction
597 */
598 rfcomm_dlc_unlock(dlc);
599 if (rfcomm_dev_get(dev->id) == NULL) {
600 rfcomm_dlc_lock(dlc);
601 return;
602 }
603
604 if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
605 &dev->flags))
606 tty_port_put(&dev->port);
607
608 tty_port_put(&dev->port);
609 rfcomm_dlc_lock(dlc);
610 }
611 } else {
612 tty_hangup(tty);
613 tty_kref_put(tty);
614 }
615 }
616} 585}
617 586
618static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) 587static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
diff --git a/net/core/dev.c b/net/core/dev.c
index 5c713f2239cc..65f829cfd928 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5247,10 +5247,12 @@ static int dev_new_index(struct net *net)
5247 5247
5248/* Delayed registration/unregisteration */ 5248/* Delayed registration/unregisteration */
5249static LIST_HEAD(net_todo_list); 5249static LIST_HEAD(net_todo_list);
5250static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5250 5251
5251static void net_set_todo(struct net_device *dev) 5252static void net_set_todo(struct net_device *dev)
5252{ 5253{
5253 list_add_tail(&dev->todo_list, &net_todo_list); 5254 list_add_tail(&dev->todo_list, &net_todo_list);
5255 dev_net(dev)->dev_unreg_count++;
5254} 5256}
5255 5257
5256static void rollback_registered_many(struct list_head *head) 5258static void rollback_registered_many(struct list_head *head)
@@ -5918,6 +5920,12 @@ void netdev_run_todo(void)
5918 if (dev->destructor) 5920 if (dev->destructor)
5919 dev->destructor(dev); 5921 dev->destructor(dev);
5920 5922
 5923 /* Report that a network device has been unregistered */
5924 rtnl_lock();
5925 dev_net(dev)->dev_unreg_count--;
5926 __rtnl_unlock();
5927 wake_up(&netdev_unregistering_wq);
5928
5921 /* Free network device */ 5929 /* Free network device */
5922 kobject_put(&dev->dev.kobj); 5930 kobject_put(&dev->dev.kobj);
5923 } 5931 }
@@ -6603,6 +6611,34 @@ static void __net_exit default_device_exit(struct net *net)
6603 rtnl_unlock(); 6611 rtnl_unlock();
6604} 6612}
6605 6613
6614static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6615{
6616 /* Return with the rtnl_lock held when there are no network
6617 * devices unregistering in any network namespace in net_list.
6618 */
6619 struct net *net;
6620 bool unregistering;
6621 DEFINE_WAIT(wait);
6622
6623 for (;;) {
6624 prepare_to_wait(&netdev_unregistering_wq, &wait,
6625 TASK_UNINTERRUPTIBLE);
6626 unregistering = false;
6627 rtnl_lock();
6628 list_for_each_entry(net, net_list, exit_list) {
6629 if (net->dev_unreg_count > 0) {
6630 unregistering = true;
6631 break;
6632 }
6633 }
6634 if (!unregistering)
6635 break;
6636 __rtnl_unlock();
6637 schedule();
6638 }
6639 finish_wait(&netdev_unregistering_wq, &wait);
6640}
6641
6606static void __net_exit default_device_exit_batch(struct list_head *net_list) 6642static void __net_exit default_device_exit_batch(struct list_head *net_list)
6607{ 6643{
6608 /* At exit all network devices most be removed from a network 6644 /* At exit all network devices most be removed from a network
@@ -6614,7 +6650,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
6614 struct net *net; 6650 struct net *net;
6615 LIST_HEAD(dev_kill_list); 6651 LIST_HEAD(dev_kill_list);
6616 6652
6617 rtnl_lock(); 6653 /* To prevent network device cleanup code from dereferencing
 6654 * loopback devices or network devices that have been freed,
 6655 * wait here for all pending unregistrations to complete
 6656 * before unregistering the loopback device and allowing the
 6657 * network namespace to be freed.
 6658 *
 6659 * The netdev todo list, containing all network device
 6660 * unregistrations that happen in default_device_exit_batch,
 6661 * will run in the rtnl_unlock() at the end of
 6662 * default_device_exit_batch.
6663 */
6664 rtnl_lock_unregistering(net_list);
6618 list_for_each_entry(net, net_list, exit_list) { 6665 list_for_each_entry(net, net_list, exit_list) {
6619 for_each_netdev_reverse(net, dev) { 6666 for_each_netdev_reverse(net, dev) {
6620 if (dev->rtnl_link_ops) 6667 if (dev->rtnl_link_ops)
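
rtnl_lock_unregistering() above sleeps on netdev_unregistering_wq until no namespace in the batch still has a non-zero dev_unreg_count, then returns with the rtnl lock held. A rough userspace analogue of that "wait until a counter drains under a lock" pattern, using pthreads (illustrative only; condition variables are not the kernel's wait-queue API, and the counts are made up):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int unreg_count = 3;   /* stands in for net->dev_unreg_count */

    /* Stands in for netdev_run_todo() finishing one unregistration at a time. */
    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 3; i++) {
            sleep(1);
            pthread_mutex_lock(&lock);
            unreg_count--;
            pthread_cond_broadcast(&cond);   /* like wake_up(&netdev_unregistering_wq) */
            pthread_mutex_unlock(&lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);

        /* Like rtnl_lock_unregistering(): wait until nothing is unregistering,
         * and only then proceed, still holding the lock. */
        pthread_mutex_lock(&lock);
        while (unreg_count > 0)
            pthread_cond_wait(&cond, &lock);
        printf("no pending unregistrations, proceeding with cleanup\n");
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }
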
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1929af87b260..8d7d0dd72db2 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -154,8 +154,8 @@ ipv6:
154 if (poff >= 0) { 154 if (poff >= 0) {
155 __be32 *ports, _ports; 155 __be32 *ports, _ports;
156 156
157 nhoff += poff; 157 ports = skb_header_pointer(skb, nhoff + poff,
158 ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); 158 sizeof(_ports), &_ports);
159 if (ports) 159 if (ports)
160 flow->ports = *ports; 160 flow->ports = *ports;
161 } 161 }
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6a2f13cee86a..3f1ec1586ae1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,11 +10,24 @@
10 10
11#include <net/secure_seq.h> 11#include <net/secure_seq.h>
12 12
13static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 13#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
14 14
15void net_secret_init(void) 15static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
16
17static void net_secret_init(void)
16{ 18{
17 get_random_bytes(net_secret, sizeof(net_secret)); 19 u32 tmp;
20 int i;
21
22 if (likely(net_secret[0]))
23 return;
24
25 for (i = NET_SECRET_SIZE; i > 0;) {
26 do {
27 get_random_bytes(&tmp, sizeof(tmp));
28 } while (!tmp);
29 cmpxchg(&net_secret[--i], 0, tmp);
30 }
18} 31}
19 32
20#ifdef CONFIG_INET 33#ifdef CONFIG_INET
@@ -42,6 +55,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
42 u32 hash[MD5_DIGEST_WORDS]; 55 u32 hash[MD5_DIGEST_WORDS];
43 u32 i; 56 u32 i;
44 57
58 net_secret_init();
45 memcpy(hash, saddr, 16); 59 memcpy(hash, saddr, 16);
46 for (i = 0; i < 4; i++) 60 for (i = 0; i < 4; i++)
47 secret[i] = net_secret[i] + (__force u32)daddr[i]; 61 secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +77,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
63 u32 hash[MD5_DIGEST_WORDS]; 77 u32 hash[MD5_DIGEST_WORDS];
64 u32 i; 78 u32 i;
65 79
80 net_secret_init();
66 memcpy(hash, saddr, 16); 81 memcpy(hash, saddr, 16);
67 for (i = 0; i < 4; i++) 82 for (i = 0; i < 4; i++)
68 secret[i] = net_secret[i] + (__force u32) daddr[i]; 83 secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +97,7 @@ __u32 secure_ip_id(__be32 daddr)
82{ 97{
83 u32 hash[MD5_DIGEST_WORDS]; 98 u32 hash[MD5_DIGEST_WORDS];
84 99
100 net_secret_init();
85 hash[0] = (__force __u32) daddr; 101 hash[0] = (__force __u32) daddr;
86 hash[1] = net_secret[13]; 102 hash[1] = net_secret[13];
87 hash[2] = net_secret[14]; 103 hash[2] = net_secret[14];
@@ -96,6 +112,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
96{ 112{
97 __u32 hash[4]; 113 __u32 hash[4];
98 114
115 net_secret_init();
99 memcpy(hash, daddr, 16); 116 memcpy(hash, daddr, 16);
100 md5_transform(hash, net_secret); 117 md5_transform(hash, net_secret);
101 118
@@ -107,6 +124,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
107{ 124{
108 u32 hash[MD5_DIGEST_WORDS]; 125 u32 hash[MD5_DIGEST_WORDS];
109 126
127 net_secret_init();
110 hash[0] = (__force u32)saddr; 128 hash[0] = (__force u32)saddr;
111 hash[1] = (__force u32)daddr; 129 hash[1] = (__force u32)daddr;
112 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 130 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +139,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
121{ 139{
122 u32 hash[MD5_DIGEST_WORDS]; 140 u32 hash[MD5_DIGEST_WORDS];
123 141
142 net_secret_init();
124 hash[0] = (__force u32)saddr; 143 hash[0] = (__force u32)saddr;
125 hash[1] = (__force u32)daddr; 144 hash[1] = (__force u32)daddr;
126 hash[2] = (__force u32)dport ^ net_secret[14]; 145 hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +159,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
140 u32 hash[MD5_DIGEST_WORDS]; 159 u32 hash[MD5_DIGEST_WORDS];
141 u64 seq; 160 u64 seq;
142 161
162 net_secret_init();
143 hash[0] = (__force u32)saddr; 163 hash[0] = (__force u32)saddr;
144 hash[1] = (__force u32)daddr; 164 hash[1] = (__force u32)daddr;
145 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 165 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +184,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
164 u64 seq; 184 u64 seq;
165 u32 i; 185 u32 i;
166 186
187 net_secret_init();
167 memcpy(hash, saddr, 16); 188 memcpy(hash, saddr, 16);
168 for (i = 0; i < 4; i++) 189 for (i = 0; i < 4; i++)
169 secret[i] = net_secret[i] + daddr[i]; 190 secret[i] = net_secret[i] + daddr[i];
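
The secure_seq.c change above makes net_secret initialization lazy: every caller runs net_secret_init(), a filled slot 0 means "already done", and cmpxchg() guarantees that concurrent first callers all end up with the same non-zero words. A userspace sketch of the same pattern with GCC/Clang __atomic builtins and /dev/urandom (illustrative only; error handling is minimal and the fallback value is a placeholder):

    #include <stdio.h>
    #include <stdint.h>

    #define NET_SECRET_SIZE 16

    static uint32_t net_secret[NET_SECRET_SIZE];

    /* A non-zero random word; zero is reserved to mean "not initialized yet". */
    static uint32_t random_u32_nonzero(void)
    {
        uint32_t v = 0;
        FILE *f = fopen("/dev/urandom", "rb");

        if (f) {
            do {
                if (fread(&v, sizeof(v), 1, f) != 1)
                    break;
            } while (!v);
            fclose(f);
        }
        return v ? v : 1;       /* crude placeholder fallback, not crypto advice */
    }

    static void net_secret_init(void)
    {
        /* Fast path: slot 0 is written last and only with a non-zero value,
         * so a non-zero slot 0 means the whole array is ready. */
        if (__atomic_load_n(&net_secret[0], __ATOMIC_RELAXED))
            return;

        for (int i = NET_SECRET_SIZE; i > 0;) {
            uint32_t expected = 0;
            uint32_t tmp = random_u32_nonzero();

            /* Like cmpxchg(&net_secret[--i], 0, tmp): the first writer of each
             * slot wins, and concurrent losers keep the winner's value. */
            __atomic_compare_exchange_n(&net_secret[--i], &expected, tmp, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        }
    }

    int main(void)
    {
        net_secret_init();
        printf("secret[0]=%08x secret[15]=%08x\n",
               (unsigned)net_secret[0], (unsigned)net_secret[15]);
        return 0;
    }

Filling the array from the highest index down mirrors the kernel loop: slot 0 only becomes non-zero once every other slot already holds its final value.
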
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7a1874b7b8fd..cfeb85cff4f0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -263,10 +263,8 @@ void build_ehash_secret(void)
263 get_random_bytes(&rnd, sizeof(rnd)); 263 get_random_bytes(&rnd, sizeof(rnd));
264 } while (rnd == 0); 264 } while (rnd == 0);
265 265
266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) { 266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); 267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
268 net_secret_init();
269 }
270} 268}
271EXPORT_SYMBOL(build_ehash_secret); 269EXPORT_SYMBOL(build_ehash_secret);
272 270
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index dace87f06e5f..7defdc9ba167 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data)
736 736
737 in_dev->mr_gq_running = 0; 737 in_dev->mr_gq_running = 0;
738 igmpv3_send_report(in_dev, NULL); 738 igmpv3_send_report(in_dev, NULL);
739 __in_dev_put(in_dev); 739 in_dev_put(in_dev);
740} 740}
741 741
742static void igmp_ifc_timer_expire(unsigned long data) 742static void igmp_ifc_timer_expire(unsigned long data)
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
749 igmp_ifc_start_timer(in_dev, 749 igmp_ifc_start_timer(in_dev,
750 unsolicited_report_interval(in_dev)); 750 unsolicited_report_interval(in_dev));
751 } 751 }
752 __in_dev_put(in_dev); 752 in_dev_put(in_dev);
753} 753}
754 754
755static void igmp_ifc_event(struct in_device *in_dev) 755static void igmp_ifc_event(struct in_device *in_dev)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ac9fabe0300f..63a6d6d6b875 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
623 tunnel->err_count = 0; 623 tunnel->err_count = 0;
624 } 624 }
625 625
626 tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
626 ttl = tnl_params->ttl; 627 ttl = tnl_params->ttl;
627 if (ttl == 0) { 628 if (ttl == 0) {
628 if (skb->protocol == htons(ETH_P_IP)) 629 if (skb->protocol == htons(ETH_P_IP))
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
641 642
642 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) 643 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
643 + rt->dst.header_len; 644 + rt->dst.header_len;
644 if (max_headroom > dev->needed_headroom) { 645 if (max_headroom > dev->needed_headroom)
645 dev->needed_headroom = max_headroom; 646 dev->needed_headroom = max_headroom;
646 if (skb_cow_head(skb, dev->needed_headroom)) { 647
647 dev->stats.tx_dropped++; 648 if (skb_cow_head(skb, dev->needed_headroom)) {
648 dev_kfree_skb(skb); 649 dev->stats.tx_dropped++;
649 return; 650 dev_kfree_skb(skb);
650 } 651 return;
651 } 652 }
652 653
653 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, 654 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
654 ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df, 655 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
655 !net_eq(tunnel->net, dev_net(dev)));
656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
657 657
658 return; 658 return;
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
853 /* FB netdevice is special: we have one, and only one per netns. 853 /* FB netdevice is special: we have one, and only one per netns.
854 * Allowing to move it to another netns is clearly unsafe. 854 * Allowing to move it to another netns is clearly unsafe.
855 */ 855 */
856 if (!IS_ERR(itn->fb_tunnel_dev)) 856 if (!IS_ERR(itn->fb_tunnel_dev)) {
857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
858 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
859 }
858 rtnl_unlock(); 860 rtnl_unlock();
859 861
860 return PTR_RET(itn->fb_tunnel_dev); 862 return PTR_RET(itn->fb_tunnel_dev);
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
884 if (!net_eq(dev_net(t->dev), net)) 886 if (!net_eq(dev_net(t->dev), net))
885 unregister_netdevice_queue(t->dev, head); 887 unregister_netdevice_queue(t->dev, head);
886 } 888 }
887 if (itn->fb_tunnel_dev)
888 unregister_netdevice_queue(itn->fb_tunnel_dev, head);
889} 889}
890 890
891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) 891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index d6c856b17fd4..c31e3ad98ef2 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
61 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 61 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
62 62
63 /* Push down and install the IP header. */ 63 /* Push down and install the IP header. */
64 __skb_push(skb, sizeof(struct iphdr)); 64 skb_push(skb, sizeof(struct iphdr));
65 skb_reset_network_header(skb); 65 skb_reset_network_header(skb);
66 66
67 iph = ip_hdr(skb); 67 iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 67e17dcda65e..b6346bf2fde3 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
267 if (th == NULL) 267 if (th == NULL)
268 return NF_DROP; 268 return NF_DROP;
269 269
270 synproxy_parse_options(skb, par->thoff, th, &opts); 270 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
271 return NF_DROP;
271 272
272 if (th->syn && !(th->ack || th->fin || th->rst)) { 273 if (th->syn && !(th->ack || th->fin || th->rst)) {
273 /* Initial SYN from client */ 274 /* Initial SYN from client */
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
350 351
351 /* fall through */ 352 /* fall through */
352 case TCP_CONNTRACK_SYN_SENT: 353 case TCP_CONNTRACK_SYN_SENT:
353 synproxy_parse_options(skb, thoff, th, &opts); 354 if (!synproxy_parse_options(skb, thoff, th, &opts))
355 return NF_DROP;
354 356
355 if (!th->syn && th->ack && 357 if (!th->syn && th->ack &&
356 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 358 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
373 if (!th->syn || !th->ack) 375 if (!th->syn || !th->ack)
374 break; 376 break;
375 377
376 synproxy_parse_options(skb, thoff, th, &opts); 378 if (!synproxy_parse_options(skb, thoff, th, &opts))
379 return NF_DROP;
380
377 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 381 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
378 synproxy->tsoff = opts.tsval - synproxy->its; 382 synproxy->tsoff = opts.tsval - synproxy->its;
379 383
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bfec521c717f..193db03540ad 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
218 218
219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
220 ipv4_sk_update_pmtu(skb, sk, info); 220 ipv4_sk_update_pmtu(skb, sk, info);
221 else if (type == ICMP_REDIRECT) 221 else if (type == ICMP_REDIRECT) {
222 ipv4_sk_redirect(skb, sk); 222 ipv4_sk_redirect(skb, sk);
223 return;
224 }
223 225
224 /* Report error on raw socket, if: 226 /* Report error on raw socket, if:
225 1. User requested ip_recverr. 227 1. User requested ip_recverr.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7c83cb8bf137..e6bb8256e59f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -895,8 +895,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
895 895
896 skb_orphan(skb); 896 skb_orphan(skb);
897 skb->sk = sk; 897 skb->sk = sk;
898 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? 898 skb->destructor = tcp_wfree;
899 tcp_wfree : sock_wfree;
900 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 899 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
901 900
902 /* Build TCP header and checksum it. */ 901 /* Build TCP header and checksum it. */
@@ -1840,7 +1839,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1840 while ((skb = tcp_send_head(sk))) { 1839 while ((skb = tcp_send_head(sk))) {
1841 unsigned int limit; 1840 unsigned int limit;
1842 1841
1843
1844 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1842 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1845 BUG_ON(!tso_segs); 1843 BUG_ON(!tso_segs);
1846 1844
@@ -1869,13 +1867,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1869 break; 1867 break;
1870 } 1868 }
1871 1869
1872 /* TSQ : sk_wmem_alloc accounts skb truesize, 1870 /* TCP Small Queues :
1873 * including skb overhead. But thats OK. 1871 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1872 * This allows for :
1873 * - better RTT estimation and ACK scheduling
1874 * - faster recovery
1875 * - high rates
1874 */ 1876 */
1875 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { 1877 limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
1878
1879 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1876 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1880 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1877 break; 1881 break;
1878 } 1882 }
1883
1879 limit = mss_now; 1884 limit = mss_now;
1880 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1885 if (tso_segs > 1 && !tcp_urg_mode(tp))
1881 limit = tcp_mss_split_point(sk, skb, mss_now, 1886 limit = tcp_mss_split_point(sk, skb, mss_now,
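
The tcp_write_xmit() hunk above replaces the global sysctl_tcp_limit_output_bytes check with a per-flow limit tied to the pacing rate: sk_pacing_rate is in bytes per second, so sk_pacing_rate >> 10 is roughly one millisecond's worth of data (1/1024 s), never less than one skb's truesize. The arithmetic in isolation (plain C, made-up example values):

    #include <stdio.h>

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        unsigned long pacing_rate = 125000000UL;  /* ~1 Gbit/s in bytes/sec */
        unsigned long skb_truesize = 2304;        /* rough truesize of one segment */

        /* limit = max(skb->truesize, sk->sk_pacing_rate >> 10): allow about
         * 1 ms of data in qdisc/device queues, but never less than one skb
         * so slow flows are not throttled to nothing. */
        unsigned long limit = max_ul(skb_truesize, pacing_rate >> 10);

        printf("TSQ limit: %lu bytes (~%.2f ms at this rate)\n",
               limit, (double)limit * 1000.0 / (double)pacing_rate);
        return 0;
    }
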
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 74d2c95db57f..0ca44df51ee9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -658,7 +658,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
658 break; 658 break;
659 case ICMP_REDIRECT: 659 case ICMP_REDIRECT:
660 ipv4_sk_redirect(skb, sk); 660 ipv4_sk_redirect(skb, sk);
661 break; 661 goto out;
662 } 662 }
663 663
664 /* 664 /*
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d6ff12617f36..cd3fb301da38 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1499 return false; 1499 return false;
1500} 1500}
1501 1501
 1502/* Compares an address/prefix_len with the addresses on device @dev.
 1503 * If a matching one is found, returns true.
1504 */
1505bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1506 const unsigned int prefix_len, struct net_device *dev)
1507{
1508 struct inet6_dev *idev;
1509 struct inet6_ifaddr *ifa;
1510 bool ret = false;
1511
1512 rcu_read_lock();
1513 idev = __in6_dev_get(dev);
1514 if (idev) {
1515 read_lock_bh(&idev->lock);
1516 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1517 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1518 if (ret)
1519 break;
1520 }
1521 read_unlock_bh(&idev->lock);
1522 }
1523 rcu_read_unlock();
1524
1525 return ret;
1526}
1527EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1528
1502int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) 1529int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1503{ 1530{
1504 struct inet6_dev *idev; 1531 struct inet6_dev *idev;
@@ -2193,43 +2220,21 @@ ok:
2193 else 2220 else
2194 stored_lft = 0; 2221 stored_lft = 0;
2195 if (!update_lft && !create && stored_lft) { 2222 if (!update_lft && !create && stored_lft) {
2196 if (valid_lft > MIN_VALID_LIFETIME || 2223 const u32 minimum_lft = min(
2197 valid_lft > stored_lft) 2224 stored_lft, (u32)MIN_VALID_LIFETIME);
2198 update_lft = 1; 2225 valid_lft = max(valid_lft, minimum_lft);
2199 else if (stored_lft <= MIN_VALID_LIFETIME) { 2226
2200 /* valid_lft <= stored_lft is always true */ 2227 /* RFC4862 Section 5.5.3e:
2201 /* 2228 * "Note that the preferred lifetime of the
2202 * RFC 4862 Section 5.5.3e: 2229 * corresponding address is always reset to
2203 * "Note that the preferred lifetime of 2230 * the Preferred Lifetime in the received
2204 * the corresponding address is always 2231 * Prefix Information option, regardless of
2205 * reset to the Preferred Lifetime in 2232 * whether the valid lifetime is also reset or
2206 * the received Prefix Information 2233 * ignored."
2207 * option, regardless of whether the 2234 *
2208 * valid lifetime is also reset or 2235 * So we should always update prefered_lft here.
2209 * ignored." 2236 */
2210 * 2237 update_lft = 1;
2211 * So if the preferred lifetime in
2212 * this advertisement is different
2213 * than what we have stored, but the
2214 * valid lifetime is invalid, just
2215 * reset prefered_lft.
2216 *
2217 * We must set the valid lifetime
2218 * to the stored lifetime since we'll
2219 * be updating the timestamp below,
2220 * else we'll set it back to the
2221 * minimum.
2222 */
2223 if (prefered_lft != ifp->prefered_lft) {
2224 valid_lft = stored_lft;
2225 update_lft = 1;
2226 }
2227 } else {
2228 valid_lft = MIN_VALID_LIFETIME;
2229 if (valid_lft < prefered_lft)
2230 prefered_lft = valid_lft;
2231 update_lft = 1;
2232 }
2233 } 2238 }
2234 2239
2235 if (update_lft) { 2240 if (update_lft) {
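
The addrconf hunk above folds the old three-way branch into one clamp: a router advertisement can lower the valid lifetime, but never below min(stored_lft, MIN_VALID_LIFETIME), and the preferred lifetime is always refreshed per RFC 4862 section 5.5.3e. The clamp in isolation (plain C; the two-hour constant matches the kernel's MIN_VALID_LIFETIME, the sample lifetimes are made up):

    #include <stdio.h>

    #define MIN_VALID_LIFETIME (2 * 3600)   /* seconds, as in addrconf.c */

    static unsigned int min_u32(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u32(unsigned int a, unsigned int b) { return a > b ? a : b; }

    int main(void)
    {
        unsigned int stored_lft = 9000;   /* lifetime remaining on the address */
        unsigned int valid_lft = 60;      /* lifetime advertised in the RA */

        /* An RA may shorten the lifetime, but not below two hours unless the
         * address already had less than two hours left. */
        unsigned int minimum_lft = min_u32(stored_lft, MIN_VALID_LIFETIME);
        valid_lft = max_u32(valid_lft, minimum_lft);

        printf("effective valid lifetime: %u s\n", valid_lft);   /* prints 7200 */
        return 0;
    }
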
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6b26e9feafb9..7bb5446b9d73 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
618 struct ip6_tnl *tunnel = netdev_priv(dev); 618 struct ip6_tnl *tunnel = netdev_priv(dev);
619 struct net_device *tdev; /* Device to other host */ 619 struct net_device *tdev; /* Device to other host */
620 struct ipv6hdr *ipv6h; /* Our new IP header */ 620 struct ipv6hdr *ipv6h; /* Our new IP header */
621 unsigned int max_headroom; /* The extra header space needed */ 621 unsigned int max_headroom = 0; /* The extra header space needed */
622 int gre_hlen; 622 int gre_hlen;
623 struct ipv6_tel_txoption opt; 623 struct ipv6_tel_txoption opt;
624 int mtu; 624 int mtu;
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
693 693
694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); 694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
695 695
696 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; 696 max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
697 697
698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) || 698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3a692d529163..a54c45ce4a48 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1015,6 +1015,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1015 * udp datagram 1015 * udp datagram
1016 */ 1016 */
1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { 1017 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1018 struct frag_hdr fhdr;
1019
1018 skb = sock_alloc_send_skb(sk, 1020 skb = sock_alloc_send_skb(sk,
1019 hh_len + fragheaderlen + transhdrlen + 20, 1021 hh_len + fragheaderlen + transhdrlen + 20,
1020 (flags & MSG_DONTWAIT), &err); 1022 (flags & MSG_DONTWAIT), &err);
@@ -1036,12 +1038,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1036 skb->protocol = htons(ETH_P_IPV6); 1038 skb->protocol = htons(ETH_P_IPV6);
1037 skb->ip_summed = CHECKSUM_PARTIAL; 1039 skb->ip_summed = CHECKSUM_PARTIAL;
1038 skb->csum = 0; 1040 skb->csum = 0;
1039 }
1040
1041 err = skb_append_datato_frags(sk,skb, getfrag, from,
1042 (length - transhdrlen));
1043 if (!err) {
1044 struct frag_hdr fhdr;
1045 1041
1046 /* Specify the length of each IPv6 datagram fragment. 1042 /* Specify the length of each IPv6 datagram fragment.
1047 * It has to be a multiple of 8. 1043 * It has to be a multiple of 8.
@@ -1052,15 +1048,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1052 ipv6_select_ident(&fhdr, rt); 1048 ipv6_select_ident(&fhdr, rt);
1053 skb_shinfo(skb)->ip6_frag_id = fhdr.identification; 1049 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1054 __skb_queue_tail(&sk->sk_write_queue, skb); 1050 __skb_queue_tail(&sk->sk_write_queue, skb);
1055
1056 return 0;
1057 } 1051 }
1058 /* There is not enough support do UPD LSO,
1059 * so follow normal path
1060 */
1061 kfree_skb(skb);
1062 1052
1063 return err; 1053 return skb_append_datato_frags(sk, skb, getfrag, from,
1054 (length - transhdrlen));
1064} 1055}
1065 1056
1066static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, 1057static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1227,27 +1218,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1227 * --yoshfuji 1218 * --yoshfuji
1228 */ 1219 */
1229 1220
1230 cork->length += length; 1221 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1231 if (length > mtu) { 1222 sk->sk_protocol == IPPROTO_RAW)) {
1232 int proto = sk->sk_protocol; 1223 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1233 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ 1224 return -EMSGSIZE;
1234 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); 1225 }
1235 return -EMSGSIZE;
1236 }
1237
1238 if (proto == IPPROTO_UDP &&
1239 (rt->dst.dev->features & NETIF_F_UFO)) {
1240 1226
1241 err = ip6_ufo_append_data(sk, getfrag, from, length, 1227 skb = skb_peek_tail(&sk->sk_write_queue);
1242 hh_len, fragheaderlen, 1228 cork->length += length;
1243 transhdrlen, mtu, flags, rt); 1229 if (((length > mtu) ||
1244 if (err) 1230 (skb && skb_is_gso(skb))) &&
1245 goto error; 1231 (sk->sk_protocol == IPPROTO_UDP) &&
1246 return 0; 1232 (rt->dst.dev->features & NETIF_F_UFO)) {
1247 } 1233 err = ip6_ufo_append_data(sk, getfrag, from, length,
1234 hh_len, fragheaderlen,
1235 transhdrlen, mtu, flags, rt);
1236 if (err)
1237 goto error;
1238 return 0;
1248 } 1239 }
1249 1240
1250 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 1241 if (!skb)
1251 goto alloc_new_skb; 1242 goto alloc_new_skb;
1252 1243
1253 while (length > 0) { 1244 while (length > 0) {
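
The ip6_output.c change above folds the old two-step UFO handling into one path: ip6_ufo_append_data() now does the UFO setup (gso_size, fragment id, queueing the skb) only when it first allocates the tail skb, otherwise it simply appends the payload to that skb's frags, and the fallback that freed the skb and retried without UFO is gone; ip6_append_data() enters the UFO path not only for over-MTU sends but also whenever the already-queued tail skb is GSO, so a large UDP datagram keeps growing the same skb. A condensed, more readable restatement of the new entry test from the hunk (same variables ip6_append_data() already has, not a separate helper in the patch):

    skb = skb_peek_tail(&sk->sk_write_queue);
    cork->length += length;
    if ((length > mtu || (skb && skb_is_gso(skb))) &&
        sk->sk_protocol == IPPROTO_UDP &&
        (rt->dst.dev->features & NETIF_F_UFO)) {
            err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
                                      fragheaderlen, transhdrlen, mtu,
                                      flags, rt);
            if (err)
                    goto error;
            return 0;
    }
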
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2d8f4829575b..a791552e0422 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1731,8 +1731,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1731 } 1731 }
1732 } 1732 }
1733 1733
1734 t = rtnl_dereference(ip6n->tnls_wc[0]);
1735 unregister_netdevice_queue(t->dev, &list);
1736 unregister_netdevice_many(&list); 1734 unregister_netdevice_many(&list);
1737} 1735}
1738 1736
@@ -1752,6 +1750,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1752 if (!ip6n->fb_tnl_dev) 1750 if (!ip6n->fb_tnl_dev)
1753 goto err_alloc_dev; 1751 goto err_alloc_dev;
1754 dev_net_set(ip6n->fb_tnl_dev, net); 1752 dev_net_set(ip6n->fb_tnl_dev, net);
1753 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
1755 /* FB netdevice is special: we have one, and only one per netns. 1754 /* FB netdevice is special: we have one, and only one per netns.
1756 * Allowing to move it to another netns is clearly unsafe. 1755 * Allowing to move it to another netns is clearly unsafe.
1757 */ 1756 */
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 096cd67b737c..d18f9f903db6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data)
2034 if (idev->mc_dad_count) 2034 if (idev->mc_dad_count)
2035 mld_dad_start_timer(idev, idev->mc_maxdelay); 2035 mld_dad_start_timer(idev, idev->mc_maxdelay);
2036 } 2036 }
2037 __in6_dev_put(idev); 2037 in6_dev_put(idev);
2038} 2038}
2039 2039
2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data)
2379 2379
2380 idev->mc_gq_running = 0; 2380 idev->mc_gq_running = 0;
2381 mld_send_report(idev, NULL); 2381 mld_send_report(idev, NULL);
2382 __in6_dev_put(idev); 2382 in6_dev_put(idev);
2383} 2383}
2384 2384
2385static void mld_ifc_timer_expire(unsigned long data) 2385static void mld_ifc_timer_expire(unsigned long data)
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data)
2392 if (idev->mc_ifc_count) 2392 if (idev->mc_ifc_count)
2393 mld_ifc_start_timer(idev, idev->mc_maxdelay); 2393 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2394 } 2394 }
2395 __in6_dev_put(idev); 2395 in6_dev_put(idev);
2396} 2396}
2397 2397
2398static void mld_ifc_event(struct inet6_dev *idev) 2398static void mld_ifc_event(struct inet6_dev *idev)
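
The three one-line mcast.c changes all sit in MLD timer expiry handlers, where the reference on the inet6_dev was taken when the timer was armed. __in6_dev_put() only decrements the count and never triggers the final destruction, so if the timer happens to hold the last reference the device can be leaked; in6_dev_put() is the full put that frees it on the last reference. A sketch of the pairing these hunks restore, using one of the MLD timer fields as an example (not new code in the patch):

    if (!mod_timer(&idev->mc_gq_timer, jiffies + delay))
            in6_dev_hold(idev);     /* hold only when the timer was newly armed */

    /* in the expiry handler, once the work is done: */
    in6_dev_put(idev);              /* full put: may free the idev,
                                       unlike __in6_dev_put()                   */
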
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 19cfea8dbcaa..2748b042da72 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
282 if (th == NULL) 282 if (th == NULL)
283 return NF_DROP; 283 return NF_DROP;
284 284
285 synproxy_parse_options(skb, par->thoff, th, &opts); 285 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
286 return NF_DROP;
286 287
287 if (th->syn && !(th->ack || th->fin || th->rst)) { 288 if (th->syn && !(th->ack || th->fin || th->rst)) {
288 /* Initial SYN from client */ 289 /* Initial SYN from client */
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
372 373
373 /* fall through */ 374 /* fall through */
374 case TCP_CONNTRACK_SYN_SENT: 375 case TCP_CONNTRACK_SYN_SENT:
375 synproxy_parse_options(skb, thoff, th, &opts); 376 if (!synproxy_parse_options(skb, thoff, th, &opts))
377 return NF_DROP;
376 378
377 if (!th->syn && th->ack && 379 if (!th->syn && th->ack &&
378 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 380 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
395 if (!th->syn || !th->ack) 397 if (!th->syn || !th->ack)
396 break; 398 break;
397 399
398 synproxy_parse_options(skb, thoff, th, &opts); 400 if (!synproxy_parse_options(skb, thoff, th, &opts))
401 return NF_DROP;
402
399 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 403 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
400 synproxy->tsoff = opts.tsval - synproxy->its; 404 synproxy->tsoff = opts.tsval - synproxy->its;
401 405
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 58916bbb1728..a4ed2416399e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -335,8 +335,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
335 ip6_sk_update_pmtu(skb, sk, info); 335 ip6_sk_update_pmtu(skb, sk, info);
336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); 336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
337 } 337 }
338 if (type == NDISC_REDIRECT) 338 if (type == NDISC_REDIRECT) {
339 ip6_sk_redirect(skb, sk); 339 ip6_sk_redirect(skb, sk);
340 return;
341 }
340 if (np->recverr) { 342 if (np->recverr) {
341 u8 *payload = skb->data; 343 u8 *payload = skb->data;
342 if (!inet->hdrincl) 344 if (!inet->hdrincl)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 7ee5cb96db34..19269453a8ea 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
566 return false; 566 return false;
567} 567}
568 568
569/* Checks if an address matches an address on the tunnel interface.
 570 * Used to detect the NAT of proto 41 packets and let them pass the spoofing test.
571 * Long story:
572 * This function is called after we considered the packet as spoofed
573 * in is_spoofed_6rd.
574 * We may have a router that is doing NAT for proto 41 packets
575 * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
576 * will be translated to n.n.n.n/PREFIX:bbbb:bbbb. And is_spoofed_6rd
577 * function will return true, dropping the packet.
 578 * But, we can still check whether it is spoofed against the IP
579 * addresses associated with the interface.
580 */
581static bool only_dnatted(const struct ip_tunnel *tunnel,
582 const struct in6_addr *v6dst)
583{
584 int prefix_len;
585
586#ifdef CONFIG_IPV6_SIT_6RD
587 prefix_len = tunnel->ip6rd.prefixlen + 32
588 - tunnel->ip6rd.relay_prefixlen;
589#else
590 prefix_len = 48;
591#endif
592 return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
593}
594
595/* Returns true if a packet is spoofed */
596static bool packet_is_spoofed(struct sk_buff *skb,
597 const struct iphdr *iph,
598 struct ip_tunnel *tunnel)
599{
600 const struct ipv6hdr *ipv6h;
601
602 if (tunnel->dev->priv_flags & IFF_ISATAP) {
603 if (!isatap_chksrc(skb, iph, tunnel))
604 return true;
605
606 return false;
607 }
608
609 if (tunnel->dev->flags & IFF_POINTOPOINT)
610 return false;
611
612 ipv6h = ipv6_hdr(skb);
613
614 if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
615 net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
616 &iph->saddr, &ipv6h->saddr,
617 &iph->daddr, &ipv6h->daddr);
618 return true;
619 }
620
621 if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
622 return false;
623
624 if (only_dnatted(tunnel, &ipv6h->daddr))
625 return false;
626
627 net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
628 &iph->saddr, &ipv6h->saddr,
629 &iph->daddr, &ipv6h->daddr);
630 return true;
631}
632
569static int ipip6_rcv(struct sk_buff *skb) 633static int ipip6_rcv(struct sk_buff *skb)
570{ 634{
571 const struct iphdr *iph = ip_hdr(skb); 635 const struct iphdr *iph = ip_hdr(skb);
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb)
586 IPCB(skb)->flags = 0; 650 IPCB(skb)->flags = 0;
587 skb->protocol = htons(ETH_P_IPV6); 651 skb->protocol = htons(ETH_P_IPV6);
588 652
589 if (tunnel->dev->priv_flags & IFF_ISATAP) { 653 if (packet_is_spoofed(skb, iph, tunnel)) {
590 if (!isatap_chksrc(skb, iph, tunnel)) { 654 tunnel->dev->stats.rx_errors++;
591 tunnel->dev->stats.rx_errors++; 655 goto out;
592 goto out;
593 }
594 } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
595 if (is_spoofed_6rd(tunnel, iph->saddr,
596 &ipv6_hdr(skb)->saddr) ||
597 is_spoofed_6rd(tunnel, iph->daddr,
598 &ipv6_hdr(skb)->daddr)) {
599 tunnel->dev->stats.rx_errors++;
600 goto out;
601 }
602 } 656 }
603 657
604 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); 658 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
748 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 802 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
749 803
750 if (neigh == NULL) { 804 if (neigh == NULL) {
751 net_dbg_ratelimited("sit: nexthop == NULL\n"); 805 net_dbg_ratelimited("nexthop == NULL\n");
752 goto tx_error; 806 goto tx_error;
753 } 807 }
754 808
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
777 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 831 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
778 832
779 if (neigh == NULL) { 833 if (neigh == NULL) {
780 net_dbg_ratelimited("sit: nexthop == NULL\n"); 834 net_dbg_ratelimited("nexthop == NULL\n");
781 goto tx_error; 835 goto tx_error;
782 } 836 }
783 837
@@ -1612,6 +1666,7 @@ static int __net_init sit_init_net(struct net *net)
1612 goto err_alloc_dev; 1666 goto err_alloc_dev;
1613 } 1667 }
1614 dev_net_set(sitn->fb_tunnel_dev, net); 1668 dev_net_set(sitn->fb_tunnel_dev, net);
1669 sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
1615 /* FB netdevice is special: we have one, and only one per netns. 1670 /* FB netdevice is special: we have one, and only one per netns.
1616 * Allowing to move it to another netns is clearly unsafe. 1671 * Allowing to move it to another netns is clearly unsafe.
1617 */ 1672 */
@@ -1646,7 +1701,6 @@ static void __net_exit sit_exit_net(struct net *net)
1646 1701
1647 rtnl_lock(); 1702 rtnl_lock();
1648 sit_destroy_tunnels(sitn, &list); 1703 sit_destroy_tunnels(sitn, &list);
1649 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1650 unregister_netdevice_many(&list); 1704 unregister_netdevice_many(&list);
1651 rtnl_unlock(); 1705 rtnl_unlock();
1652} 1706}
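
The new only_dnatted()/packet_is_spoofed() pair in sit.c gives an apparently spoofed destination a second chance when it matches an address configured on the tunnel interface, which is what a router NATing protocol-41 traffic for an internal host produces. The prefix length it compares against covers exactly the bits that encode the IPv4 address: plain 6to4 addresses are 2002:V4ADDR::/48 (the 16-bit 2002::/16 prefix plus 32 embedded IPv4 bits, hence the hard-coded 48), and with 6rd only the IPv4 bits not implied by the relay prefix are embedded, giving prefixlen + 32 - relay_prefixlen. A small self-contained check of that formula; the second deployment is made up for illustration:

    #include <assert.h>

    /* bits of delegated IPv6 prefix + embedded IPv4 bits, as in only_dnatted() */
    static int sit_6rd_prefix_len(int ip6rd_prefixlen, int relay_prefixlen)
    {
            return ip6rd_prefixlen + 32 - relay_prefixlen;
    }

    int main(void)
    {
            /* classic 6to4: 2002::/16 with the whole IPv4 address embedded
             * gives /48, matching the non-6RD fallback in the patch */
            assert(sit_6rd_prefix_len(16, 0) == 48);
            /* hypothetical 6rd domain: /28 delegated prefix, /8 relay prefix,
             * so only 24 IPv4 bits are embedded -> /52 */
            assert(sit_6rd_prefix_len(28, 8) == 52);
            return 0;
    }
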
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f4058150262b..72b7eaaf3ca0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,8 +525,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
525 525
526 if (type == ICMPV6_PKT_TOOBIG) 526 if (type == ICMPV6_PKT_TOOBIG)
527 ip6_sk_update_pmtu(skb, sk, info); 527 ip6_sk_update_pmtu(skb, sk, info);
528 if (type == NDISC_REDIRECT) 528 if (type == NDISC_REDIRECT) {
529 ip6_sk_redirect(skb, sk); 529 ip6_sk_redirect(skb, sk);
530 goto out;
531 }
530 532
531 np = inet6_sk(sk); 533 np = inet6_sk(sk);
532 534
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 54563ad8aeb1..355cc3b6fa4d 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param)
154 } else { 154 } else {
155 lapb->n2count++; 155 lapb->n2count++;
156 lapb_requeue_frames(lapb); 156 lapb_requeue_frames(lapb);
157 lapb_kick(lapb);
157 } 158 }
158 break; 159 break;
159 160
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f69e83ff836..74fd00c27210 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
116 116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s; 118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
119 120
120 s = this_cpu_ptr(dest->stats.cpustats); 121 s = this_cpu_ptr(dest->stats.cpustats);
121 s->ustats.inpkts++; 122 s->ustats.inpkts++;
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
123 s->ustats.inbytes += skb->len; 124 s->ustats.inbytes += skb->len;
124 u64_stats_update_end(&s->syncp); 125 u64_stats_update_end(&s->syncp);
125 126
126 s = this_cpu_ptr(dest->svc->stats.cpustats); 127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
127 s->ustats.inpkts++; 130 s->ustats.inpkts++;
128 u64_stats_update_begin(&s->syncp); 131 u64_stats_update_begin(&s->syncp);
129 s->ustats.inbytes += skb->len; 132 s->ustats.inbytes += skb->len;
130 u64_stats_update_end(&s->syncp); 133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
131 135
132 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
133 s->ustats.inpkts++; 137 s->ustats.inpkts++;
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
146 150
147 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
148 struct ip_vs_cpu_stats *s; 152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
149 154
150 s = this_cpu_ptr(dest->stats.cpustats); 155 s = this_cpu_ptr(dest->stats.cpustats);
151 s->ustats.outpkts++; 156 s->ustats.outpkts++;
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
153 s->ustats.outbytes += skb->len; 158 s->ustats.outbytes += skb->len;
154 u64_stats_update_end(&s->syncp); 159 u64_stats_update_end(&s->syncp);
155 160
156 s = this_cpu_ptr(dest->svc->stats.cpustats); 161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
157 s->ustats.outpkts++; 164 s->ustats.outpkts++;
158 u64_stats_update_begin(&s->syncp); 165 u64_stats_update_begin(&s->syncp);
159 s->ustats.outbytes += skb->len; 166 s->ustats.outbytes += skb->len;
160 u64_stats_update_end(&s->syncp); 167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
161 169
162 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
163 s->ustats.outpkts++; 171 s->ustats.outpkts++;
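
Both ip_vs_in_stats() and ip_vs_out_stats() hunks convert dest->svc into an RCU-managed pointer: the per-packet stats path is a pure reader, so it brackets the access with rcu_read_lock()/rcu_read_unlock() and resolves the pointer through rcu_dereference(), while the control path (ip_vs_ctl.c, next) publishes new bindings with rcu_assign_pointer() and frees the old service only after a grace period. A minimal reader-side sketch using the field names from the hunks, condensed rather than a drop-in replacement for either function:

    rcu_read_lock();
    svc = rcu_dereference(dest->svc);        /* stable until rcu_read_unlock() */
    s = this_cpu_ptr(svc->stats.cpustats);
    s->ustats.inpkts++;
    u64_stats_update_begin(&s->syncp);
    s->ustats.inbytes += skb->len;
    u64_stats_update_end(&s->syncp);
    rcu_read_unlock();
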
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c8148e487386..a3df9bddc4f7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -460,7 +460,7 @@ static inline void
460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) 460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
461{ 461{
462 atomic_inc(&svc->refcnt); 462 atomic_inc(&svc->refcnt);
463 dest->svc = svc; 463 rcu_assign_pointer(dest->svc, svc);
464} 464}
465 465
466static void ip_vs_service_free(struct ip_vs_service *svc) 466static void ip_vs_service_free(struct ip_vs_service *svc)
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
470 kfree(svc); 470 kfree(svc);
471} 471}
472 472
473static void 473static void ip_vs_service_rcu_free(struct rcu_head *head)
474__ip_vs_unbind_svc(struct ip_vs_dest *dest)
475{ 474{
476 struct ip_vs_service *svc = dest->svc; 475 struct ip_vs_service *svc;
476
477 svc = container_of(head, struct ip_vs_service, rcu_head);
478 ip_vs_service_free(svc);
479}
477 480
478 dest->svc = NULL; 481static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
482{
479 if (atomic_dec_and_test(&svc->refcnt)) { 483 if (atomic_dec_and_test(&svc->refcnt)) {
480 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", 484 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
481 svc->fwmark, 485 svc->fwmark,
482 IP_VS_DBG_ADDR(svc->af, &svc->addr), 486 IP_VS_DBG_ADDR(svc->af, &svc->addr),
483 ntohs(svc->port)); 487 ntohs(svc->port));
484 ip_vs_service_free(svc); 488 if (do_delay)
489 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
490 else
491 ip_vs_service_free(svc);
485 } 492 }
486} 493}
487 494
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
667 IP_VS_DBG_ADDR(svc->af, &dest->addr), 674 IP_VS_DBG_ADDR(svc->af, &dest->addr),
668 ntohs(dest->port), 675 ntohs(dest->port),
669 atomic_read(&dest->refcnt)); 676 atomic_read(&dest->refcnt));
670 /* We can not reuse dest while in grace period
671 * because conns still can use dest->svc
672 */
673 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
674 continue;
675 if (dest->af == svc->af && 677 if (dest->af == svc->af &&
676 ip_vs_addr_equal(svc->af, &dest->addr, daddr) && 678 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
677 dest->port == dport && 679 dest->port == dport &&
@@ -697,8 +699,10 @@ out:
697 699
698static void ip_vs_dest_free(struct ip_vs_dest *dest) 700static void ip_vs_dest_free(struct ip_vs_dest *dest)
699{ 701{
702 struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
703
700 __ip_vs_dst_cache_reset(dest); 704 __ip_vs_dst_cache_reset(dest);
701 __ip_vs_unbind_svc(dest); 705 __ip_vs_svc_put(svc, false);
702 free_percpu(dest->stats.cpustats); 706 free_percpu(dest->stats.cpustats);
703 kfree(dest); 707 kfree(dest);
704} 708}
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
771 struct ip_vs_dest_user_kern *udest, int add) 775 struct ip_vs_dest_user_kern *udest, int add)
772{ 776{
773 struct netns_ipvs *ipvs = net_ipvs(svc->net); 777 struct netns_ipvs *ipvs = net_ipvs(svc->net);
778 struct ip_vs_service *old_svc;
774 struct ip_vs_scheduler *sched; 779 struct ip_vs_scheduler *sched;
775 int conn_flags; 780 int conn_flags;
776 781
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
792 atomic_set(&dest->conn_flags, conn_flags); 797 atomic_set(&dest->conn_flags, conn_flags);
793 798
794 /* bind the service */ 799 /* bind the service */
795 if (!dest->svc) { 800 old_svc = rcu_dereference_protected(dest->svc, 1);
801 if (!old_svc) {
796 __ip_vs_bind_svc(dest, svc); 802 __ip_vs_bind_svc(dest, svc);
797 } else { 803 } else {
798 if (dest->svc != svc) { 804 if (old_svc != svc) {
799 __ip_vs_unbind_svc(dest);
800 ip_vs_zero_stats(&dest->stats); 805 ip_vs_zero_stats(&dest->stats);
801 __ip_vs_bind_svc(dest, svc); 806 __ip_vs_bind_svc(dest, svc);
807 __ip_vs_svc_put(old_svc, true);
802 } 808 }
803 } 809 }
804 810
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
998 return 0; 1004 return 0;
999} 1005}
1000 1006
1001static void ip_vs_dest_wait_readers(struct rcu_head *head)
1002{
1003 struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
1004 rcu_head);
1005
1006 /* End of grace period after unlinking */
1007 clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1008}
1009
1010
1011/* 1007/*
1012 * Delete a destination (must be already unlinked from the service) 1008 * Delete a destination (must be already unlinked from the service)
1013 */ 1009 */
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
1023 */ 1019 */
1024 ip_vs_rs_unhash(dest); 1020 ip_vs_rs_unhash(dest);
1025 1021
1026 if (!cleanup) {
1027 set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1028 call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
1029 }
1030
1031 spin_lock_bh(&ipvs->dest_trash_lock); 1022 spin_lock_bh(&ipvs->dest_trash_lock);
1032 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", 1023 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
1033 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 1024 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
1034 atomic_read(&dest->refcnt)); 1025 atomic_read(&dest->refcnt));
1035 if (list_empty(&ipvs->dest_trash) && !cleanup) 1026 if (list_empty(&ipvs->dest_trash) && !cleanup)
1036 mod_timer(&ipvs->dest_trash_timer, 1027 mod_timer(&ipvs->dest_trash_timer,
1037 jiffies + IP_VS_DEST_TRASH_PERIOD); 1028 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1038 /* dest lives in trash without reference */ 1029 /* dest lives in trash without reference */
1039 list_add(&dest->t_list, &ipvs->dest_trash); 1030 list_add(&dest->t_list, &ipvs->dest_trash);
1031 dest->idle_start = 0;
1040 spin_unlock_bh(&ipvs->dest_trash_lock); 1032 spin_unlock_bh(&ipvs->dest_trash_lock);
1041 ip_vs_dest_put(dest); 1033 ip_vs_dest_put(dest);
1042} 1034}
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
1108 struct net *net = (struct net *) data; 1100 struct net *net = (struct net *) data;
1109 struct netns_ipvs *ipvs = net_ipvs(net); 1101 struct netns_ipvs *ipvs = net_ipvs(net);
1110 struct ip_vs_dest *dest, *next; 1102 struct ip_vs_dest *dest, *next;
1103 unsigned long now = jiffies;
1111 1104
1112 spin_lock(&ipvs->dest_trash_lock); 1105 spin_lock(&ipvs->dest_trash_lock);
1113 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1106 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
1114 /* Skip if dest is in grace period */
1115 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
1116 continue;
1117 if (atomic_read(&dest->refcnt) > 0) 1107 if (atomic_read(&dest->refcnt) > 0)
1118 continue; 1108 continue;
1109 if (dest->idle_start) {
1110 if (time_before(now, dest->idle_start +
1111 IP_VS_DEST_TRASH_PERIOD))
1112 continue;
1113 } else {
1114 dest->idle_start = max(1UL, now);
1115 continue;
1116 }
1119 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", 1117 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
1120 dest->vfwmark, 1118 dest->vfwmark,
1121 IP_VS_DBG_ADDR(dest->svc->af, &dest->addr), 1119 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1122 ntohs(dest->port)); 1120 ntohs(dest->port));
1123 list_del(&dest->t_list); 1121 list_del(&dest->t_list);
1124 ip_vs_dest_free(dest); 1122 ip_vs_dest_free(dest);
1125 } 1123 }
1126 if (!list_empty(&ipvs->dest_trash)) 1124 if (!list_empty(&ipvs->dest_trash))
1127 mod_timer(&ipvs->dest_trash_timer, 1125 mod_timer(&ipvs->dest_trash_timer,
1128 jiffies + IP_VS_DEST_TRASH_PERIOD); 1126 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1129 spin_unlock(&ipvs->dest_trash_lock); 1127 spin_unlock(&ipvs->dest_trash_lock);
1130} 1128}
1131 1129
@@ -1320,14 +1318,6 @@ out:
1320 return ret; 1318 return ret;
1321} 1319}
1322 1320
1323static void ip_vs_service_rcu_free(struct rcu_head *head)
1324{
1325 struct ip_vs_service *svc;
1326
1327 svc = container_of(head, struct ip_vs_service, rcu_head);
1328 ip_vs_service_free(svc);
1329}
1330
1331/* 1321/*
1332 * Delete a service from the service list 1322 * Delete a service from the service list
1333 * - The service must be unlinked, unlocked and not referenced! 1323 * - The service must be unlinked, unlocked and not referenced!
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
1376 /* 1366 /*
1377 * Free the service if nobody refers to it 1367 * Free the service if nobody refers to it
1378 */ 1368 */
1379 if (atomic_dec_and_test(&svc->refcnt)) { 1369 __ip_vs_svc_put(svc, true);
1380 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
1381 svc->fwmark,
1382 IP_VS_DBG_ADDR(svc->af, &svc->addr),
1383 ntohs(svc->port));
1384 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
1385 }
1386 1370
1387 /* decrease the module use count */ 1371 /* decrease the module use count */
1388 ip_vs_use_count_dec(); 1372 ip_vs_use_count_dec();
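
With the IP_VS_DEST_STATE_REMOVING bit and its call_rcu() grace-period dance removed, expiry of trashed destinations is now purely time based: the first timer pass that finds an unreferenced dest only stamps dest->idle_start, and a later pass frees it once a full IP_VS_DEST_TRASH_PERIOD has elapsed since that stamp; because the timer now re-arms every half period, an idle dest is gone at most roughly one and a half periods after entering the trash. The service side gains the same shape through __ip_vs_svc_put(), which drops the refcount and defers the free via call_rcu() only when asked to. A tiny model of the trash schedule (the period value is arbitrary and remaining references are ignored):

    #include <stdio.h>

    #define TRASH_PERIOD 120        /* stand-in for IP_VS_DEST_TRASH_PERIOD */

    int main(void)
    {
            int idle_start = 0;     /* 0 means "not stamped yet", as in the patch */
            int now;

            /* the timer re-arms every half period, per the modified mod_timer() calls */
            for (now = TRASH_PERIOD / 2; ; now += TRASH_PERIOD / 2) {
                    if (!idle_start) {
                            idle_start = now;          /* first pass: stamp only */
                    } else if (now - idle_start >= TRASH_PERIOD) {
                            printf("freed at t=%d (stamped at t=%d)\n",
                                   now, idle_start);
                            break;                     /* later pass: free       */
                    }
            }
            return 0;
    }
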
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 6bee6d0c73a5..1425e9a924c4 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
59 struct ip_vs_cpu_stats __percpu *stats) 59 struct ip_vs_cpu_stats __percpu *stats)
60{ 60{
61 int i; 61 int i;
62 bool add = false;
62 63
63 for_each_possible_cpu(i) { 64 for_each_possible_cpu(i) {
64 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); 65 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
65 unsigned int start; 66 unsigned int start;
66 __u64 inbytes, outbytes; 67 __u64 inbytes, outbytes;
67 if (i) { 68 if (add) {
68 sum->conns += s->ustats.conns; 69 sum->conns += s->ustats.conns;
69 sum->inpkts += s->ustats.inpkts; 70 sum->inpkts += s->ustats.inpkts;
70 sum->outpkts += s->ustats.outpkts; 71 sum->outpkts += s->ustats.outpkts;
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
76 sum->inbytes += inbytes; 77 sum->inbytes += inbytes;
77 sum->outbytes += outbytes; 78 sum->outbytes += outbytes;
78 } else { 79 } else {
80 add = true;
79 sum->conns = s->ustats.conns; 81 sum->conns = s->ustats.conns;
80 sum->inpkts = s->ustats.inpkts; 82 sum->inpkts = s->ustats.inpkts;
81 sum->outpkts = s->ustats.outpkts; 83 sum->outpkts = s->ustats.outpkts;
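
The estimator summed per-CPU counters with an "if (i)" test, i.e. it treated CPU 0 as the slot that initialises the sum and every other CPU as an increment, which silently assumes CPU 0 exists and is the first CPU that for_each_possible_cpu() yields. The new bool makes "first iteration" explicit and independent of CPU numbering. The idiom reduced to its core, with an illustrative per-CPU counter name:

    bool add = false;
    u64 sum = 0;
    int cpu;

    for_each_possible_cpu(cpu) {
            u64 v = per_cpu(pkts, cpu);

            if (add) {
                    sum += v;       /* accumulate                          */
            } else {
                    add = true;     /* first possible CPU, whatever its id */
                    sum = v;
            }
    }
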
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1383b0eadc0e..eff13c94498e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
93 struct hlist_node list; 93 struct hlist_node list;
94 int af; /* address family */ 94 int af; /* address family */
95 union nf_inet_addr addr; /* destination IP address */ 95 union nf_inet_addr addr; /* destination IP address */
96 struct ip_vs_dest __rcu *dest; /* real server (cache) */ 96 struct ip_vs_dest *dest; /* real server (cache) */
97 unsigned long lastuse; /* last used time */ 97 unsigned long lastuse; /* last used time */
98 struct rcu_head rcu_head; 98 struct rcu_head rcu_head;
99}; 99};
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
130}; 130};
131#endif 131#endif
132 132
133static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 133static void ip_vs_lblc_rcu_free(struct rcu_head *head)
134{ 134{
135 struct ip_vs_dest *dest; 135 struct ip_vs_lblc_entry *en = container_of(head,
136 struct ip_vs_lblc_entry,
137 rcu_head);
136 138
137 hlist_del_rcu(&en->list); 139 ip_vs_dest_put(en->dest);
138 /* 140 kfree(en);
139 * We don't kfree dest because it is referred either by its service
140 * or the trash dest list.
141 */
142 dest = rcu_dereference_protected(en->dest, 1);
143 ip_vs_dest_put(dest);
144 kfree_rcu(en, rcu_head);
145} 141}
146 142
143static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
144{
145 hlist_del_rcu(&en->list);
146 call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
147}
147 148
148/* 149/*
149 * Returns hash value for IPVS LBLC entry 150 * Returns hash value for IPVS LBLC entry
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
203 struct ip_vs_lblc_entry *en; 204 struct ip_vs_lblc_entry *en;
204 205
205 en = ip_vs_lblc_get(dest->af, tbl, daddr); 206 en = ip_vs_lblc_get(dest->af, tbl, daddr);
206 if (!en) { 207 if (en) {
207 en = kmalloc(sizeof(*en), GFP_ATOMIC); 208 if (en->dest == dest)
208 if (!en) 209 return en;
209 return NULL; 210 ip_vs_lblc_del(en);
210 211 }
211 en->af = dest->af; 212 en = kmalloc(sizeof(*en), GFP_ATOMIC);
212 ip_vs_addr_copy(dest->af, &en->addr, daddr); 213 if (!en)
213 en->lastuse = jiffies; 214 return NULL;
214 215
215 ip_vs_dest_hold(dest); 216 en->af = dest->af;
216 RCU_INIT_POINTER(en->dest, dest); 217 ip_vs_addr_copy(dest->af, &en->addr, daddr);
218 en->lastuse = jiffies;
217 219
218 ip_vs_lblc_hash(tbl, en); 220 ip_vs_dest_hold(dest);
219 } else { 221 en->dest = dest;
220 struct ip_vs_dest *old_dest;
221 222
222 old_dest = rcu_dereference_protected(en->dest, 1); 223 ip_vs_lblc_hash(tbl, en);
223 if (old_dest != dest) {
224 ip_vs_dest_put(old_dest);
225 ip_vs_dest_hold(dest);
226 /* No ordering constraints for refcnt */
227 RCU_INIT_POINTER(en->dest, dest);
228 }
229 }
230 224
231 return en; 225 return en;
232} 226}
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
246 tbl->dead = 1; 240 tbl->dead = 1;
247 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 241 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { 242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
249 ip_vs_lblc_free(en); 243 ip_vs_lblc_del(en);
250 atomic_dec(&tbl->entries); 244 atomic_dec(&tbl->entries);
251 } 245 }
252 } 246 }
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
281 sysctl_lblc_expiration(svc))) 275 sysctl_lblc_expiration(svc)))
282 continue; 276 continue;
283 277
284 ip_vs_lblc_free(en); 278 ip_vs_lblc_del(en);
285 atomic_dec(&tbl->entries); 279 atomic_dec(&tbl->entries);
286 } 280 }
287 spin_unlock(&svc->sched_lock); 281 spin_unlock(&svc->sched_lock);
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
335 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 329 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
336 continue; 330 continue;
337 331
338 ip_vs_lblc_free(en); 332 ip_vs_lblc_del(en);
339 atomic_dec(&tbl->entries); 333 atomic_dec(&tbl->entries);
340 goal--; 334 goal--;
341 } 335 }
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
443 continue; 437 continue;
444 438
445 doh = ip_vs_dest_conn_overhead(dest); 439 doh = ip_vs_dest_conn_overhead(dest);
446 if (loh * atomic_read(&dest->weight) > 440 if ((__s64)loh * atomic_read(&dest->weight) >
447 doh * atomic_read(&least->weight)) { 441 (__s64)doh * atomic_read(&least->weight)) {
448 least = dest; 442 least = dest;
449 loh = doh; 443 loh = doh;
450 } 444 }
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
511 * free up entries from the trash at any time. 505 * free up entries from the trash at any time.
512 */ 506 */
513 507
514 dest = rcu_dereference(en->dest); 508 dest = en->dest;
515 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && 509 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
516 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) 510 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
517 goto out; 511 goto out;
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
631{ 625{
632 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); 626 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
633 unregister_pernet_subsys(&ip_vs_lblc_ops); 627 unregister_pernet_subsys(&ip_vs_lblc_ops);
634 synchronize_rcu(); 628 rcu_barrier();
635} 629}
636 630
637 631
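
The (__s64) casts added to the weighted-least-load comparison here, and repeated below in ip_vs_lblcr, ip_vs_nq, ip_vs_sed and ip_vs_wlc, exist because both factors are 32-bit values: ip_vs_dest_conn_overhead() is roughly activeconns * 256 + inactconns and the weight can be large, so the product can exceed 32 bits and the old comparison ran on a wrapped value, letting the scheduler prefer the wrong real server. The related module-exit change swaps synchronize_rcu() for rcu_barrier(), which additionally waits for the call_rcu() callbacks queued by ip_vs_lblc_rcu_free() to run before the module text goes away. A self-contained illustration of the wrap (values picked to overflow; the unsigned arithmetic just reproduces the old behaviour without undefined behaviour):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int loh = 100000;       /* ~390 active connections << 8            */
            int weight = 30000;

            int32_t narrow = (int32_t)((uint32_t)loh * (uint32_t)weight);
            int64_t wide = (int64_t)loh * weight;   /* what the patch computes */

            printf("32-bit product: %d\n", (int)narrow);        /* -1294967296 */
            printf("64-bit product: %lld\n", (long long)wide);  /*  3000000000 */
            return 0;
    }
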
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 5199448697f6..0b8550089a2e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -89,7 +89,7 @@
89 */ 89 */
90struct ip_vs_dest_set_elem { 90struct ip_vs_dest_set_elem {
91 struct list_head list; /* list link */ 91 struct list_head list; /* list link */
92 struct ip_vs_dest __rcu *dest; /* destination server */ 92 struct ip_vs_dest *dest; /* destination server */
93 struct rcu_head rcu_head; 93 struct rcu_head rcu_head;
94}; 94};
95 95
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
107 107
108 if (check) { 108 if (check) {
109 list_for_each_entry(e, &set->list, list) { 109 list_for_each_entry(e, &set->list, list) {
110 struct ip_vs_dest *d; 110 if (e->dest == dest)
111
112 d = rcu_dereference_protected(e->dest, 1);
113 if (d == dest)
114 /* already existed */
115 return; 111 return;
116 } 112 }
117 } 113 }
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
121 return; 117 return;
122 118
123 ip_vs_dest_hold(dest); 119 ip_vs_dest_hold(dest);
124 RCU_INIT_POINTER(e->dest, dest); 120 e->dest = dest;
125 121
126 list_add_rcu(&e->list, &set->list); 122 list_add_rcu(&e->list, &set->list);
127 atomic_inc(&set->size); 123 atomic_inc(&set->size);
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
129 set->lastmod = jiffies; 125 set->lastmod = jiffies;
130} 126}
131 127
128static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
129{
130 struct ip_vs_dest_set_elem *e;
131
132 e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
133 ip_vs_dest_put(e->dest);
134 kfree(e);
135}
136
132static void 137static void
133ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 138ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
134{ 139{
135 struct ip_vs_dest_set_elem *e; 140 struct ip_vs_dest_set_elem *e;
136 141
137 list_for_each_entry(e, &set->list, list) { 142 list_for_each_entry(e, &set->list, list) {
138 struct ip_vs_dest *d; 143 if (e->dest == dest) {
139
140 d = rcu_dereference_protected(e->dest, 1);
141 if (d == dest) {
142 /* HIT */ 144 /* HIT */
143 atomic_dec(&set->size); 145 atomic_dec(&set->size);
144 set->lastmod = jiffies; 146 set->lastmod = jiffies;
145 ip_vs_dest_put(dest);
146 list_del_rcu(&e->list); 147 list_del_rcu(&e->list);
147 kfree_rcu(e, rcu_head); 148 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
148 break; 149 break;
149 } 150 }
150 } 151 }
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
155 struct ip_vs_dest_set_elem *e, *ep; 156 struct ip_vs_dest_set_elem *e, *ep;
156 157
157 list_for_each_entry_safe(e, ep, &set->list, list) { 158 list_for_each_entry_safe(e, ep, &set->list, list) {
158 struct ip_vs_dest *d;
159
160 d = rcu_dereference_protected(e->dest, 1);
161 /*
162 * We don't kfree dest because it is referred either
163 * by its service or by the trash dest list.
164 */
165 ip_vs_dest_put(d);
166 list_del_rcu(&e->list); 159 list_del_rcu(&e->list);
167 kfree_rcu(e, rcu_head); 160 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
168 } 161 }
169} 162}
170 163
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
175 struct ip_vs_dest *dest, *least; 168 struct ip_vs_dest *dest, *least;
176 int loh, doh; 169 int loh, doh;
177 170
178 if (set == NULL)
179 return NULL;
180
181 /* select the first destination server, whose weight > 0 */ 171 /* select the first destination server, whose weight > 0 */
182 list_for_each_entry_rcu(e, &set->list, list) { 172 list_for_each_entry_rcu(e, &set->list, list) {
183 least = rcu_dereference(e->dest); 173 least = e->dest;
184 if (least->flags & IP_VS_DEST_F_OVERLOAD) 174 if (least->flags & IP_VS_DEST_F_OVERLOAD)
185 continue; 175 continue;
186 176
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
195 /* find the destination with the weighted least load */ 185 /* find the destination with the weighted least load */
196 nextstage: 186 nextstage:
197 list_for_each_entry_continue_rcu(e, &set->list, list) { 187 list_for_each_entry_continue_rcu(e, &set->list, list) {
198 dest = rcu_dereference(e->dest); 188 dest = e->dest;
199 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 189 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
200 continue; 190 continue;
201 191
202 doh = ip_vs_dest_conn_overhead(dest); 192 doh = ip_vs_dest_conn_overhead(dest);
203 if ((loh * atomic_read(&dest->weight) > 193 if (((__s64)loh * atomic_read(&dest->weight) >
204 doh * atomic_read(&least->weight)) 194 (__s64)doh * atomic_read(&least->weight))
205 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 195 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
206 least = dest; 196 least = dest;
207 loh = doh; 197 loh = doh;
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
232 222
233 /* select the first destination server, whose weight > 0 */ 223 /* select the first destination server, whose weight > 0 */
234 list_for_each_entry(e, &set->list, list) { 224 list_for_each_entry(e, &set->list, list) {
235 most = rcu_dereference_protected(e->dest, 1); 225 most = e->dest;
236 if (atomic_read(&most->weight) > 0) { 226 if (atomic_read(&most->weight) > 0) {
237 moh = ip_vs_dest_conn_overhead(most); 227 moh = ip_vs_dest_conn_overhead(most);
238 goto nextstage; 228 goto nextstage;
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
243 /* find the destination with the weighted most load */ 233 /* find the destination with the weighted most load */
244 nextstage: 234 nextstage:
245 list_for_each_entry_continue(e, &set->list, list) { 235 list_for_each_entry_continue(e, &set->list, list) {
246 dest = rcu_dereference_protected(e->dest, 1); 236 dest = e->dest;
247 doh = ip_vs_dest_conn_overhead(dest); 237 doh = ip_vs_dest_conn_overhead(dest);
248 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 238 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
249 if ((moh * atomic_read(&dest->weight) < 239 if (((__s64)moh * atomic_read(&dest->weight) <
250 doh * atomic_read(&most->weight)) 240 (__s64)doh * atomic_read(&most->weight))
251 && (atomic_read(&dest->weight) > 0)) { 241 && (atomic_read(&dest->weight) > 0)) {
252 most = dest; 242 most = dest;
253 moh = doh; 243 moh = doh;
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
611 continue; 601 continue;
612 602
613 doh = ip_vs_dest_conn_overhead(dest); 603 doh = ip_vs_dest_conn_overhead(dest);
614 if (loh * atomic_read(&dest->weight) > 604 if ((__s64)loh * atomic_read(&dest->weight) >
615 doh * atomic_read(&least->weight)) { 605 (__s64)doh * atomic_read(&least->weight)) {
616 least = dest; 606 least = dest;
617 loh = doh; 607 loh = doh;
618 } 608 }
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
819{ 809{
820 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 810 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
821 unregister_pernet_subsys(&ip_vs_lblcr_ops); 811 unregister_pernet_subsys(&ip_vs_lblcr_ops);
822 synchronize_rcu(); 812 rcu_barrier();
823} 813}
824 814
825 815
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index d8d9860934fe..961a6de9bb29 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -40,7 +40,7 @@
40#include <net/ip_vs.h> 40#include <net/ip_vs.h>
41 41
42 42
43static inline unsigned int 43static inline int
44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
45{ 45{
46 /* 46 /*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
59 struct ip_vs_iphdr *iph) 59 struct ip_vs_iphdr *iph)
60{ 60{
61 struct ip_vs_dest *dest, *least = NULL; 61 struct ip_vs_dest *dest, *least = NULL;
62 unsigned int loh = 0, doh; 62 int loh = 0, doh;
63 63
64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
65 65
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
92 } 92 }
93 93
94 if (!least || 94 if (!least ||
95 (loh * atomic_read(&dest->weight) > 95 ((__s64)loh * atomic_read(&dest->weight) >
96 doh * atomic_read(&least->weight))) { 96 (__s64)doh * atomic_read(&least->weight))) {
97 least = dest; 97 least = dest;
98 loh = doh; 98 loh = doh;
99 } 99 }
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index a5284cc3d882..e446b9fa7424 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -44,7 +44,7 @@
44#include <net/ip_vs.h> 44#include <net/ip_vs.h>
45 45
46 46
47static inline unsigned int 47static inline int
48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
49{ 49{
50 /* 50 /*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
63 struct ip_vs_iphdr *iph) 63 struct ip_vs_iphdr *iph)
64{ 64{
65 struct ip_vs_dest *dest, *least; 65 struct ip_vs_dest *dest, *least;
66 unsigned int loh, doh; 66 int loh, doh;
67 67
68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
69 69
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
99 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 99 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
100 continue; 100 continue;
101 doh = ip_vs_sed_dest_overhead(dest); 101 doh = ip_vs_sed_dest_overhead(dest);
102 if (loh * atomic_read(&dest->weight) > 102 if ((__s64)loh * atomic_read(&dest->weight) >
103 doh * atomic_read(&least->weight)) { 103 (__s64)doh * atomic_read(&least->weight)) {
104 least = dest; 104 least = dest;
105 loh = doh; 105 loh = doh;
106 } 106 }
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 6dc1fa128840..b5b4650d50a9 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
35 struct ip_vs_iphdr *iph) 35 struct ip_vs_iphdr *iph)
36{ 36{
37 struct ip_vs_dest *dest, *least; 37 struct ip_vs_dest *dest, *least;
38 unsigned int loh, doh; 38 int loh, doh;
39 39
40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); 40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
41 41
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
71 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 71 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
72 continue; 72 continue;
73 doh = ip_vs_dest_conn_overhead(dest); 73 doh = ip_vs_dest_conn_overhead(dest);
74 if (loh * atomic_read(&dest->weight) > 74 if ((__s64)loh * atomic_read(&dest->weight) >
75 doh * atomic_read(&least->weight)) { 75 (__s64)doh * atomic_read(&least->weight)) {
76 least = dest; 76 least = dest;
77 loh = doh; 77 loh = doh;
78 } 78 }
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 6fd967c6278c..cdf4567ba9b3 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -24,7 +24,7 @@
24int synproxy_net_id; 24int synproxy_net_id;
25EXPORT_SYMBOL_GPL(synproxy_net_id); 25EXPORT_SYMBOL_GPL(synproxy_net_id);
26 26
27void 27bool
28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
29 const struct tcphdr *th, struct synproxy_options *opts) 29 const struct tcphdr *th, struct synproxy_options *opts)
30{ 30{
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
32 u8 buf[40], *ptr; 32 u8 buf[40], *ptr;
33 33
34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); 34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
35 BUG_ON(ptr == NULL); 35 if (ptr == NULL)
36 return false;
36 37
37 opts->options = 0; 38 opts->options = 0;
38 while (length > 0) { 39 while (length > 0) {
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
41 42
42 switch (opcode) { 43 switch (opcode) {
43 case TCPOPT_EOL: 44 case TCPOPT_EOL:
44 return; 45 return true;
45 case TCPOPT_NOP: 46 case TCPOPT_NOP:
46 length--; 47 length--;
47 continue; 48 continue;
48 default: 49 default:
49 opsize = *ptr++; 50 opsize = *ptr++;
50 if (opsize < 2) 51 if (opsize < 2)
51 return; 52 return true;
52 if (opsize > length) 53 if (opsize > length)
53 return; 54 return true;
54 55
55 switch (opcode) { 56 switch (opcode) {
56 case TCPOPT_MSS: 57 case TCPOPT_MSS:
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
84 length -= opsize; 85 length -= opsize;
85 } 86 }
86 } 87 }
88 return true;
87} 89}
88EXPORT_SYMBOL_GPL(synproxy_parse_options); 90EXPORT_SYMBOL_GPL(synproxy_parse_options);
89 91
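
synproxy_parse_options() used to BUG_ON() when skb_header_pointer() could not provide the TCP option bytes, which a malformed or truncated packet reaching the SYNPROXY code could trigger; it now returns false in that one case (truncated individual options inside an otherwise readable block still return true, as the opsize checks show) and every caller turns the failure into a drop. The caller-side pattern, exactly as added in the ip6t_SYNPROXY.c hunks above:

    if (!synproxy_parse_options(skb, thoff, th, &opts))
            return NF_DROP;         /* could not read the TCP options */
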
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 32ad015ee8ce..a2fef8b10b96 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
285 285
286 286
287/* remove one skb from head of flow queue */ 287/* remove one skb from head of flow queue */
288static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) 288static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
289{ 289{
290 struct sk_buff *skb = flow->head; 290 struct sk_buff *skb = flow->head;
291 291
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
293 flow->head = skb->next; 293 flow->head = skb->next;
294 skb->next = NULL; 294 skb->next = NULL;
295 flow->qlen--; 295 flow->qlen--;
296 sch->qstats.backlog -= qdisc_pkt_len(skb);
297 sch->q.qlen--;
296 } 298 }
297 return skb; 299 return skb;
298} 300}
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
418 struct fq_flow_head *head; 420 struct fq_flow_head *head;
419 struct sk_buff *skb; 421 struct sk_buff *skb;
420 struct fq_flow *f; 422 struct fq_flow *f;
423 u32 rate;
421 424
422 skb = fq_dequeue_head(&q->internal); 425 skb = fq_dequeue_head(sch, &q->internal);
423 if (skb) 426 if (skb)
424 goto out; 427 goto out;
425 fq_check_throttled(q, now); 428 fq_check_throttled(q, now);
@@ -449,7 +452,7 @@ begin:
449 goto begin; 452 goto begin;
450 } 453 }
451 454
452 skb = fq_dequeue_head(f); 455 skb = fq_dequeue_head(sch, f);
453 if (!skb) { 456 if (!skb) {
454 head->first = f->next; 457 head->first = f->next;
455 /* force a pass through old_flows to prevent starvation */ 458 /* force a pass through old_flows to prevent starvation */
@@ -466,43 +469,74 @@ begin:
466 f->time_next_packet = now; 469 f->time_next_packet = now;
467 f->credit -= qdisc_pkt_len(skb); 470 f->credit -= qdisc_pkt_len(skb);
468 471
469 if (f->credit <= 0 && 472 if (f->credit > 0 || !q->rate_enable)
470 q->rate_enable && 473 goto out;
471 skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
472 u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
473 474
474 rate = min(rate, q->flow_max_rate); 475 if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
475 if (rate) { 476 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
476 u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
477
478 do_div(len, rate);
479 /* Since socket rate can change later,
480 * clamp the delay to 125 ms.
481 * TODO: maybe segment the too big skb, as in commit
482 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
483 */
484 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
485 len = 125 * NSEC_PER_MSEC;
486 q->stat_pkts_too_long++;
487 }
488 477
489 f->time_next_packet = now + len; 478 rate = min(rate, q->flow_max_rate);
479 } else {
480 rate = q->flow_max_rate;
481 if (rate == ~0U)
482 goto out;
483 }
484 if (rate) {
485 u32 plen = max(qdisc_pkt_len(skb), q->quantum);
486 u64 len = (u64)plen * NSEC_PER_SEC;
487
488 do_div(len, rate);
489 /* Since socket rate can change later,
490 * clamp the delay to 125 ms.
491 * TODO: maybe segment the too big skb, as in commit
492 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
493 */
494 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
495 len = 125 * NSEC_PER_MSEC;
496 q->stat_pkts_too_long++;
490 } 497 }
498
499 f->time_next_packet = now + len;
491 } 500 }
492out: 501out:
493 sch->qstats.backlog -= qdisc_pkt_len(skb);
494 qdisc_bstats_update(sch, skb); 502 qdisc_bstats_update(sch, skb);
495 sch->q.qlen--;
496 qdisc_unthrottled(sch); 503 qdisc_unthrottled(sch);
497 return skb; 504 return skb;
498} 505}
499 506
500static void fq_reset(struct Qdisc *sch) 507static void fq_reset(struct Qdisc *sch)
501{ 508{
509 struct fq_sched_data *q = qdisc_priv(sch);
510 struct rb_root *root;
502 struct sk_buff *skb; 511 struct sk_buff *skb;
512 struct rb_node *p;
513 struct fq_flow *f;
514 unsigned int idx;
503 515
504 while ((skb = fq_dequeue(sch)) != NULL) 516 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
505 kfree_skb(skb); 517 kfree_skb(skb);
518
519 if (!q->fq_root)
520 return;
521
522 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
523 root = &q->fq_root[idx];
524 while ((p = rb_first(root)) != NULL) {
525 f = container_of(p, struct fq_flow, fq_node);
526 rb_erase(p, root);
527
528 while ((skb = fq_dequeue_head(sch, f)) != NULL)
529 kfree_skb(skb);
530
531 kmem_cache_free(fq_flow_cachep, f);
532 }
533 }
534 q->new_flows.first = NULL;
535 q->old_flows.first = NULL;
536 q->delayed = RB_ROOT;
537 q->flows = 0;
538 q->inactive_flows = 0;
539 q->throttled_flows = 0;
506} 540}
507 541
508static void fq_rehash(struct fq_sched_data *q, 542static void fq_rehash(struct fq_sched_data *q,
@@ -645,6 +679,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
645 while (sch->q.qlen > sch->limit) { 679 while (sch->q.qlen > sch->limit) {
646 struct sk_buff *skb = fq_dequeue(sch); 680 struct sk_buff *skb = fq_dequeue(sch);
647 681
682 if (!skb)
683 break;
648 kfree_skb(skb); 684 kfree_skb(skb);
649 drop_count++; 685 drop_count++;
650 } 686 }
@@ -657,21 +693,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
657static void fq_destroy(struct Qdisc *sch) 693static void fq_destroy(struct Qdisc *sch)
658{ 694{
659 struct fq_sched_data *q = qdisc_priv(sch); 695 struct fq_sched_data *q = qdisc_priv(sch);
660 struct rb_root *root;
661 struct rb_node *p;
662 unsigned int idx;
663 696
664 if (q->fq_root) { 697 fq_reset(sch);
665 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { 698 kfree(q->fq_root);
666 root = &q->fq_root[idx];
667 while ((p = rb_first(root)) != NULL) {
668 rb_erase(p, root);
669 kmem_cache_free(fq_flow_cachep,
670 container_of(p, struct fq_flow, fq_node));
671 }
672 }
673 kfree(q->fq_root);
674 }
675 qdisc_watchdog_cancel(&q->watchdog); 699 qdisc_watchdog_cancel(&q->watchdog);
676} 700}
677 701
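
Two things happen in the sch_fq.c diff: the qlen/backlog bookkeeping moves into fq_dequeue_head() so that the rewritten fq_reset(), which now walks every flow in the rb-trees and frees its queued skbs, keeps the qdisc counters consistent without going through the full dequeue path; and the pacing computation is restructured so a flow with remaining credit skips it entirely, the effective packet length is at least the flow quantum, and packets without a usable socket fall back to flow_max_rate (and are not paced at all if that is unlimited). The gap itself is still plen * NSEC_PER_SEC / rate, with rate in bytes per second, clamped to 125 ms because the socket's pacing rate can change while the flow is throttled. Plugging in numbers (standalone, with the same clamp; the quantum adjustment is approximated by the chosen plen):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
            uint32_t plen = 3000;      /* roughly the default quantum of two frames */
            uint32_t rate = 125000;    /* 125 kB/s, i.e. a 1 Mbit/s pacing rate     */
            uint64_t gap = (uint64_t)plen * NSEC_PER_SEC / rate;

            if (gap > 125 * NSEC_PER_MSEC)     /* same clamp as fq_dequeue()        */
                    gap = 125 * NSEC_PER_MSEC;

            printf("gap = %llu ns (%.0f ms)\n",
                   (unsigned long long)gap, gap / 1e6);
            /* 3000 B at 1 Mbit/s -> 24 ms between dequeues of this flow; at
             * 10 kB/s the raw gap would be 300 ms, so the clamp applies and
             * stat_pkts_too_long is bumped */
            return 0;
    }
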
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db04be3e..e7000be321b0 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
47 47
48 /* Allow network administrator to have same access as root. */ 48 /* Allow network administrator to have same access as root. */
49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) || 49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
50 uid_eq(root_uid, current_uid())) { 50 uid_eq(root_uid, current_euid())) {
51 int mode = (table->mode >> 6) & 7; 51 int mode = (table->mode >> 6) & 7;
52 return (mode << 6) | (mode << 3) | mode; 52 return (mode << 6) | (mode << 3) | mode;
53 } 53 }
54 /* Allow netns root group to have the same access as the root group */ 54 /* Allow netns root group to have the same access as the root group */
55 if (gid_eq(root_gid, current_gid())) { 55 if (in_egroup_p(root_gid)) {
56 int mode = (table->mode >> 3) & 7; 56 int mode = (table->mode >> 3) & 7;
57 return (mode << 3) | mode; 57 return (mode << 3) | mode;
58 } 58 }
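
The sysctl permission handler now matches the caller by effective UID and by effective group membership (in_egroup_p() also honours supplementary groups) instead of the real IDs, in line with how permission checks are normally made for privileged access. The value it returns replicates the matched rwx triplet into every slot, so a caller testing any of the owner/group/other positions sees the same answer: for a 0644 table the owner branch extracts 6 and answers 0666, the group branch extracts 4 and answers 044. A quick check of that bit juggling:

    #include <assert.h>

    static int owner_perms(int table_mode)
    {
            int mode = (table_mode >> 6) & 7;
            return (mode << 6) | (mode << 3) | mode;
    }

    static int group_perms(int table_mode)
    {
            int mode = (table_mode >> 3) & 7;
            return (mode << 3) | mode;
    }

    int main(void)
    {
            assert(owner_perms(0644) == 0666);  /* rw- replicated to all slots    */
            assert(group_perms(0644) == 044);   /* r-- replicated to group+other  */
            return 0;
    }
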
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 47016c304c84..66cad506b8a2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
3975# check for new externs in .h files. 3975# check for new externs in .h files.
3976 if ($realfile =~ /\.h$/ && 3976 if ($realfile =~ /\.h$/ &&
3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) { 3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
3978 if (WARN("AVOID_EXTERNS", 3978 if (CHK("AVOID_EXTERNS",
3979 "extern prototypes should be avoided in .h files\n" . $herecurr) && 3979 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
3980 $fix) { 3980 $fix) {
3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/; 3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
3982 } 3982 }
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index d6222ba4e919..532471d0b3a0 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -15,14 +15,14 @@
15 * it should be. 15 * it should be.
16 */ 16 */
17 17
18#include <linux/crypto.h> 18#include <crypto/hash.h>
19 19
20#include "include/apparmor.h" 20#include "include/apparmor.h"
21#include "include/crypto.h" 21#include "include/crypto.h"
22 22
23static unsigned int apparmor_hash_size; 23static unsigned int apparmor_hash_size;
24 24
25static struct crypto_hash *apparmor_tfm; 25static struct crypto_shash *apparmor_tfm;
26 26
27unsigned int aa_hash_size(void) 27unsigned int aa_hash_size(void)
28{ 28{
@@ -32,35 +32,33 @@ unsigned int aa_hash_size(void)
32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, 32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
33 size_t len) 33 size_t len)
34{ 34{
35 struct scatterlist sg[2]; 35 struct {
36 struct hash_desc desc = { 36 struct shash_desc shash;
37 .tfm = apparmor_tfm, 37 char ctx[crypto_shash_descsize(apparmor_tfm)];
38 .flags = 0 38 } desc;
39 };
40 int error = -ENOMEM; 39 int error = -ENOMEM;
41 u32 le32_version = cpu_to_le32(version); 40 u32 le32_version = cpu_to_le32(version);
42 41
43 if (!apparmor_tfm) 42 if (!apparmor_tfm)
44 return 0; 43 return 0;
45 44
46 sg_init_table(sg, 2);
47 sg_set_buf(&sg[0], &le32_version, 4);
48 sg_set_buf(&sg[1], (u8 *) start, len);
49
50 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); 45 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
51 if (!profile->hash) 46 if (!profile->hash)
52 goto fail; 47 goto fail;
53 48
54 error = crypto_hash_init(&desc); 49 desc.shash.tfm = apparmor_tfm;
50 desc.shash.flags = 0;
51
52 error = crypto_shash_init(&desc.shash);
55 if (error) 53 if (error)
56 goto fail; 54 goto fail;
57 error = crypto_hash_update(&desc, &sg[0], 4); 55 error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
58 if (error) 56 if (error)
59 goto fail; 57 goto fail;
60 error = crypto_hash_update(&desc, &sg[1], len); 58 error = crypto_shash_update(&desc.shash, (u8 *) start, len);
61 if (error) 59 if (error)
62 goto fail; 60 goto fail;
63 error = crypto_hash_final(&desc, profile->hash); 61 error = crypto_shash_final(&desc.shash, profile->hash);
64 if (error) 62 if (error)
65 goto fail; 63 goto fail;
66 64
@@ -75,19 +73,19 @@ fail:
75 73
76static int __init init_profile_hash(void) 74static int __init init_profile_hash(void)
77{ 75{
78 struct crypto_hash *tfm; 76 struct crypto_shash *tfm;
79 77
80 if (!apparmor_initialized) 78 if (!apparmor_initialized)
81 return 0; 79 return 0;
82 80
83 tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); 81 tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
84 if (IS_ERR(tfm)) { 82 if (IS_ERR(tfm)) {
85 int error = PTR_ERR(tfm); 83 int error = PTR_ERR(tfm);
86 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error); 84 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
87 return error; 85 return error;
88 } 86 }
89 apparmor_tfm = tfm; 87 apparmor_tfm = tfm;
90 apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm); 88 apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
91 89
92 aa_info_message("AppArmor sha1 policy hashing enabled"); 90 aa_info_message("AppArmor sha1 policy hashing enabled");
93 91
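
The AppArmor hashing code moves from the legacy crypto_hash interface, which consumes scatterlists, to the synchronous crypto_shash API, which hashes plain buffers and needs only a small descriptor: a struct shash_desc followed by crypto_shash_descsize(tfm) bytes of per-request state, which the patch builds on the stack. A compressed sketch of the same call sequence for an arbitrary buffer buf/len; error handling and the <crypto/hash.h> and <crypto/sha.h> includes are omitted, and it mirrors init_profile_hash() and aa_calc_profile_hash() above rather than adding anything new:

    struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
    struct {
            struct shash_desc shash;
            char ctx[crypto_shash_descsize(tfm)];
    } desc;
    u8 digest[SHA1_DIGEST_SIZE];

    desc.shash.tfm = tfm;
    desc.shash.flags = 0;

    crypto_shash_init(&desc.shash);
    crypto_shash_update(&desc.shash, buf, len);
    crypto_shash_final(&desc.shash, digest);
    crypto_free_shash(tfm);
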
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index f2d4b6348cbc..c28b0f20ab53 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -360,7 +360,9 @@ static inline void aa_put_replacedby(struct aa_replacedby *p)
360static inline void __aa_update_replacedby(struct aa_profile *orig, 360static inline void __aa_update_replacedby(struct aa_profile *orig,
361 struct aa_profile *new) 361 struct aa_profile *new)
362{ 362{
363 struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile); 363 struct aa_profile *tmp;
364 tmp = rcu_dereference_protected(orig->replacedby->profile,
365 mutex_is_locked(&orig->ns->lock));
364 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new)); 366 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
365 orig->flags |= PFLAG_INVALID; 367 orig->flags |= PFLAG_INVALID;
366 aa_put_profile(tmp); 368 aa_put_profile(tmp);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 6172509fa2b7..345bec07a27d 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -563,7 +563,8 @@ void __init aa_free_root_ns(void)
563static void free_replacedby(struct aa_replacedby *r) 563static void free_replacedby(struct aa_replacedby *r)
564{ 564{
565 if (r) { 565 if (r) {
566 aa_put_profile(rcu_dereference(r->profile)); 566 /* r->profile will not be updated any more as r is dead */
567 aa_put_profile(rcu_dereference_protected(r->profile, true));
567 kzfree(r); 568 kzfree(r);
568 } 569 }
569} 570}
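
Both AppArmor hunks are about the update side of an RCU-protected pointer: rcu_dereference() is only legal inside rcu_read_lock(), and with CONFIG_PROVE_RCU it warns otherwise, so code that instead holds the write-side lock (or owns the object outright) should use rcu_dereference_protected() and state that condition as its second argument. The two idioms, condensed from the hunks themselves:

    /* updater: replacedby->profile writes are serialised by the ns mutex */
    tmp = rcu_dereference_protected(orig->replacedby->profile,
                                    mutex_is_locked(&orig->ns->lock));
    rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
    aa_put_profile(tmp);

    /* teardown: r is no longer reachable, nothing can race, hence "true" */
    aa_put_profile(rcu_dereference_protected(r->profile, true));
    kzfree(r);
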
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index dad36a6ab45f..fc3e6628a864 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -746,7 +746,6 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
746 * @tclass: target security class 746 * @tclass: target security class
747 * @requested: requested permissions, interpreted based on @tclass 747 * @requested: requested permissions, interpreted based on @tclass
748 * @auditdata: auxiliary audit data 748 * @auditdata: auxiliary audit data
749 * @flags: VFS walk flags
750 * 749 *
751 * Check the AVC to determine whether the @requested permissions are granted 750 * Check the AVC to determine whether the @requested permissions are granted
752 * for the SID pair (@ssid, @tsid), interpreting the permissions 751 * for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -756,17 +755,15 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
756 * permissions are granted, -%EACCES if any permissions are denied, or 755 * permissions are granted, -%EACCES if any permissions are denied, or
757 * another -errno upon other errors. 756 * another -errno upon other errors.
758 */ 757 */
759int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, 758int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
760 u32 requested, struct common_audit_data *auditdata, 759 u32 requested, struct common_audit_data *auditdata)
761 unsigned flags)
762{ 760{
763 struct av_decision avd; 761 struct av_decision avd;
764 int rc, rc2; 762 int rc, rc2;
765 763
766 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); 764 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
767 765
768 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 766 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
769 flags);
770 if (rc2) 767 if (rc2)
771 return rc2; 768 return rc2;
772 return rc; 769 return rc;
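
avc_has_perm_flags() is gone: no caller passed anything but a literal 0 for flags any more, so avc_has_perm() becomes the real entry point and the hooks.c hunks below drop the flags plumbing from inode_has_perm() and friends. A sketch of a post-change caller; example_check() and its arguments are illustrative, and the SELinux-internal avc.h header is assumed for the declaration.

#include <linux/types.h>
#include <linux/lsm_audit.h>
#include "avc.h"			/* SELinux-internal header, assumed */

/* Illustrative: check 'requested' permissions for a subject/object SID pair. */
static int example_check(u32 ssid, u32 tsid, u16 tclass, u32 requested,
			 struct dentry *dentry)
{
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_DENTRY;	/* what to print if access is denied */
	ad.u.dentry = dentry;

	return avc_has_perm(ssid, tsid, tclass, requested, &ad);
}
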
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a5091ec06aa6..5b5231068516 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1502,7 +1502,7 @@ static int cred_has_capability(const struct cred *cred,
1502 1502
1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); 1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
1504 if (audit == SECURITY_CAP_AUDIT) { 1504 if (audit == SECURITY_CAP_AUDIT) {
1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); 1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
1506 if (rc2) 1506 if (rc2)
1507 return rc2; 1507 return rc2;
1508 } 1508 }
@@ -1525,8 +1525,7 @@ static int task_has_system(struct task_struct *tsk,
1525static int inode_has_perm(const struct cred *cred, 1525static int inode_has_perm(const struct cred *cred,
1526 struct inode *inode, 1526 struct inode *inode,
1527 u32 perms, 1527 u32 perms,
1528 struct common_audit_data *adp, 1528 struct common_audit_data *adp)
1529 unsigned flags)
1530{ 1529{
1531 struct inode_security_struct *isec; 1530 struct inode_security_struct *isec;
1532 u32 sid; 1531 u32 sid;
@@ -1539,7 +1538,7 @@ static int inode_has_perm(const struct cred *cred,
1539 sid = cred_sid(cred); 1538 sid = cred_sid(cred);
1540 isec = inode->i_security; 1539 isec = inode->i_security;
1541 1540
1542 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1541 return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
1543} 1542}
1544 1543
1545/* Same as inode_has_perm, but pass explicit audit data containing 1544/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1554,7 +1553,7 @@ static inline int dentry_has_perm(const struct cred *cred,
1554 1553
1555 ad.type = LSM_AUDIT_DATA_DENTRY; 1554 ad.type = LSM_AUDIT_DATA_DENTRY;
1556 ad.u.dentry = dentry; 1555 ad.u.dentry = dentry;
1557 return inode_has_perm(cred, inode, av, &ad, 0); 1556 return inode_has_perm(cred, inode, av, &ad);
1558} 1557}
1559 1558
1560/* Same as inode_has_perm, but pass explicit audit data containing 1559/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1569,7 +1568,7 @@ static inline int path_has_perm(const struct cred *cred,
1569 1568
1570 ad.type = LSM_AUDIT_DATA_PATH; 1569 ad.type = LSM_AUDIT_DATA_PATH;
1571 ad.u.path = *path; 1570 ad.u.path = *path;
1572 return inode_has_perm(cred, inode, av, &ad, 0); 1571 return inode_has_perm(cred, inode, av, &ad);
1573} 1572}
1574 1573
1575/* Same as path_has_perm, but uses the inode from the file struct. */ 1574/* Same as path_has_perm, but uses the inode from the file struct. */
@@ -1581,7 +1580,7 @@ static inline int file_path_has_perm(const struct cred *cred,
1581 1580
1582 ad.type = LSM_AUDIT_DATA_PATH; 1581 ad.type = LSM_AUDIT_DATA_PATH;
1583 ad.u.path = file->f_path; 1582 ad.u.path = file->f_path;
1584 return inode_has_perm(cred, file_inode(file), av, &ad, 0); 1583 return inode_has_perm(cred, file_inode(file), av, &ad);
1585} 1584}
1586 1585
1587/* Check whether a task can use an open file descriptor to 1586/* Check whether a task can use an open file descriptor to
@@ -1617,7 +1616,7 @@ static int file_has_perm(const struct cred *cred,
1617 /* av is zero if only checking access to the descriptor. */ 1616 /* av is zero if only checking access to the descriptor. */
1618 rc = 0; 1617 rc = 0;
1619 if (av) 1618 if (av)
1620 rc = inode_has_perm(cred, inode, av, &ad, 0); 1619 rc = inode_has_perm(cred, inode, av, &ad);
1621 1620
1622out: 1621out:
1623 return rc; 1622 return rc;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 92d0ab561db8..f53ee3c58d0f 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -130,7 +130,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
130 u16 tclass, u32 requested, 130 u16 tclass, u32 requested,
131 struct av_decision *avd, 131 struct av_decision *avd,
132 int result, 132 int result,
133 struct common_audit_data *a, unsigned flags) 133 struct common_audit_data *a)
134{ 134{
135 u32 audited, denied; 135 u32 audited, denied;
136 audited = avc_audit_required(requested, avd, result, 0, &denied); 136 audited = avc_audit_required(requested, avd, result, 0, &denied);
@@ -138,7 +138,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
138 return 0; 138 return 0;
139 return slow_avc_audit(ssid, tsid, tclass, 139 return slow_avc_audit(ssid, tsid, tclass,
140 requested, audited, denied, 140 requested, audited, denied,
141 a, flags); 141 a, 0);
142} 142}
143 143
144#define AVC_STRICT 1 /* Ignore permissive mode. */ 144#define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -147,17 +147,9 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
147 unsigned flags, 147 unsigned flags,
148 struct av_decision *avd); 148 struct av_decision *avd);
149 149
150int avc_has_perm_flags(u32 ssid, u32 tsid, 150int avc_has_perm(u32 ssid, u32 tsid,
151 u16 tclass, u32 requested, 151 u16 tclass, u32 requested,
152 struct common_audit_data *auditdata, 152 struct common_audit_data *auditdata);
153 unsigned);
154
155static inline int avc_has_perm(u32 ssid, u32 tsid,
156 u16 tclass, u32 requested,
157 struct common_audit_data *auditdata)
158{
159 return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
160}
161 153
162u32 avc_policy_seqno(void); 154u32 avc_policy_seqno(void);
163 155
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 651ce0923675..c91eba504f92 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -270,7 +270,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
270static const struct regmap_config pcm1681_regmap = { 270static const struct regmap_config pcm1681_regmap = {
271 .reg_bits = 8, 271 .reg_bits = 8,
272 .val_bits = 8, 272 .val_bits = 8,
273 .max_register = ARRAY_SIZE(pcm1681_reg_defaults) + 1, 273 .max_register = 0x13,
274 .reg_defaults = pcm1681_reg_defaults, 274 .reg_defaults = pcm1681_reg_defaults,
275 .num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults), 275 .num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults),
276 .writeable_reg = pcm1681_writeable_reg, 276 .writeable_reg = pcm1681_writeable_reg,
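
regmap's .max_register is the address of the highest register on the device, and register addresses start at 0, so deriving it from ARRAY_SIZE() of the defaults table plus one need not land on the right value; the pcm1681 hunk above and the pcm1792a hunk below therefore spell the address out. A hedged sketch of the intended shape, with example_* names and register values that are illustrative only.

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Illustrative codec whose last register is 0x13. */
static const struct reg_default example_reg_defaults[] = {
	{ 0x00, 0x00 },
	/* ... one entry per register that has a cached default ... */
	{ 0x13, 0x00 },
};

static const struct regmap_config example_regmap = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x13,		/* highest register address, not a count */
	.reg_defaults	= example_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(example_reg_defaults),
};
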
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
index 2a8eccf64c76..7613181123fe 100644
--- a/sound/soc/codecs/pcm1792a.c
+++ b/sound/soc/codecs/pcm1792a.c
@@ -188,7 +188,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
188static const struct regmap_config pcm1792a_regmap = { 188static const struct regmap_config pcm1792a_regmap = {
189 .reg_bits = 8, 189 .reg_bits = 8,
190 .val_bits = 8, 190 .val_bits = 8,
191 .max_register = 24, 191 .max_register = 23,
192 .reg_defaults = pcm1792a_reg_defaults, 192 .reg_defaults = pcm1792a_reg_defaults,
193 .num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults), 193 .num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults),
194 .writeable_reg = pcm1792a_writeable_reg, 194 .writeable_reg = pcm1792a_writeable_reg,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6e3f269243e0..64ad84d8a306 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -674,6 +674,8 @@ static const struct snd_soc_dapm_route intercon[] = {
674 /* Left Input */ 674 /* Left Input */
675 {"Left Line1L Mux", "single-ended", "LINE1L"}, 675 {"Left Line1L Mux", "single-ended", "LINE1L"},
676 {"Left Line1L Mux", "differential", "LINE1L"}, 676 {"Left Line1L Mux", "differential", "LINE1L"},
677 {"Left Line1R Mux", "single-ended", "LINE1R"},
678 {"Left Line1R Mux", "differential", "LINE1R"},
677 679
678 {"Left Line2L Mux", "single-ended", "LINE2L"}, 680 {"Left Line2L Mux", "single-ended", "LINE2L"},
679 {"Left Line2L Mux", "differential", "LINE2L"}, 681 {"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +692,8 @@ static const struct snd_soc_dapm_route intercon[] = {
690 /* Right Input */ 692 /* Right Input */
691 {"Right Line1R Mux", "single-ended", "LINE1R"}, 693 {"Right Line1R Mux", "single-ended", "LINE1R"},
692 {"Right Line1R Mux", "differential", "LINE1R"}, 694 {"Right Line1R Mux", "differential", "LINE1R"},
695 {"Right Line1L Mux", "single-ended", "LINE1L"},
696 {"Right Line1L Mux", "differential", "LINE1L"},
693 697
694 {"Right Line2R Mux", "single-ended", "LINE2R"}, 698 {"Right Line2R Mux", "single-ended", "LINE2R"},
695 {"Right Line2R Mux", "differential", "LINE2R"}, 699 {"Right Line2R Mux", "differential", "LINE2R"},
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c6b743978d5e..6b81d0ce2c44 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -936,7 +936,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
936 ssi_private->ssi_phys = res.start; 936 ssi_private->ssi_phys = res.start;
937 937
938 ssi_private->irq = irq_of_parse_and_map(np, 0); 938 ssi_private->irq = irq_of_parse_and_map(np, 0);
939 if (ssi_private->irq == NO_IRQ) { 939 if (ssi_private->irq == 0) {
940 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); 940 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
941 return -ENXIO; 941 return -ENXIO;
942 } 942 }
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index a3d60d4bea4c..a2fd7321b5a9 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
112 return ret; 112 return ret;
113 } 113 }
114 114
115 if (machine_is_mx31_3ds()) { 115 if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
116 imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4, 116 imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
117 IMX_AUDMUX_V2_PTCR_SYN, 117 IMX_AUDMUX_V2_PTCR_SYN,
118 IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) | 118 IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index f58bcd85c07f..57d6941676ff 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -600,19 +600,17 @@ static int imx_ssi_probe(struct platform_device *pdev)
600 ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx; 600 ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
601 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx; 601 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
602 602
603 ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params); 603 ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
604 if (ret) 604 ssi->dma_init = imx_pcm_dma_init(pdev);
605 goto failed_pcm_fiq;
606 605
607 ret = imx_pcm_dma_init(pdev); 606 if (ssi->fiq_init && ssi->dma_init) {
608 if (ret) 607 ret = ssi->fiq_init;
609 goto failed_pcm_dma; 608 goto failed_pcm;
609 }
610 610
611 return 0; 611 return 0;
612 612
613failed_pcm_dma: 613failed_pcm:
614 imx_pcm_fiq_exit(pdev);
615failed_pcm_fiq:
616 snd_soc_unregister_component(&pdev->dev); 614 snd_soc_unregister_component(&pdev->dev);
617failed_register: 615failed_register:
618 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
@@ -628,8 +626,11 @@ static int imx_ssi_remove(struct platform_device *pdev)
628 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 626 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
629 struct imx_ssi *ssi = platform_get_drvdata(pdev); 627 struct imx_ssi *ssi = platform_get_drvdata(pdev);
630 628
631 imx_pcm_dma_exit(pdev); 629 if (!ssi->dma_init)
632 imx_pcm_fiq_exit(pdev); 630 imx_pcm_dma_exit(pdev);
631
632 if (!ssi->fiq_init)
633 imx_pcm_fiq_exit(pdev);
633 634
634 snd_soc_unregister_component(&pdev->dev); 635 snd_soc_unregister_component(&pdev->dev);
635 636
diff --git a/sound/soc/fsl/imx-ssi.h b/sound/soc/fsl/imx-ssi.h
index fb1616ba8c59..560c40fc9ebb 100644
--- a/sound/soc/fsl/imx-ssi.h
+++ b/sound/soc/fsl/imx-ssi.h
@@ -211,6 +211,8 @@ struct imx_ssi {
211 struct imx_dma_data filter_data_rx; 211 struct imx_dma_data filter_data_rx;
212 struct imx_pcm_fiq_params fiq_params; 212 struct imx_pcm_fiq_params fiq_params;
213 213
214 int fiq_init;
215 int dma_init;
214 int enabled; 216 int enabled;
215}; 217};
216 218
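
The imx-ssi hunks make probe() try both PCM backends, record each return code in the new fiq_init/dma_init fields, and fail only when both are unusable; remove() then tears down only the backend(s) that actually initialised. The same bookkeeping pattern in isolation; example_dev and the backend_* helpers are hypothetical.

/* Hypothetical backends: return 0 on success, -errno on failure. */
int backend_a_init(void);
int backend_b_init(void);
void backend_a_exit(void);
void backend_b_exit(void);

struct example_dev {
	int a_init;
	int b_init;
};

static int example_probe(struct example_dev *d)
{
	d->a_init = backend_a_init();
	d->b_init = backend_b_init();

	/* Only fatal if neither backend came up. */
	if (d->a_init && d->b_init)
		return d->a_init;
	return 0;
}

static void example_remove(struct example_dev *d)
{
	if (!d->a_init)
		backend_a_exit();
	if (!d->b_init)
		backend_b_exit();
}
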
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index daa78a0095fa..4a07f7179690 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -1,6 +1,6 @@
1config SND_OMAP_SOC 1config SND_OMAP_SOC
2 tristate "SoC Audio for the Texas Instruments OMAP chips" 2 tristate "SoC Audio for the Texas Instruments OMAP chips"
3 depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST) 3 depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
4 select SND_DMAENGINE_PCM 4 select SND_DMAENGINE_PCM
5 5
6config SND_OMAP_SOC_DMIC 6config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
26 26
27config SND_OMAP_SOC_RX51 27config SND_OMAP_SOC_RX51
28 tristate "SoC Audio support for Nokia RX-51" 28 tristate "SoC Audio support for Nokia RX-51"
29 depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST) 29 depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
30 select SND_OMAP_SOC_MCBSP 30 select SND_OMAP_SOC_MCBSP
31 select SND_SOC_TLV320AIC3X 31 select SND_SOC_TLV320AIC3X
32 select SND_SOC_TPA6130A2 32 select SND_SOC_TPA6130A2
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 9cc6986a8cfb..5dd87f4c919e 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -220,8 +220,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
220void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv, 220void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
221 struct rsnd_mod *mod, 221 struct rsnd_mod *mod,
222 enum rsnd_reg reg); 222 enum rsnd_reg reg);
223#define rsnd_is_gen1(s) ((s)->info->flags & RSND_GEN1) 223#define rsnd_is_gen1(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
224#define rsnd_is_gen2(s) ((s)->info->flags & RSND_GEN2) 224#define rsnd_is_gen2(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
225 225
226/* 226/*
227 * R-Car ADG 227 * R-Car ADG
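
The rsnd.h hunk changes the generation tests from single-bit checks to comparisons of the whole RSND_GEN_MASK field, so a flags value whose encoding shares bits with another generation (or a future one added inside the same field) cannot satisfy more than one test. A small standalone illustration; the bit values are invented for the example and are not the driver's.

#include <stdio.h>

#define GEN_MASK 0x3		/* assumed 2-bit generation field */
#define GEN1     0x1
#define GEN2     0x3		/* deliberately overlaps GEN1 in this made-up encoding */

int main(void)
{
	unsigned int flags = GEN2;

	/* Plain bit test: a GEN2 value also "matches" GEN1. */
	printf("bit test says gen1:       %d\n", !!(flags & GEN1));

	/* Masked comparison: unambiguous. */
	printf("masked compare says gen1: %d\n", (flags & GEN_MASK) == GEN1);
	return 0;
}
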
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
5#include <stdbool.h> 5#include <stdbool.h>
6#include <sys/vfs.h> 6#include <sys/vfs.h>
7#include <sys/mount.h> 7#include <sys/mount.h>
8#include <linux/magic.h>
9#include <linux/kernel.h> 8#include <linux/kernel.h>
10 9
11#include "debugfs.h" 10#include "debugfs.h"
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 3a0ff7fb71b6..64c043b7a438 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -770,6 +770,7 @@ check: $(OUTPUT)common-cmds.h
770install-bin: all 770install-bin: all
771 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' 771 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
772 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' 772 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
773 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
773 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 774 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
774ifndef NO_LIBPERL 775ifndef NO_LIBPERL
775 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 776 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 9570c2b0f83c..b2519e49424f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, 32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
33 struct perf_tsc_conversion *tc) 33 struct perf_tsc_conversion *tc)
34{ 34{
35 bool cap_usr_time_zero; 35 bool cap_user_time_zero;
36 u32 seq; 36 u32 seq;
37 int i = 0; 37 int i = 0;
38 38
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
42 tc->time_mult = pc->time_mult; 42 tc->time_mult = pc->time_mult;
43 tc->time_shift = pc->time_shift; 43 tc->time_shift = pc->time_shift;
44 tc->time_zero = pc->time_zero; 44 tc->time_zero = pc->time_zero;
45 cap_usr_time_zero = pc->cap_usr_time_zero; 45 cap_user_time_zero = pc->cap_user_time_zero;
46 rmb(); 46 rmb();
47 if (pc->lock == seq && !(seq & 1)) 47 if (pc->lock == seq && !(seq & 1))
48 break; 48 break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
52 } 52 }
53 } 53 }
54 54
55 if (!cap_usr_time_zero) 55 if (!cap_user_time_zero)
56 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
57 57
58 return 0; 58 return 0;
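
Besides renaming cap_usr_time_zero to the field's current name cap_user_time_zero, the surrounding code is a seqlock-style lockless read of the mmapped perf_event page: sample the sequence word, read the fields after a barrier, and retry if the sequence changed or is odd (writer active). The read side reduced to a sketch; struct shared and read_stable() are hypothetical, and __sync_synchronize() stands in for rmb().

/* Hypothetical publisher/consumer sharing a word-sized sequence counter. */
struct shared {
	volatile unsigned int lock;	/* even = stable, odd = writer active */
	unsigned long long value;
};

static unsigned long long read_stable(const struct shared *s)
{
	unsigned int seq;
	unsigned long long v;

	do {
		seq = s->lock;
		__sync_synchronize();	/* read barrier */
		v = s->value;
		__sync_synchronize();
	} while (s->lock != seq || (seq & 1));

	return v;
}
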
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 423875c999b2..afe377b2884f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -321,8 +321,6 @@ found:
321 return perf_event__repipe(tool, event_sw, &sample_sw, machine); 321 return perf_event__repipe(tool, event_sw, &sample_sw, machine);
322} 322}
323 323
324extern volatile int session_done;
325
326static void sig_handler(int sig __maybe_unused) 324static void sig_handler(int sig __maybe_unused)
327{ 325{
328 session_done = 1; 326 session_done = 1;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c2dff9cb1f2c..9b5f077fee5b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
101 101
102 dir1 = opendir(PATH_SYS_NODE); 102 dir1 = opendir(PATH_SYS_NODE);
103 if (!dir1) 103 if (!dir1)
104 return -1; 104 return 0;
105 105
106 while ((dent1 = readdir(dir1)) != NULL) { 106 while ((dent1 = readdir(dir1)) != NULL) {
107 if (dent1->d_type != DT_DIR || 107 if (dent1->d_type != DT_DIR ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e50d8d77419..72eae7498c09 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
401 return 0; 401 return 0;
402} 402}
403 403
404extern volatile int session_done;
405
406static void sig_handler(int sig __maybe_unused) 404static void sig_handler(int sig __maybe_unused)
407{ 405{
408 session_done = 1; 406 session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
568 } 566 }
569 } 567 }
570 568
569 if (session_done())
570 return 0;
571
571 if (nr_samples == 0) { 572 if (nr_samples == 0) {
572 ui__error("The %s file has no samples!\n", session->filename); 573 ui__error("The %s file has no samples!\n", session->filename);
573 return 0; 574 return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7f31a3ded1b6..9c333ff3dfeb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
553 .ordering_requires_timestamps = true, 553 .ordering_requires_timestamps = true,
554}; 554};
555 555
556extern volatile int session_done;
557
558static void sig_handler(int sig __maybe_unused) 556static void sig_handler(int sig __maybe_unused)
559{ 557{
560 session_done = 1; 558 session_done = 1;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f686d5ff594e..5098f144b92d 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -457,6 +457,7 @@ static int __run_perf_stat(int argc, const char **argv)
457 perror("failed to prepare workload"); 457 perror("failed to prepare workload");
458 return -1; 458 return -1;
459 } 459 }
460 child_pid = evsel_list->workload.pid;
460 } 461 }
461 462
462 if (group) 463 if (group)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5aa6375e3e9..71aa3e35406b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,6 +16,23 @@
16#include <sys/mman.h> 16#include <sys/mman.h>
17#include <linux/futex.h> 17#include <linux/futex.h>
18 18
19/* For older distros: */
20#ifndef MAP_STACK
21# define MAP_STACK 0x20000
22#endif
23
24#ifndef MADV_HWPOISON
25# define MADV_HWPOISON 100
26#endif
27
28#ifndef MADV_MERGEABLE
29# define MADV_MERGEABLE 12
30#endif
31
32#ifndef MADV_UNMERGEABLE
33# define MADV_UNMERGEABLE 13
34#endif
35
19static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, 36static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
20 unsigned long arg, 37 unsigned long arg,
21 u8 arg_idx __maybe_unused, 38 u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
1038 1055
1039 trace->tool.sample = trace__process_sample; 1056 trace->tool.sample = trace__process_sample;
1040 trace->tool.mmap = perf_event__process_mmap; 1057 trace->tool.mmap = perf_event__process_mmap;
1058 trace->tool.mmap2 = perf_event__process_mmap2;
1041 trace->tool.comm = perf_event__process_comm; 1059 trace->tool.comm = perf_event__process_comm;
1042 trace->tool.exit = perf_event__process_exit; 1060 trace->tool.exit = perf_event__process_exit;
1043 trace->tool.fork = perf_event__process_fork; 1061 trace->tool.fork = perf_event__process_fork;
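
The block added at the top of builtin-trace.c lets the file build against older distro headers: each constant is defined only if the system headers did not already provide it, with the numeric values matching the common kernel uapi definitions. The same guard pattern in a standalone program, using the MAP_STACK value from the hunk.

#include <sys/mman.h>

#ifndef MAP_STACK
# define MAP_STACK 0x20000	/* fallback for old headers */
#endif

int main(void)
{
	/* MAP_STACK can now be used unconditionally. */
	void *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	return p == MAP_FAILED;
}
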
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
87CFLAGS += -Wextra 87CFLAGS += -Wextra
88CFLAGS += -std=gnu99 88CFLAGS += -std=gnu99
89 89
90EXTLIBS = -lelf -lpthread -lrt -lm 90EXTLIBS = -lelf -lpthread -lrt -lm -ldl
91 91
92ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) 92ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
93 CFLAGS += -fstack-protector-all 93 CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
180ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y) 180ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
181 CFLAGS += -DLIBELF_MMAP 181 CFLAGS += -DLIBELF_MMAP
182endif 182endif
183ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
184 CFLAGS += -DHAVE_ELF_GETPHDRNUM
185endif
183 186
184# include ARCH specific config 187# include ARCH specific config
185-include $(src-perf)/arch/$(ARCH)/Makefile 188-include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..f79305739ecc 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
61} 61}
62endef 62endef
63 63
64define SOURCE_ELF_GETPHDRNUM
65#include <libelf.h>
66int main(void)
67{
68 size_t dst;
69 return elf_getphdrnum(0, &dst);
70}
71endef
72
64ifndef NO_SLANG 73ifndef NO_SLANG
65define SOURCE_SLANG 74define SOURCE_SLANG
66#include <slang.h> 75#include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
210 219
211int main(void) 220int main(void)
212{ 221{
222 printf(\"error message: %s\", audit_errno_to_name(0));
213 return audit_open(); 223 return audit_open();
214} 224}
215endef 225endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
809 end = map__rip_2objdump(map, sym->end); 809 end = map__rip_2objdump(map, sym->end);
810 810
811 offset = line_ip - start; 811 offset = line_ip - start;
812 if (offset < 0 || (u64)line_ip > end) 812 if ((u64)line_ip < start || (u64)line_ip > end)
813 offset = -1; 813 offset = -1;
814 else 814 else
815 parsed_line = tmp2 + 1; 815 parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..7defd77105d0 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
263} 263}
264 264
265/** 265/**
266 * die_is_func_def - Ensure that this DIE is a subprogram and definition
267 * @dw_die: a DIE
268 *
269 * Ensure that this DIE is a subprogram and NOT a declaration. This
270 * returns true if @dw_die is a function definition.
271 **/
272bool die_is_func_def(Dwarf_Die *dw_die)
273{
274 Dwarf_Attribute attr;
275
276 return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
277 dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
278}
279
280/**
266 * die_get_data_member_location - Get the data-member offset 281 * die_get_data_member_location - Get the data-member offset
267 * @mb_die: a DIE of a member of a data structure 282 * @mb_die: a DIE of a member of a data structure
268 * @offs: The offset of the member in the data structure 283 * @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
392{ 407{
393 struct __addr_die_search_param *ad = data; 408 struct __addr_die_search_param *ad = data;
394 409
410 /*
411 * Since a declaration entry doesn't has given pc, this always returns
412 * function definition entry.
413 */
395 if (dwarf_tag(fn_die) == DW_TAG_subprogram && 414 if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
396 dwarf_haspc(fn_die, ad->addr)) { 415 dwarf_haspc(fn_die, ad->addr)) {
397 memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); 416 memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
@@ -407,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
407 * @die_mem: a buffer for result DIE 426 * @die_mem: a buffer for result DIE
408 * 427 *
409 * Search a non-inlined function DIE which includes @addr. Stores the 428 * Search a non-inlined function DIE which includes @addr. Stores the
410 * DIE to @die_mem and returns it if found. Returns NULl if failed. 429 * DIE to @die_mem and returns it if found. Returns NULL if failed.
411 */ 430 */
412Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, 431Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
413 Dwarf_Die *die_mem) 432 Dwarf_Die *die_mem)
@@ -435,15 +454,32 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
435} 454}
436 455
437/** 456/**
457 * die_find_top_inlinefunc - Search the top inlined function at given address
458 * @sp_die: a subprogram DIE which including @addr
459 * @addr: target address
460 * @die_mem: a buffer for result DIE
461 *
462 * Search an inlined function DIE which includes @addr. Stores the
463 * DIE to @die_mem and returns it if found. Returns NULL if failed.
464 * Even if several inlined functions are expanded recursively, this
465 * doesn't trace it down, and returns the topmost one.
466 */
467Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
468 Dwarf_Die *die_mem)
469{
470 return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
471}
472
473/**
438 * die_find_inlinefunc - Search an inlined function at given address 474 * die_find_inlinefunc - Search an inlined function at given address
439 * @cu_die: a CU DIE which including @addr 475 * @sp_die: a subprogram DIE which including @addr
440 * @addr: target address 476 * @addr: target address
441 * @die_mem: a buffer for result DIE 477 * @die_mem: a buffer for result DIE
442 * 478 *
443 * Search an inlined function DIE which includes @addr. Stores the 479 * Search an inlined function DIE which includes @addr. Stores the
444 * DIE to @die_mem and returns it if found. Returns NULl if failed. 480 * DIE to @die_mem and returns it if found. Returns NULL if failed.
445 * If several inlined functions are expanded recursively, this trace 481 * If several inlined functions are expanded recursively, this trace
446 * it and returns deepest one. 482 * it down and returns deepest one.
447 */ 483 */
448Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 484Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
449 Dwarf_Die *die_mem) 485 Dwarf_Die *die_mem)
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 6ce1717784b7..b4fe90c6cb2d 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
38extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, 38extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
39 int (*callback)(Dwarf_Die *, void *), void *data); 39 int (*callback)(Dwarf_Die *, void *), void *data);
40 40
41/* Ensure that this DIE is a subprogram and definition (not declaration) */
42extern bool die_is_func_def(Dwarf_Die *dw_die);
43
41/* Compare diename and tname */ 44/* Compare diename and tname */
42extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); 45extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
43 46
@@ -76,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
76extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, 79extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
77 Dwarf_Die *die_mem); 80 Dwarf_Die *die_mem);
78 81
79/* Search an inlined function including given address */ 82/* Search the top inlined function including given address */
83extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
84 Dwarf_Die *die_mem);
85
86/* Search the deepest inlined function including given address */
80extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 87extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
81 Dwarf_Die *die_mem); 88 Dwarf_Die *die_mem);
82 89
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26441d0e571b..c3e5a3b817ab 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
199 return write_padded(fd, name, name_len + 1, len); 199 return write_padded(fd, name, name_len + 1, len);
200} 200}
201 201
202static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, 202static int __dsos__write_buildid_table(struct list_head *head,
203 u16 misc, int fd) 203 struct machine *machine,
204 pid_t pid, u16 misc, int fd)
204{ 205{
206 char nm[PATH_MAX];
205 struct dso *pos; 207 struct dso *pos;
206 208
207 dsos__for_each_with_build_id(pos, head) { 209 dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
215 if (is_vdso_map(pos->short_name)) { 217 if (is_vdso_map(pos->short_name)) {
216 name = (char *) VDSO__MAP_NAME; 218 name = (char *) VDSO__MAP_NAME;
217 name_len = sizeof(VDSO__MAP_NAME) + 1; 219 name_len = sizeof(VDSO__MAP_NAME) + 1;
220 } else if (dso__is_kcore(pos)) {
221 machine__mmap_name(machine, nm, sizeof(nm));
222 name = nm;
223 name_len = strlen(nm) + 1;
218 } else { 224 } else {
219 name = pos->long_name; 225 name = pos->long_name;
220 name_len = pos->long_name_len + 1; 226 name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
240 umisc = PERF_RECORD_MISC_GUEST_USER; 246 umisc = PERF_RECORD_MISC_GUEST_USER;
241 } 247 }
242 248
243 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, 249 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
244 kmisc, fd); 250 machine->pid, kmisc, fd);
245 if (err == 0) 251 if (err == 0)
246 err = __dsos__write_buildid_table(&machine->user_dsos, 252 err = __dsos__write_buildid_table(&machine->user_dsos, machine,
247 machine->pid, umisc, fd); 253 machine->pid, umisc, fd);
248 return err; 254 return err;
249} 255}
@@ -375,23 +381,31 @@ out_free:
375 return err; 381 return err;
376} 382}
377 383
378static int dso__cache_build_id(struct dso *dso, const char *debugdir) 384static int dso__cache_build_id(struct dso *dso, struct machine *machine,
385 const char *debugdir)
379{ 386{
380 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; 387 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
381 bool is_vdso = is_vdso_map(dso->short_name); 388 bool is_vdso = is_vdso_map(dso->short_name);
389 char *name = dso->long_name;
390 char nm[PATH_MAX];
382 391
383 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), 392 if (dso__is_kcore(dso)) {
384 dso->long_name, debugdir, 393 is_kallsyms = true;
385 is_kallsyms, is_vdso); 394 machine__mmap_name(machine, nm, sizeof(nm));
395 name = nm;
396 }
397 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
398 debugdir, is_kallsyms, is_vdso);
386} 399}
387 400
388static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) 401static int __dsos__cache_build_ids(struct list_head *head,
402 struct machine *machine, const char *debugdir)
389{ 403{
390 struct dso *pos; 404 struct dso *pos;
391 int err = 0; 405 int err = 0;
392 406
393 dsos__for_each_with_build_id(pos, head) 407 dsos__for_each_with_build_id(pos, head)
394 if (dso__cache_build_id(pos, debugdir)) 408 if (dso__cache_build_id(pos, machine, debugdir))
395 err = -1; 409 err = -1;
396 410
397 return err; 411 return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
399 413
400static int machine__cache_build_ids(struct machine *machine, const char *debugdir) 414static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
401{ 415{
402 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); 416 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
403 ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); 417 debugdir);
418 ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
404 return ret; 419 return ret;
405} 420}
406 421
@@ -2753,6 +2768,18 @@ int perf_session__read_header(struct perf_session *session)
2753 if (perf_file_header__read(&f_header, header, fd) < 0) 2768 if (perf_file_header__read(&f_header, header, fd) < 0)
2754 return -EINVAL; 2769 return -EINVAL;
2755 2770
2771 /*
2772 * Sanity check that perf.data was written cleanly; data size is
2773 * initialized to 0 and updated only if the on_exit function is run.
2774 * If data size is still 0 then the file contains only partial
2775 * information. Just warn user and process it as much as it can.
2776 */
2777 if (f_header.data.size == 0) {
2778 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2779 "Was the 'perf record' command properly terminated?\n",
2780 session->filename);
2781 }
2782
2756 nr_attrs = f_header.attrs.size / f_header.attr_size; 2783 nr_attrs = f_header.attrs.size / f_header.attr_size;
2757 lseek(fd, f_header.attrs.offset, SEEK_SET); 2784 lseek(fd, f_header.attrs.offset, SEEK_SET);
2758 2785
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 46a0d35a05e1..9ff6cf3e9a99 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
611 next = rb_first(root); 611 next = rb_first(root);
612 612
613 while (next) { 613 while (next) {
614 if (session_done())
615 break;
614 n = rb_entry(next, struct hist_entry, rb_node_in); 616 n = rb_entry(next, struct hist_entry, rb_node_in);
615 next = rb_next(&n->rb_node_in); 617 next = rb_next(&n->rb_node_in);
616 618
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 933d14f287ca..6188d2876a71 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
792 modules = path; 792 modules = path;
793 } 793 }
794 794
795 if (symbol__restricted_filename(path, "/proc/modules")) 795 if (symbol__restricted_filename(modules, "/proc/modules"))
796 return -1; 796 return -1;
797 797
798 file = fopen(modules, "r"); 798 file = fopen(modules, "r");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index be0329394d56..c09e0a9fdf4c 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
118static int debuginfo__init_offline_dwarf(struct debuginfo *self, 118static int debuginfo__init_offline_dwarf(struct debuginfo *self,
119 const char *path) 119 const char *path)
120{ 120{
121 Dwfl_Module *mod;
122 int fd; 121 int fd;
123 122
124 fd = open(path, O_RDONLY); 123 fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
129 if (!self->dwfl) 128 if (!self->dwfl)
130 goto error; 129 goto error;
131 130
132 mod = dwfl_report_offline(self->dwfl, "", "", fd); 131 self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
133 if (!mod) 132 if (!self->mod)
134 goto error; 133 goto error;
135 134
136 self->dbg = dwfl_module_getdwarf(mod, &self->bias); 135 self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
137 if (!self->dbg) 136 if (!self->dbg)
138 goto error; 137 goto error;
139 138
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
676} 675}
677 676
678/* Convert subprogram DIE to trace point */ 677/* Convert subprogram DIE to trace point */
679static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, 678static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
680 bool retprobe, struct probe_trace_point *tp) 679 Dwarf_Addr paddr, bool retprobe,
680 struct probe_trace_point *tp)
681{ 681{
682 Dwarf_Addr eaddr, highaddr; 682 Dwarf_Addr eaddr, highaddr;
683 const char *name; 683 GElf_Sym sym;
684 684 const char *symbol;
685 /* Copy the name of probe point */ 685
686 name = dwarf_diename(sp_die); 686 /* Verify the address is correct */
687 if (name) { 687 if (dwarf_entrypc(sp_die, &eaddr) != 0) {
688 if (dwarf_entrypc(sp_die, &eaddr) != 0) { 688 pr_warning("Failed to get entry address of %s\n",
689 pr_warning("Failed to get entry address of %s\n", 689 dwarf_diename(sp_die));
690 dwarf_diename(sp_die)); 690 return -ENOENT;
691 return -ENOENT; 691 }
692 } 692 if (dwarf_highpc(sp_die, &highaddr) != 0) {
693 if (dwarf_highpc(sp_die, &highaddr) != 0) { 693 pr_warning("Failed to get end address of %s\n",
694 pr_warning("Failed to get end address of %s\n", 694 dwarf_diename(sp_die));
695 dwarf_diename(sp_die)); 695 return -ENOENT;
696 return -ENOENT; 696 }
697 } 697 if (paddr > highaddr) {
698 if (paddr > highaddr) { 698 pr_warning("Offset specified is greater than size of %s\n",
699 pr_warning("Offset specified is greater than size of %s\n", 699 dwarf_diename(sp_die));
700 dwarf_diename(sp_die)); 700 return -EINVAL;
701 return -EINVAL; 701 }
702 } 702
703 tp->symbol = strdup(name); 703 /* Get an appropriate symbol from symtab */
704 if (tp->symbol == NULL) 704 symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
705 return -ENOMEM; 705 if (!symbol) {
706 tp->offset = (unsigned long)(paddr - eaddr); 706 pr_warning("Failed to find symbol at 0x%lx\n",
707 } else 707 (unsigned long)paddr);
708 /* This function has no name. */ 708 return -ENOENT;
709 tp->offset = (unsigned long)paddr; 709 }
710 tp->offset = (unsigned long)(paddr - sym.st_value);
711 tp->symbol = strdup(symbol);
712 if (!tp->symbol)
713 return -ENOMEM;
710 714
711 /* Return probe must be on the head of a subprogram */ 715 /* Return probe must be on the head of a subprogram */
712 if (retprobe) { 716 if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
734 } 738 }
735 739
736 /* If not a real subprogram, find a real one */ 740 /* If not a real subprogram, find a real one */
737 if (dwarf_tag(sc_die) != DW_TAG_subprogram) { 741 if (!die_is_func_def(sc_die)) {
738 if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { 742 if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
739 pr_warning("Failed to find probe point in any " 743 pr_warning("Failed to find probe point in any "
740 "functions.\n"); 744 "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
980 struct dwarf_callback_param *param = data; 984 struct dwarf_callback_param *param = data;
981 struct probe_finder *pf = param->data; 985 struct probe_finder *pf = param->data;
982 struct perf_probe_point *pp = &pf->pev->point; 986 struct perf_probe_point *pp = &pf->pev->point;
983 Dwarf_Attribute attr;
984 987
985 /* Check tag and diename */ 988 /* Check tag and diename */
986 if (dwarf_tag(sp_die) != DW_TAG_subprogram || 989 if (!die_is_func_def(sp_die) ||
987 !die_compare_name(sp_die, pp->function) || 990 !die_compare_name(sp_die, pp->function))
988 dwarf_attr(sp_die, DW_AT_declaration, &attr))
989 return DWARF_CB_OK; 991 return DWARF_CB_OK;
990 992
991 /* Check declared file */ 993 /* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
1151 tev = &tf->tevs[tf->ntevs++]; 1153 tev = &tf->tevs[tf->ntevs++];
1152 1154
1153 /* Trace point should be converted from subprogram DIE */ 1155 /* Trace point should be converted from subprogram DIE */
1154 ret = convert_to_trace_point(&pf->sp_die, pf->addr, 1156 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
1155 pf->pev->point.retprobe, &tev->point); 1157 pf->pev->point.retprobe, &tev->point);
1156 if (ret < 0) 1158 if (ret < 0)
1157 return ret; 1159 return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
1183{ 1185{
1184 struct trace_event_finder tf = { 1186 struct trace_event_finder tf = {
1185 .pf = {.pev = pev, .callback = add_probe_trace_event}, 1187 .pf = {.pev = pev, .callback = add_probe_trace_event},
1186 .max_tevs = max_tevs}; 1188 .mod = self->mod, .max_tevs = max_tevs};
1187 int ret; 1189 int ret;
1188 1190
1189 /* Allocate result tevs array */ 1191 /* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
1252 vl = &af->vls[af->nvls++]; 1254 vl = &af->vls[af->nvls++];
1253 1255
1254 /* Trace point should be converted from subprogram DIE */ 1256 /* Trace point should be converted from subprogram DIE */
1255 ret = convert_to_trace_point(&pf->sp_die, pf->addr, 1257 ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
1256 pf->pev->point.retprobe, &vl->point); 1258 pf->pev->point.retprobe, &vl->point);
1257 if (ret < 0) 1259 if (ret < 0)
1258 return ret; 1260 return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
1291{ 1293{
1292 struct available_var_finder af = { 1294 struct available_var_finder af = {
1293 .pf = {.pev = pev, .callback = add_available_vars}, 1295 .pf = {.pev = pev, .callback = add_available_vars},
1296 .mod = self->mod,
1294 .max_vls = max_vls, .externs = externs}; 1297 .max_vls = max_vls, .externs = externs};
1295 int ret; 1298 int ret;
1296 1299
@@ -1324,8 +1327,8 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
1324 struct perf_probe_point *ppt) 1327 struct perf_probe_point *ppt)
1325{ 1328{
1326 Dwarf_Die cudie, spdie, indie; 1329 Dwarf_Die cudie, spdie, indie;
1327 Dwarf_Addr _addr, baseaddr; 1330 Dwarf_Addr _addr = 0, baseaddr = 0;
1328 const char *fname = NULL, *func = NULL, *tmp; 1331 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
1329 int baseline = 0, lineno = 0, ret = 0; 1332 int baseline = 0, lineno = 0, ret = 0;
1330 1333
1331 /* Adjust address with bias */ 1334 /* Adjust address with bias */
@@ -1346,27 +1349,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
1346 /* Find a corresponding function (name, baseline and baseaddr) */ 1349 /* Find a corresponding function (name, baseline and baseaddr) */
1347 if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) { 1350 if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
1348 /* Get function entry information */ 1351 /* Get function entry information */
1349 tmp = dwarf_diename(&spdie); 1352 func = basefunc = dwarf_diename(&spdie);
1350 if (!tmp || 1353 if (!func ||
1351 dwarf_entrypc(&spdie, &baseaddr) != 0 || 1354 dwarf_entrypc(&spdie, &baseaddr) != 0 ||
1352 dwarf_decl_line(&spdie, &baseline) != 0) 1355 dwarf_decl_line(&spdie, &baseline) != 0) {
1356 lineno = 0;
1353 goto post; 1357 goto post;
1354 func = tmp; 1358 }
1355 1359
1356 if (addr == (unsigned long)baseaddr) 1360 if (addr == (unsigned long)baseaddr) {
1357 /* Function entry - Relative line number is 0 */ 1361 /* Function entry - Relative line number is 0 */
1358 lineno = baseline; 1362 lineno = baseline;
1359 else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, 1363 fname = dwarf_decl_file(&spdie);
1360 &indie)) { 1364 goto post;
1365 }
1366
1367 /* Track down the inline functions step by step */
1368 while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
1369 &indie)) {
1370 /* There is an inline function */
1361 if (dwarf_entrypc(&indie, &_addr) == 0 && 1371 if (dwarf_entrypc(&indie, &_addr) == 0 &&
1362 _addr == addr) 1372 _addr == addr) {
1363 /* 1373 /*
1364 * addr is at an inline function entry. 1374 * addr is at an inline function entry.
1365 * In this case, lineno should be the call-site 1375 * In this case, lineno should be the call-site
1366 * line number. 1376 * line number. (overwrite lineinfo)
1367 */ 1377 */
1368 lineno = die_get_call_lineno(&indie); 1378 lineno = die_get_call_lineno(&indie);
1369 else { 1379 fname = die_get_call_file(&indie);
1380 break;
1381 } else {
1370 /* 1382 /*
1371 * addr is in an inline function body. 1383 * addr is in an inline function body.
1372 * Since lineno points one of the lines 1384 * Since lineno points one of the lines
@@ -1374,19 +1386,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
1374 * be the entry line of the inline function. 1386 * be the entry line of the inline function.
1375 */ 1387 */
1376 tmp = dwarf_diename(&indie); 1388 tmp = dwarf_diename(&indie);
1377 if (tmp && 1389 if (!tmp ||
1378 dwarf_decl_line(&spdie, &baseline) == 0) 1390 dwarf_decl_line(&indie, &baseline) != 0)
1379 func = tmp; 1391 break;
1392 func = tmp;
1393 spdie = indie;
1380 } 1394 }
1381 } 1395 }
1396 /* Verify the lineno and baseline are in a same file */
1397 tmp = dwarf_decl_file(&spdie);
1398 if (!tmp || strcmp(tmp, fname) != 0)
1399 lineno = 0;
1382 } 1400 }
1383 1401
1384post: 1402post:
1385 /* Make a relative line number or an offset */ 1403 /* Make a relative line number or an offset */
1386 if (lineno) 1404 if (lineno)
1387 ppt->line = lineno - baseline; 1405 ppt->line = lineno - baseline;
1388 else if (func) 1406 else if (basefunc) {
1389 ppt->offset = addr - (unsigned long)baseaddr; 1407 ppt->offset = addr - (unsigned long)baseaddr;
1408 func = basefunc;
1409 }
1390 1410
1391 /* Duplicate strings */ 1411 /* Duplicate strings */
1392 if (func) { 1412 if (func) {
@@ -1474,7 +1494,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
1474 return 0; 1494 return 0;
1475} 1495}
1476 1496
1477/* Search function from function name */ 1497/* Search function definition from function name */
1478static int line_range_search_cb(Dwarf_Die *sp_die, void *data) 1498static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
1479{ 1499{
1480 struct dwarf_callback_param *param = data; 1500 struct dwarf_callback_param *param = data;
@@ -1485,7 +1505,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
1485 if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) 1505 if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
1486 return DWARF_CB_OK; 1506 return DWARF_CB_OK;
1487 1507
1488 if (dwarf_tag(sp_die) == DW_TAG_subprogram && 1508 if (die_is_func_def(sp_die) &&
1489 die_compare_name(sp_die, lr->function)) { 1509 die_compare_name(sp_die, lr->function)) {
1490 lf->fname = dwarf_decl_file(sp_die); 1510 lf->fname = dwarf_decl_file(sp_die);
1491 dwarf_decl_line(sp_die, &lr->offset); 1511 dwarf_decl_line(sp_die, &lr->offset);
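
convert_to_trace_point() now resolves the probe address through the module's symbol table: dwfl_module_addrsym() returns the symtab symbol covering the address, and the trace point is recorded as that symbol plus an offset rather than the DWARF subprogram name plus its entry pc. The lookup step on its own, as a hedged sketch; resolve_addr() is illustrative.

#include <elfutils/libdwfl.h>

/* Resolve 'addr' to "symbol + offset" within an already-reported module. */
static int resolve_addr(Dwfl_Module *mod, GElf_Addr addr,
			const char **name, unsigned long *offs)
{
	GElf_Sym sym;

	*name = dwfl_module_addrsym(mod, addr, &sym, NULL);
	if (!*name)
		return -1;	/* no symtab symbol covers addr */

	*offs = (unsigned long)(addr - sym.st_value);
	return 0;
}
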
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
23/* debug information structure */ 23/* debug information structure */
24struct debuginfo { 24struct debuginfo {
25 Dwarf *dbg; 25 Dwarf *dbg;
26 Dwfl_Module *mod;
26 Dwfl *dwfl; 27 Dwfl *dwfl;
27 Dwarf_Addr bias; 28 Dwarf_Addr bias;
28}; 29};
@@ -77,6 +78,7 @@ struct probe_finder {
77 78
78struct trace_event_finder { 79struct trace_event_finder {
79 struct probe_finder pf; 80 struct probe_finder pf;
81 Dwfl_Module *mod; /* For solving symbols */
80 struct probe_trace_event *tevs; /* Found trace events */ 82 struct probe_trace_event *tevs; /* Found trace events */
81 int ntevs; /* Number of trace events */ 83 int ntevs; /* Number of trace events */
82 int max_tevs; /* Max number of trace events */ 84 int max_tevs; /* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
84 86
85struct available_var_finder { 87struct available_var_finder {
86 struct probe_finder pf; 88 struct probe_finder pf;
89 Dwfl_Module *mod; /* For solving symbols */
87 struct variable_list *vls; /* Found variable lists */ 90 struct variable_list *vls; /* Found variable lists */
88 int nvls; /* Number of variable lists */ 91 int nvls; /* Number of variable lists */
89 int max_vls; /* Max no. of variable lists */ 92 int max_vls; /* Max no. of variable lists */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..568b750c01f6 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -256,6 +256,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
256 tool->sample = process_event_sample_stub; 256 tool->sample = process_event_sample_stub;
257 if (tool->mmap == NULL) 257 if (tool->mmap == NULL)
258 tool->mmap = process_event_stub; 258 tool->mmap = process_event_stub;
259 if (tool->mmap2 == NULL)
260 tool->mmap2 = process_event_stub;
259 if (tool->comm == NULL) 261 if (tool->comm == NULL)
260 tool->comm = process_event_stub; 262 tool->comm = process_event_stub;
261 if (tool->fork == NULL) 263 if (tool->fork == NULL)
@@ -531,6 +533,9 @@ static int flush_sample_queue(struct perf_session *s,
531 return 0; 533 return 0;
532 534
533 list_for_each_entry_safe(iter, tmp, head, list) { 535 list_for_each_entry_safe(iter, tmp, head, list) {
536 if (session_done())
537 return 0;
538
534 if (iter->timestamp > limit) 539 if (iter->timestamp > limit)
535 break; 540 break;
536 541
@@ -1160,7 +1165,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
1160 } 1165 }
1161} 1166}
1162 1167
1163#define session_done() (*(volatile int *)(&session_done))
1164volatile int session_done; 1168volatile int session_done;
1165 1169
1166static int __perf_session__process_pipe_events(struct perf_session *self, 1170static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1308,7 +1312,7 @@ int __perf_session__process_events(struct perf_session *session,
1308 file_offset = page_offset; 1312 file_offset = page_offset;
1309 head = data_offset - page_offset; 1313 head = data_offset - page_offset;
1310 1314
1311 if (data_offset + data_size < file_size) 1315 if (data_size && (data_offset + data_size < file_size))
1312 file_size = data_offset + data_size; 1316 file_size = data_offset + data_size;
1313 1317
1314 progress_next = file_size / 16; 1318 progress_next = file_size / 16;
@@ -1372,10 +1376,13 @@ more:
1372 "Processing events..."); 1376 "Processing events...");
1373 } 1377 }
1374 1378
1379 err = 0;
1380 if (session_done())
1381 goto out_err;
1382
1375 if (file_pos < file_size) 1383 if (file_pos < file_size)
1376 goto more; 1384 goto more;
1377 1385
1378 err = 0;
1379 /* do the final flush for ordered samples */ 1386 /* do the final flush for ordered samples */
1380 session->ordered_samples.next_flush = ULLONG_MAX; 1387 session->ordered_samples.next_flush = ULLONG_MAX;
1381 err = flush_sample_queue(session, tool); 1388 err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
124 124
125#define perf_session__set_tracepoints_handlers(session, array) \ 125#define perf_session__set_tracepoints_handlers(session, array) \
126 __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array)) 126 __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
127
128extern volatile int session_done;
129
130#define session_done() (*(volatile int *)(&session_done))
127#endif /* __PERF_SESSION_H */ 131#endif /* __PERF_SESSION_H */
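
Rather than every builtin repeating its own "extern volatile int session_done", session.h now exports the flag together with a session_done() accessor that forces a volatile read; the new checks in session.c and hist.c let a SIGINT stop event processing early instead of being ignored until the end of the file. The pattern reduced to a standalone sketch with hypothetical names, mirroring the macro in the hunk.

#include <signal.h>

static volatile int done;				/* set asynchronously by the handler */
#define done_flag() (*(volatile int *)(&done))		/* mirrors session_done() */

static void sig_handler(int sig)
{
	(void)sig;
	done = 1;
}

int main(void)
{
	signal(SIGINT, sig_handler);

	for (long i = 0; i < 1000000000L; i++) {
		if (done_flag())	/* bail out early on Ctrl-C */
			break;
		/* ... process one event ... */
	}
	return 0;
}
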
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
8#include "symbol.h" 8#include "symbol.h"
9#include "debug.h" 9#include "debug.h"
10 10
11#ifndef HAVE_ELF_GETPHDRNUM
12static int elf_getphdrnum(Elf *elf, size_t *dst)
13{
14 GElf_Ehdr gehdr;
15 GElf_Ehdr *ehdr;
16
17 ehdr = gelf_getehdr(elf, &gehdr);
18 if (!ehdr)
19 return -1;
20
21 *dst = ehdr->e_phnum;
22
23 return 0;
24}
25#endif
26
11#ifndef NT_GNU_BUILD_ID 27#ifndef NT_GNU_BUILD_ID
12#define NT_GNU_BUILD_ID 3 28#define NT_GNU_BUILD_ID 3
13#endif 29#endif
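
elf_getphdrnum() is missing from older elfutils releases, so the config/Makefile and feature-tests.mak hunks earlier probe for it and symbol-elf.c falls back to reading e_phnum from the ELF header when HAVE_ELF_GETPHDRNUM is not set. Either way the call site looks the same; a short example that counts PT_LOAD segments, with count_load_segments() being illustrative.

#include <gelf.h>

static int count_load_segments(Elf *elf)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int loads = 0;

	if (elf_getphdrnum(elf, &phdrnum) != 0)
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) != NULL && phdr.p_type == PT_LOAD)
			loads++;
	}
	return loads;
}
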
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
186 char *next = NULL; 186 char *next = NULL;
187 char *addr_str; 187 char *addr_str;
188 char *mod; 188 char *mod;
189 char *fmt; 189 char *fmt = NULL;
190 190
191 line = strtok_r(file, "\n", &next); 191 line = strtok_r(file, "\n", &next);
192 while (line) { 192 while (line) {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 979bff485fb0..a9dd682cf5e3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1064,10 +1064,12 @@ EXPORT_SYMBOL_GPL(gfn_to_hva);
1064unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1064unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1065{ 1065{
1066 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1066 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1067 if (writable) 1067 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1068
1069 if (!kvm_is_error_hva(hva) && writable)
1068 *writable = !memslot_is_readonly(slot); 1070 *writable = !memslot_is_readonly(slot);
1069 1071
1070 return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false); 1072 return hva;
1071} 1073}
1072 1074
1073static int kvm_read_hva(void *data, void __user *hva, int len) 1075static int kvm_read_hva(void *data, void __user *hva, int len)
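
The kvm_main.c fix computes the host virtual address once and only consults the memslot's read-only flag after kvm_is_error_hva() has confirmed the gfn is actually backed by a slot. A hedged sketch of a caller relying on that ordering; guest_frame_if_writable() is hypothetical and the kernel-internal kvm_host.h header is assumed.

#include <linux/kvm_host.h>

/* Hypothetical: return the HVA of a guest frame only if it is writable. */
static void *guest_frame_if_writable(struct kvm *kvm, gfn_t gfn)
{
	bool writable;
	unsigned long hva = gfn_to_hva_prot(kvm, gfn, &writable);

	if (kvm_is_error_hva(hva) || !writable)
		return NULL;		/* no memslot, or a read-only slot */

	return (void *)hva;
}
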