-rw-r--r--CREDITS3
-rw-r--r--Documentation/ABI/stable/sysfs-bus-usb8
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power32
-rw-r--r--Documentation/ABI/testing/sysfs-power22
-rw-r--r--Documentation/acpi/dsdt-override.txt2
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/block/cmdline-partition.txt8
-rw-r--r--Documentation/connector/ucon.c2
-rw-r--r--Documentation/devicetree/bindings/memory.txt168
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt (renamed from Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt)8
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt17
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt)0
-rw-r--r--Documentation/kernel-parameters.txt10
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt7
-rw-r--r--MAINTAINERS158
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arc/include/asm/spinlock.h9
-rw-r--r--arch/arc/include/asm/uaccess.h4
-rw-r--r--arch/arc/kernel/ptrace.c2
-rw-r--r--arch/arc/kernel/signal.c25
-rw-r--r--arch/arc/kernel/time.c7
-rw-r--r--arch/arc/kernel/unaligned.c6
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Makefile9
-rw-r--r--arch/arm/boot/Makefile16
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-370-netgear-rn102.dts49
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi11
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi6
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi5
-rw-r--r--arch/arm/boot/dts/integratorcp.dts9
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/prima2.dtsi27
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi6
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi1
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi9
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi6
-rw-r--r--arch/arm/boot/install.sh14
-rw-r--r--arch/arm/common/edma.c38
-rw-r--r--arch/arm/common/mcpm_entry.c6
-rw-r--r--arch/arm/common/sharpsl_param.c5
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/crypto/aes-armv4.S6
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/jump_label.h2
-rw-r--r--arch/arm/include/asm/mcpm.h14
-rw-r--r--arch/arm/include/asm/syscall.h6
-rw-r--r--arch/arm/include/asm/uaccess.h7
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-header.S8
-rw-r--r--arch/arm/kernel/head.S21
-rw-r--r--arch/arm/kvm/reset.c6
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_reset.S8
-rw-r--r--arch/arm/mach-at91/at91x40_time.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h4
-rw-r--r--arch/arm/mach-integrator/pci_v3.h7
-rw-r--r--arch/arm/mach-mvebu/coherency.c8
-rw-r--r--arch/arm/mach-mvebu/pmsu.c1
-rw-r--r--arch/arm/mach-mvebu/system-controller.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c9
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c12
-rw-r--r--arch/arm/mach-omap2/mux.h4
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-lager.c27
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c11
-rw-r--r--arch/arm/mm/dma-mapping.c43
-rw-r--r--arch/arm/mm/init.c3
-rw-r--r--arch/arm/net/bpf_jit_32.c1
-rw-r--r--arch/arm64/Kconfig.debug7
-rw-r--r--arch/arm64/configs/defconfig5
-rw-r--r--arch/arm64/include/asm/uaccess.h10
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/mm/tlb.S2
-rw-r--r--arch/avr32/include/asm/Kbuild16
-rw-r--r--arch/avr32/include/asm/cputime.h6
-rw-r--r--arch/avr32/include/asm/delay.h1
-rw-r--r--arch/avr32/include/asm/device.h7
-rw-r--r--arch/avr32/include/asm/div64.h6
-rw-r--r--arch/avr32/include/asm/emergency-restart.h6
-rw-r--r--arch/avr32/include/asm/futex.h6
-rw-r--r--arch/avr32/include/asm/irq_regs.h1
-rw-r--r--arch/avr32/include/asm/local.h6
-rw-r--r--arch/avr32/include/asm/local64.h1
-rw-r--r--arch/avr32/include/asm/percpu.h6
-rw-r--r--arch/avr32/include/asm/scatterlist.h6
-rw-r--r--arch/avr32/include/asm/sections.h6
-rw-r--r--arch/avr32/include/asm/topology.h6
-rw-r--r--arch/avr32/include/asm/xor.h6
-rw-r--r--arch/avr32/kernel/process.c2
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/mips/alchemy/board-mtx1.c2
-rw-r--r--arch/mips/include/asm/cpu-features.h2
-rw-r--r--arch/mips/include/asm/jump_label.h2
-rw-r--r--arch/mips/kernel/octeon_switch.S2
-rw-r--r--arch/mips/kernel/r2300_switch.S2
-rw-r--r--arch/mips/kernel/r4k_switch.S2
-rw-r--r--arch/mips/mm/c-r4k.c2
-rw-r--r--arch/mips/mm/dma-default.c12
-rw-r--r--arch/openrisc/include/asm/prom.h44
-rw-r--r--arch/parisc/configs/712_defconfig2
-rw-r--r--arch/parisc/configs/a500_defconfig2
-rw-r--r--arch/parisc/configs/b180_defconfig3
-rw-r--r--arch/parisc/configs/c3000_defconfig3
-rw-r--r--arch/parisc/configs/c8000_defconfig2
-rw-r--r--arch/parisc/configs/default_defconfig2
-rw-r--r--arch/parisc/include/asm/traps.h2
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/smp.c8
-rw-r--r--arch/parisc/kernel/traps.c11
-rw-r--r--arch/parisc/lib/memcpy.c15
-rw-r--r--arch/parisc/mm/fault.c18
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/epapr-wrapper.c9
-rw-r--r--arch/powerpc/boot/epapr.c4
-rw-r--r--arch/powerpc/boot/of.c16
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/include/asm/irq.h4
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c101
-rw-r--r--arch/powerpc/kernel/misc_32.S25
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c21
-rw-r--r--arch/powerpc/kernel/sysfs.c18
-rw-r--r--arch/powerpc/kernel/tm.S95
-rw-r--r--arch/powerpc/kernel/vio.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c18
-rw-r--r--arch/powerpc/lib/checksum_64.S58
-rw-r--r--arch/powerpc/lib/sstep.c3
-rw-r--r--arch/powerpc/mm/init_64.c4
-rw-r--r--arch/powerpc/mm/mem.c9
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c1
-rw-r--r--arch/powerpc/perf/power8-pmu.c5
-rw-r--r--arch/powerpc/platforms/pseries/smp.c26
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/jump_label.h2
-rw-r--r--arch/s390/include/asm/mutex.h2
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/spinlock.h5
-rw-r--r--arch/s390/include/asm/timex.h28
-rw-r--r--arch/s390/kernel/compat_signal.c4
-rw-r--r--arch/s390/kernel/crash_dump.c42
-rw-r--r--arch/s390/kernel/debug.c2
-rw-r--r--arch/s390/kernel/entry.S1
-rw-r--r--arch/s390/kernel/entry64.S1
-rw-r--r--arch/s390/kernel/kprobes.c6
-rw-r--r--arch/s390/kvm/interrupt.c6
-rw-r--r--arch/s390/lib/delay.c14
-rw-r--r--arch/s390/net/bpf_jit_comp.c4
-rw-r--r--arch/score/Kconfig4
-rw-r--r--arch/score/Makefile4
-rw-r--r--arch/score/include/asm/checksum.h93
-rw-r--r--arch/score/include/asm/io.h1
-rw-r--r--arch/score/include/asm/pgalloc.h2
-rw-r--r--arch/score/kernel/entry.S4
-rw-r--r--arch/score/kernel/process.c4
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/include/asm/floppy_64.h2
-rw-r--r--arch/sparc/include/asm/jump_label.h2
-rw-r--r--arch/sparc/kernel/Makefile3
-rw-r--r--arch/sparc/kernel/ds.c5
-rw-r--r--arch/sparc/kernel/ldc.c4
-rw-r--r--arch/sparc/net/bpf_jit_comp.c1
-rw-r--r--arch/tile/include/asm/atomic.h5
-rw-r--r--arch/tile/include/asm/atomic_32.h27
-rw-r--r--arch/tile/include/asm/cmpxchg.h28
-rw-r--r--arch/tile/include/asm/percpu.h34
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/intvec_32.S3
-rw-r--r--arch/tile/kernel/intvec_64.S3
-rw-r--r--arch/tile/kernel/stack.c12
-rw-r--r--arch/tile/lib/atomic_32.c8
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/include/asm/cpufeature.h6
-rw-r--r--arch/x86/include/asm/jump_label.h2
-rw-r--r--arch/x86/include/asm/mutex_64.h4
-rw-r--r--arch/x86/include/asm/xen/page.h31
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c21
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c10
-rw-r--r--arch/x86/kernel/kvm.c17
-rw-r--r--arch/x86/kernel/microcode_amd.c1
-rw-r--r--arch/x86/kernel/reboot.c26
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c4
-rw-r--r--arch/x86/kvm/vmx.c28
-rw-r--r--arch/x86/net/bpf_jit_comp.c18
-rw-r--r--arch/x86/pci/mmconfig-shared.c7
-rw-r--r--arch/x86/platform/efi/efi.c11
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--arch/x86/xen/smp.c9
-rw-r--r--arch/x86/xen/spinlock.c26
-rw-r--r--block/Kconfig9
-rw-r--r--block/Makefile2
-rw-r--r--block/partitions/Kconfig4
-rw-r--r--block/partitions/cmdline.c8
-rw-r--r--block/partitions/efi.c7
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/acpi_ipmi.c24
-rw-r--r--drivers/acpi/device_pm.c56
-rw-r--r--drivers/acpi/power.c104
-rw-r--r--drivers/acpi/scan.c5
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libahci.c10
-rw-r--r--drivers/ata/libata-acpi.c14
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/ata/pata_isapnp.c2
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/base/memory.c7
-rw-r--r--drivers/bcma/driver_pci.c49
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c1
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c12
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/char/tpm/xen-tpmfront.c37
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c3
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c10
-rw-r--r--drivers/connector/cn_proc.c18
-rw-r--r--drivers/connector/connector.c9
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c2
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c51
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/imx-dma.c31
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c9
-rw-r--r--drivers/gpio/gpio-lynxpoint.c5
-rw-r--r--drivers/gpio/gpio-omap.c158
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpio/gpiolib.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c9
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c109
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c42
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c13
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c54
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c26
-rw-r--r--drivers/gpu/drm/radeon/cik.c27
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/r100.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c44
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/si.c11
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c13
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c4
-rw-r--r--drivers/hid/hid-roccat-pyra.c4
-rw-r--r--drivers/hid/hid-wiimote-core.c5
-rw-r--r--drivers/hid/hid-wiimote-modules.c40
-rw-r--r--drivers/hid/hid-wiimote.h4
-rw-r--r--drivers/hid/hidraw.c21
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hv/connection.c2
-rw-r--r--drivers/hv/hv_kvp.c38
-rw-r--r--drivers/hv/hv_snapshot.c6
-rw-r--r--drivers/hv/hv_util.c71
-rw-r--r--drivers/hwmon/applesmc.c24
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c26
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c16
-rw-r--r--drivers/i2c/busses/i2c-mxs.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-stu300.c11
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c14
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c4
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/industrialio-buffer.c3
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c16
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c70
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c80
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/arm-smmu.c13
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c39
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/journal.c33
-rw-r--r--drivers/md/bcache/request.c20
-rw-r--r--drivers/md/bcache/sysfs.c9
-rw-r--r--drivers/md/bcache/util.c11
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/dm-io.c7
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-snap-persistent.c20
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-stats.c23
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c1
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/dvb-frontends/tda10071.c9
-rw-r--r--drivers/media/i2c/ad9389b.c15
-rw-r--r--drivers/media/i2c/adv7511.c18
-rw-r--r--drivers/media/i2c/adv7842.c30
-rw-r--r--drivers/media/i2c/ths8200.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c5
-rw-r--r--drivers/media/tuners/e4000.c3
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c18
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c87
-rw-r--r--drivers/misc/mei/amthif.c1
-rw-r--r--drivers/misc/mei/bus.c5
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/hbm.c10
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/mei_dev.h6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c16
-rw-r--r--drivers/mtd/devices/m25p80.c17
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c7
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/dev.c10
-rw-r--r--drivers/net/can/flexcan.c24
-rw-r--r--drivers/net/can/slcan.c139
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c189
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c412
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c52
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c23
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c76
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c38
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c162
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c52
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c87
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c28
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h56
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic.h12
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/ieee802154/mrf24j40.c31
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c23
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/usbnet.c29
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vxlan.c9
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c26
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c28
-rw-r--r--drivers/net/wireless/cw1200/fwio.c2
-rw-r--r--drivers/net/wireless/cw1200/hwbus.h1
-rw-r--r--drivers/net/wireless/cw1200/hwio.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c42
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/mwifiex/join.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/xenbus.c152
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/of_reserved_mem.c173
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c8
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c5
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/sony-laptop.c26
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/ti-abb-regulator.c16
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c98
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/sclp_cmd.c8
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/qdio_main.c10
-rw-r--r--drivers/scsi/BusLogic.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c9
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/spi/spi-atmel.c3
-rw-r--r--drivers/spi/spi-clps711x.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c10
-rw-r--r--drivers/spi/spi-mpc512x-psc.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c11
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c25
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c11
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/media/msi3101/Kconfig1
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c10
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_sbc.c33
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/target_core_xcopy.c57
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h7
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c30
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h13
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c1
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c14
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/tty/n_tty.c49
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c5
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c7
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/host.c6
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/f_fs.c62
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c17
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c22
-rw-r--r--drivers/usb/host/ohci-q.c26
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/xhci-hub.c73
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c27
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/host/xhci.h13
-rw-r--r--drivers/usb/misc/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c46
-rw-r--r--drivers/usb/musb/musb_core.h1
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_virthub.c46
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c11
-rw-r--r--drivers/usb/serial/option.c228
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c1
-rw-r--r--drivers/usb/storage/scsiglue.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vfio/vfio_iommu_type1.c40
-rw-r--r--drivers/vhost/scsi.c9
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c17
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/of_display_timing.c6
-rw-r--r--drivers/video/omap2/displays-new/Kconfig1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c1
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/w1/w1.c6
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/kempld_wdt.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c4
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--fs/afs/dir.c3
-rw-r--r--fs/aio.c52
-rw-r--r--fs/binfmt_elf.c30
-rw-r--r--fs/bio.c4
-rw-r--r--fs/btrfs/async-thread.c25
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/disk-io.c9
-rw-r--r--fs/btrfs/disk-io.h13
-rw-r--r--fs/btrfs/extent_io.c22
-rw-r--r--fs/btrfs/inode.c3
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/root-tree.c8
-rw-r--r--fs/btrfs/transaction.c7
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/buffer.c14
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h5
-rw-r--r--fs/cifs/cifspdu.h52
-rw-r--r--fs/cifs/cifssmb.c41
-rw-r--r--fs/cifs/file.c8
-rw-r--r--fs/cifs/fscache.c7
-rw-r--r--fs/cifs/fscache.h13
-rw-r--r--fs/cifs/inode.c45
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/readdir.c3
-rw-r--r--fs/cifs/sess.c88
-rw-r--r--fs/cifs/smb2pdu.c6
-rw-r--r--fs/cifs/smbfsctl.h14
-rw-r--r--fs/cifs/transport.c9
-rw-r--r--fs/dcache.c15
-rw-r--r--fs/ecryptfs/crypto.c2
-rw-r--r--fs/ecryptfs/keystore.c3
-rw-r--r--fs/ext3/namei.c5
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/namei.c5
-rw-r--r--fs/ext4/xattr.c2
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/fuse/dir.c20
-rw-r--r--fs/fuse/file.c23
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/jfs/jfs_inode.c3
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/nfs4file.c3
-rw-r--r--fs/nfs/nfs4filelayoutdev.c20
-rw-r--r--fs/nfs/nfs4proc.c58
-rw-r--r--fs/nilfs2/page.c2
-rw-r--r--fs/nilfs2/segment.c11
-rw-r--r--fs/ocfs2/dcache.c7
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/proc/inode.c10
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--fs/reiserfs/journal.c67
-rw-r--r--fs/seq_file.c2
-rw-r--r--fs/statfs.c2
-rw-r--r--fs/super.c4
-rw-r--r--fs/sysv/super.c1
-rw-r--r--fs/udf/ialloc.c16
-rw-r--r--fs/udf/super.c64
-rw-r--r--fs/udf/udf_sb.h2
-rw-r--r--fs/xfs/xfs_buf_item.c1
-rw-r--r--fs/xfs/xfs_da_btree.c5
-rw-r--r--fs/xfs/xfs_dir2_block.c6
-rw-r--r--fs/xfs/xfs_dir2_format.h51
-rw-r--r--fs/xfs/xfs_dir2_readdir.c4
-rw-r--r--fs/xfs/xfs_dir2_sf.c6
-rw-r--r--fs/xfs/xfs_dquot.c19
-rw-r--r--fs/xfs/xfs_fs.h2
-rw-r--r--fs/xfs/xfs_icache.c9
-rw-r--r--fs/xfs/xfs_log_recover.c74
-rw-r--r--include/acpi/acpi_bus.h7
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/asm-generic/vtime.h1
-rw-r--r--include/dt-bindings/pinctrl/omap.h4
-rw-r--r--include/linux/balloon_compaction.h25
-rw-r--r--include/linux/bcma/bcma_driver_pci.h1
-rw-r--r--include/linux/compiler-gcc4.h15
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/filter.h15
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/kernel.h11
-rw-r--r--include/linux/memcontrol.h105
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx5/device.h4
-rw-r--r--include/linux/mlx5/driver.h6
-rw-r--r--include/linux/mutex.h6
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/of_irq.h20
-rw-r--r--include/linux/of_reserved_mem.h14
-rw-r--r--include/linux/perf_event.h24
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/smp.h6
-rw-r--r--include/linux/timex.h14
-rw-r--r--include/linux/usb/usb_phy_gen_xceiv.h2
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/vgaarb.h7
-rw-r--r--include/linux/yam.h2
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/cipso_ipv4.h6
-rw-r--r--include/net/dst.h12
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_vs.h9
-rw-r--r--include/net/mac802154.h2
-rw-r--r--include/net/mrp.h1
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/secure_seq.h1
-rw-r--r--include/net/sock.h11
-rw-r--r--include/sound/rcar_snd.h1
-rw-r--r--include/trace/events/target.h4
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/perf_event.h15
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--include/uapi/linux/tc_act/tc_defact.h (renamed from include/linux/tc_act/tc_defact.h)2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h6
-rw-r--r--init/main.c2
-rw-r--r--ipc/msg.c32
-rw-r--r--ipc/sem.c256
-rw-r--r--ipc/shm.c17
-rw-r--r--ipc/util.c59
-rw-r--r--ipc/util.h10
-rw-r--r--kernel/audit.c5
-rw-r--r--kernel/cgroup.c14
-rw-r--r--kernel/context_tracking.c12
-rw-r--r--kernel/events/core.c31
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/mutex.c32
-rw-r--r--kernel/params.c6
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/power/user.c8
-rw-r--r--kernel/reboot.c9
-rw-r--r--kernel/sched/fair.c9
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/time/clockevents.c65
-rw-r--r--kernel/watchdog.c60
-rw-r--r--lib/hexdump.c2
-rw-r--r--lib/kobject.c7
-rw-r--r--lib/lockref.c23
-rw-r--r--lib/percpu-refcount.c3
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/filemap.c11
-rw-r--r--mm/huge_memory.c10
-rw-r--r--mm/hugetlb.c17
-rw-r--r--mm/hwpoison-inject.c5
-rw-r--r--mm/madvise.c5
-rw-r--r--mm/memcontrol.c703
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/memory.c20
-rw-r--r--mm/migrate.c4
-rw-r--r--mm/mlock.c9
-rw-r--r--mm/mprotect.c7
-rw-r--r--mm/mremap.c5
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/swapfile.c4
-rw-r--r--mm/vmscan.c88
-rw-r--r--mm/zswap.c4
-rw-r--r--net/802/mrp.c27
-rw-r--r--net/8021q/vlan_netlink.c2
-rw-r--r--net/batman-adv/main.c5
-rw-r--r--net/batman-adv/network-coding.c28
-rw-r--r--net/batman-adv/network-coding.h14
-rw-r--r--net/bluetooth/hci_core.c26
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/l2cap_core.c7
-rw-r--r--net/bluetooth/rfcomm/tty.c35
-rw-r--r--net/bridge/br_fdb.c4
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_multicast.c38
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c125
-rw-r--r--net/compat.c2
-rw-r--r--net/core/dev.c52
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/secure_seq.c29
-rw-r--r--net/core/sock.c1
-rw-r--r--net/ieee802154/6lowpan.c5
-rw-r--r--net/ipv4/af_inet.c4
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_output.c13
-rw-r--r--net/ipv4/ip_tunnel.c22
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/ip_vti.c14
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c10
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_output.c31
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_policy.c1
-rw-r--r--net/ipv6/addrconf.c79
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/inet6_hashtables.c2
-rw-r--r--net/ipv6/ip6_gre.c10
-rw-r--r--net/ipv6/ip6_output.c78
-rw-r--r--net/ipv6/ip6_tunnel.c15
-rw-r--r--net/ipv6/ipcomp6.c3
-rw-r--r--net/ipv6/mcast.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c10
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/route.c46
-rw-r--r--net/ipv6/sit.c86
-rw-r--r--net/ipv6/udp.c9
-rw-r--r--net/ipv6/xfrm6_policy.c1
-rw-r--r--net/key/af_key.c3
-rw-r--r--net/l2tp/l2tp_core.c36
-rw-r--r--net/l2tp/l2tp_core.h3
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/lapb/lapb_timer.c1
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c19
-rw-r--r--net/mac80211/status.c3
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/mac80211/util.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c86
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c72
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c62
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c6
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c4
-rw-r--r--net/netfilter/nf_synproxy_core.c12
-rw-r--r--net/sched/sch_fq.c102
-rw-r--r--net/sched/sch_netem.c17
-rw-r--r--net/sctp/output.c3
-rw-r--r--net/socket.c24
-rw-r--r--net/sysctl_net.c4
-rw-r--r--net/unix/af_unix.c10
-rw-r--r--net/unix/diag.c1
-rw-r--r--net/wireless/core.c23
-rw-r--r--net/wireless/core.h3
-rw-r--r--net/wireless/ibss.c3
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/radiotap.c7
-rw-r--r--net/xfrm/xfrm_policy.c28
-rw-r--r--net/xfrm/xfrm_replay.c54
-rw-r--r--net/xfrm/xfrm_user.c5
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/apparmor/crypto.c34
-rw-r--r--security/apparmor/include/policy.h4
-rw-r--r--security/apparmor/policy.c4
-rw-r--r--security/selinux/avc.c9
-rw-r--r--security/selinux/hooks.c15
-rw-r--r--security/selinux/include/avc.h18
-rw-r--r--sound/core/compress_offload.c15
-rw-r--r--sound/pci/ac97/ac97_codec.c1
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/patch_cirrus.c72
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_hdmi.c65
-rw-r--r--sound/pci/hda/patch_realtek.c70
-rw-r--r--sound/pci/rme9652/hdsp.c1
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c1
-rw-r--r--sound/soc/codecs/88pm860x-codec.c3
-rw-r--r--sound/soc/codecs/ab8500-codec.c7
-rw-r--r--sound/soc/codecs/max98095.c4
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/pcm1792a.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/fsl/imx-mc13783.c2
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c7
-rw-r--r--sound/soc/fsl/imx-ssi.c23
-rw-r--r--sound/soc/fsl/imx-ssi.h2
-rw-r--r--sound/soc/omap/Kconfig4
-rw-r--r--sound/soc/sh/rcar/rsnd.h4
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/usb/usx2y/us122l.c4
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c22
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c7
-rw-r--r--tools/lib/lk/debugfs.c1
-rw-r--r--tools/perf/Makefile1
-rw-r--r--tools/perf/arch/x86/util/tsc.c6
-rw-r--r--tools/perf/builtin-inject.c2
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c1
-rw-r--r--tools/perf/builtin-trace.c18
-rw-r--r--tools/perf/config/Makefile5
-rw-r--r--tools/perf/config/feature-tests.mak10
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/dwarf-aux.c44
-rw-r--r--tools/perf/util/dwarf-aux.h9
-rw-r--r--tools/perf/util/event.c30
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/header.c53
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/probe-finder.c138
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/session.c13
-rw-r--r--tools/perf/util/session.h4
-rw-r--r--tools/perf/util/symbol-elf.c16
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/testing/selftests/timers/posix_timers.c2
-rw-r--r--virt/kvm/kvm_main.c6
969 files changed, 9217 insertions, 5670 deletions
diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e..0640e1650483 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
diff --git a/Documentation/ABI/stable/sysfs-bus-usb b/Documentation/ABI/stable/sysfs-bus-usb
index 2be603c52a24..a6b685724740 100644
--- a/Documentation/ABI/stable/sysfs-bus-usb
+++ b/Documentation/ABI/stable/sysfs-bus-usb
@@ -37,8 +37,8 @@ Description:
 that the USB device has been connected to the machine. This
 file is read-only.
 Users:
-	PowerTOP <power@bughost.org>
-	http://www.lesswatts.org/projects/powertop/
+	PowerTOP <powertop@lists.01.org>
+	https://01.org/powertop/
 
 What: /sys/bus/usb/device/.../power/active_duration
 Date: January 2008
@@ -57,8 +57,8 @@ Description:
 will give an integer percentage. Note that this does not
 account for counter wrap.
 Users:
-	PowerTOP <power@bughost.org>
-	http://www.lesswatts.org/projects/powertop/
+	PowerTOP <powertop@lists.01.org>
+	https://01.org/powertop/
 
 What: /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
 Date: January 2008
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 9d43e7670841..efe449bdf811 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -1,6 +1,6 @@
 What: /sys/devices/.../power/
 Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power directory contains attributes
 allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup
 Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power/wakeup attribute allows the user
 space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:
 
 What: /sys/devices/.../power/control
 Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power/control attribute allows the user
 space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:
 
 What: /sys/devices/.../power/async
 Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../async attribute allows the user space to
 enable or diasble the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_count
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_count attribute contains the number
 of signaled wakeup events associated with the device. This
@@ -88,7 +88,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_active_count
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_active_count attribute contains the
 number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_abort_count
 Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_abort_count attribute contains the
 number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_expire_count
 Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_expire_count attribute contains the
 number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_active
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_active attribute contains either 1,
 or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_total_time_ms
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_total_time_ms attribute contains
 the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_max_time_ms
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_max_time_ms attribute contains
 the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_last_time_ms
 Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_last_time_ms attribute contains
 the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:
 
 What: /sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
 contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:
 
 What: /sys/devices/.../power/pm_qos_latency_us
 Date: March 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power/pm_qos_resume_latency_us attribute
 contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:
 
 What: /sys/devices/.../power/pm_qos_no_power_off
 Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power/pm_qos_no_power_off attribute
 is used for manipulating the PM QoS "no power off" flag. If
@@ -222,7 +222,7 @@ Description:
 
 What: /sys/devices/.../power/pm_qos_remote_wakeup
 Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/devices/.../power/pm_qos_remote_wakeup attribute
 is used for manipulating the PM QoS "remote wakeup required"
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 217772615d02..205a73878441 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -1,6 +1,6 @@
 What: /sys/power/
 Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power directory will contain files that will
 provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:
 
 What: /sys/power/state
 Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/state file controls the system power state.
 Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:
 
 What: /sys/power/disk
 Date: September 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/disk file controls the operating mode of the
 suspend-to-disk mechanism. Reading from this file returns
@@ -67,7 +67,7 @@ Description:
 
 What: /sys/power/image_size
 Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/image_size file controls the size of the image
 created by the suspend-to-disk mechanism. It can be written a
@@ -84,7 +84,7 @@ Description:
 
 What: /sys/power/pm_trace
 Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/pm_trace file controls the code which saves the
 last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:
 
 What: /sys/power/pm_async
 Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/pm_async file controls the switch allowing the
 user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:
 
 What: /sys/power/wakeup_count
 Date: July 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/wakeup_count file allows user space to put the
 system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:
 
 What: /sys/power/reserved_size
 Date: May 2011
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/reserved_size file allows user space to control
 the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:
 
 What: /sys/power/autosleep
 Date: April 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/autosleep file can be written one of the strings
 returned by reads from /sys/power/state. If that happens, a
@@ -192,7 +192,7 @@ Description:
 
 What: /sys/power/wake_lock
 Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/wake_lock file allows user space to create
 wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:
 
 What: /sys/power/wake_unlock
 Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 The /sys/power/wake_unlock file allows user space to deactivate
 wakeup sources created with the help of /sys/power/wake_lock.
diff --git a/Documentation/acpi/dsdt-override.txt b/Documentation/acpi/dsdt-override.txt
index febbb1ba4d23..784841caa6e6 100644
--- a/Documentation/acpi/dsdt-override.txt
+++ b/Documentation/acpi/dsdt-override.txt
@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
 
 When to use this method is described in detail on the
 Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d18ecd827c40..929d9904f74b 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -6,6 +6,8 @@ capability.txt
   - Generic Block Device Capability (/sys/block/<device>/capability)
 cfq-iosched.txt
   - CFQ IO scheduler tunables
+cmdline-partition.txt
+  - how to specify block device partitions on kernel command line
 data-integrity.txt
   - Block data integrity
 deadline-iosched.txt
diff --git a/Documentation/block/cmdline-partition.txt b/Documentation/block/cmdline-partition.txt
index 2bbf4cc40c3f..525b9f6d7fb4 100644
--- a/Documentation/block/cmdline-partition.txt
+++ b/Documentation/block/cmdline-partition.txt
@@ -1,9 +1,9 @@
1Embedded device command line partition 1Embedded device command line partition parsing
2===================================================================== 2=====================================================================
3 3
4Read block device partition table from command line. 4Support for reading the block device partition table from the command line.
5The partition used for fixed block device (eMMC) embedded device. 5It is typically used for fixed block (eMMC) embedded devices.
6It is no MBR, save storage space. Bootloader can be easily accessed 6It has no MBR, so saves storage space. Bootloader can be easily accessed
7by absolute address of data on the block device. 7by absolute address of data on the block device.
8Users can easily change the partition. 8Users can easily change the partition.
9 9
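For readers meeting this mechanism for the first time, the parser consumes a boot parameter of roughly the following shape (the device name, sizes and partition names here are invented for illustration; the exact grammar is given later in cmdline-partition.txt itself):

	blkdevparts=mmcblk0:1G(data),8M(kernel),-(rootfs)

Each comma-separated entry is a size followed by a partition name in parentheses, with '-' conventionally standing for whatever space remains on the device.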
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c
index 4848db8c71ff..8a4da64e02a8 100644
--- a/Documentation/connector/ucon.c
+++ b/Documentation/connector/ucon.c
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
71 nlh->nlmsg_seq = seq++; 71 nlh->nlmsg_seq = seq++;
72 nlh->nlmsg_pid = getpid(); 72 nlh->nlmsg_pid = getpid();
73 nlh->nlmsg_type = NLMSG_DONE; 73 nlh->nlmsg_type = NLMSG_DONE;
74 nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); 74 nlh->nlmsg_len = size;
75 nlh->nlmsg_flags = 0; 75 nlh->nlmsg_flags = 0;
76 76
77 m = NLMSG_DATA(nlh); 77 m = NLMSG_DATA(nlh);
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
deleted file mode 100644
index eb2469365593..000000000000
--- a/Documentation/devicetree/bindings/memory.txt
+++ /dev/null
@@ -1,168 +0,0 @@
1*** Memory binding ***
2
3The /memory node provides basic information about the address and size
4of the physical memory. This node is usually filled or updated by the
5bootloader, depending on the actual memory configuration of the given
6hardware.
7
8The memory layout is described by the following node:
9
10/ {
11 #address-cells = <(n)>;
12 #size-cells = <(m)>;
13 memory {
14 device_type = "memory";
15 reg = <(baseaddr1) (size1)
16 (baseaddr2) (size2)
17 ...
18 (baseaddrN) (sizeN)>;
19 };
20 ...
21};
22
23A memory node follows the typical device tree rules for "reg" property:
24n: number of cells used to store base address value
25m: number of cells used to store size value
26baseaddrX: defines a base address of the defined memory bank
27sizeX: the size of the defined memory bank
28
29
30More than one memory bank can be defined.
31
32
33*** Reserved memory regions ***
34
35In /memory/reserved-memory node one can create child nodes describing
36particular reserved (excluded from normal use) memory regions. Such
37memory regions are usually designed for the special usage by various
38device drivers. A good example are contiguous memory allocations or
39memory sharing with other operating system on the same hardware board.
40Those special memory regions might depend on the board configuration and
41devices used on the target system.
42
43Parameters for each memory region can be encoded into the device tree
44with the following convention:
45
46[(label):] (name) {
47 compatible = "linux,contiguous-memory-region", "reserved-memory-region";
48 reg = <(address) (size)>;
49 (linux,default-contiguous-region);
50};
51
52compatible: one or more of:
53 - "linux,contiguous-memory-region" - enables binding of this
54 region to Contiguous Memory Allocator (special region for
55 contiguous memory allocations, shared with movable system
56 memory, Linux kernel-specific).
57 - "reserved-memory-region" - compatibility is defined, given
58 region is assigned for exclusive usage for by the respective
59 devices.
60
61reg: standard property defining the base address and size of
62 the memory region
63
64linux,default-contiguous-region: property indicating that the region
65 is the default region for all contiguous memory
66 allocations, Linux specific (optional)
67
68It is optional to specify the base address, so if one wants to use
69autoconfiguration of the base address, '0' can be specified as a base
70address in the 'reg' property.
71
72The /memory/reserved-memory node must contain the same #address-cells
73and #size-cells value as the root node.
74
75
76*** Device node's properties ***
77
78Once regions in the /memory/reserved-memory node have been defined, they
79may be referenced by other device nodes. Bindings that wish to reference
80memory regions should explicitly document their use of the following
81property:
82
83memory-region = <&phandle_to_defined_region>;
84
85This property indicates that the device driver should use the memory
86region pointed by the given phandle.
87
88
89*** Example ***
90
91This example defines a memory consisting of 4 memory banks. 3 contiguous
92regions are defined for Linux kernel, one default of all device drivers
93(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
94framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
95and one for multimedia processing (labelled multimedia_mem, placed at
960x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
97device for DMA memory allocations (Linux kernel drivers will use CMA is
98available or dma-exclusive usage otherwise). 'multimedia_mem' is
99assigned to scaler@12500000 and codec@12600000 devices for contiguous
100memory allocations when CMA driver is enabled.
101
102The reason for creating a separate region for framebuffer device is to
103match the framebuffer base address to the one configured by bootloader,
104so once Linux kernel drivers starts no glitches on the displayed boot
105logo appears. Scaller and codec drivers should share the memory
106allocations.
107
108/ {
109 #address-cells = <1>;
110 #size-cells = <1>;
111
112 /* ... */
113
114 memory {
115 reg = <0x40000000 0x10000000
116 0x50000000 0x10000000
117 0x60000000 0x10000000
118 0x70000000 0x10000000>;
119
120 reserved-memory {
121 #address-cells = <1>;
122 #size-cells = <1>;
123
124 /*
125 * global autoconfigured region for contiguous allocations
126 * (used only with Contiguous Memory Allocator)
127 */
128 contig_region@0 {
129 compatible = "linux,contiguous-memory-region";
130 reg = <0x0 0x4000000>;
131 linux,default-contiguous-region;
132 };
133
134 /*
135 * special region for framebuffer
136 */
137 display_region: region@78000000 {
138 compatible = "linux,contiguous-memory-region", "reserved-memory-region";
139 reg = <0x78000000 0x800000>;
140 };
141
142 /*
143 * special region for multimedia processing devices
144 */
145 multimedia_region: region@77000000 {
146 compatible = "linux,contiguous-memory-region";
147 reg = <0x77000000 0x4000000>;
148 };
149 };
150 };
151
152 /* ... */
153
154 fb0: fb@12300000 {
155 status = "okay";
156 memory-region = <&display_region>;
157 };
158
159 scaler: scaler@12500000 {
160 status = "okay";
161 memory-region = <&multimedia_region>;
162 };
163
164 codec: codec@12600000 {
165 status = "okay";
166 memory-region = <&multimedia_region>;
167 };
168};
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index 6d1c0988cfc7..c67b975c8906 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -1,11 +1,11 @@
1* Samsung Exynos specific extensions to the Synopsis Designware Mobile 1* Samsung Exynos specific extensions to the Synopsys Designware Mobile
2 Storage Host Controller 2 Storage Host Controller
3 3
4The Synopsis designware mobile storage host controller is used to interface 4The Synopsys designware mobile storage host controller is used to interface
5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents 5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
6differences between the core Synopsis dw mshc controller properties described 6differences between the core Synopsys dw mshc controller properties described
7by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific 7by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
8extensions to the Synopsis Designware Mobile Storage Host Controller. 8extensions to the Synopsys Designware Mobile Storage Host Controller.
9 9
10Required Properties: 10Required Properties:
11 11
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 8a3d91d47b6a..c559f3f36309 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -1,11 +1,11 @@
1* Rockchip specific extensions to the Synopsis Designware Mobile 1* Rockchip specific extensions to the Synopsys Designware Mobile
2 Storage Host Controller 2 Storage Host Controller
3 3
4The Synopsis designware mobile storage host controller is used to interface 4The Synopsys designware mobile storage host controller is used to interface
5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents 5a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
6differences between the core Synopsis dw mshc controller properties described 6differences between the core Synopsys dw mshc controller properties described
7by synopsis-dw-mshc.txt and the properties used by the Rockchip specific 7by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
8extensions to the Synopsis Designware Mobile Storage Host Controller. 8extensions to the Synopsys Designware Mobile Storage Host Controller.
9 9
10Required Properties: 10Required Properties:
11 11
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index cdcebea9c6f5..066a78b034ca 100644
--- a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -1,14 +1,14 @@
1* Synopsis Designware Mobile Storage Host Controller 1* Synopsys Designware Mobile Storage Host Controller
2 2
3The Synopsis designware mobile storage host controller is used to interface 3The Synopsys designware mobile storage host controller is used to interface
4a SoC with storage medium such as eMMC or SD/MMC cards. This file documents 4a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
5differences between the core mmc properties described by mmc.txt and the 5differences between the core mmc properties described by mmc.txt and the
6properties used by the Synopsis Designware Mobile Storage Host Controller. 6properties used by the Synopsys Designware Mobile Storage Host Controller.
7 7
8Required Properties: 8Required Properties:
9 9
10* compatible: should be 10* compatible: should be
11 - snps,dw-mshc: for controllers compliant with synopsis dw-mshc. 11 - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
12* #address-cells: should be 1. 12* #address-cells: should be 1.
13* #size-cells: should be 0. 13* #size-cells: should be 0.
14 14
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index df204e18e030..6a2a1160a70d 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -9,12 +9,15 @@ compulsory and any optional properties, common to all SD/MMC drivers, as
9described in mmc.txt, can be used. Additionally the following tmio_mmc-specific 9described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
10optional bindings can be used. 10optional bindings can be used.
11 11
12Required properties:
13- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
14 "renesas,sdhi-sh7372" - SDHI IP on SH7372 SoC
15 "renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
16 "renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
17 "renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
18 "renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
19 "renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
20 "renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
21
12Optional properties: 22Optional properties:
13- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable 23- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
14
15When used with Renesas SDHI hardware, the following compatibility strings
16configure various model-specific properties:
17
18"renesas,sh7372-sdhi": (default) compatible with SH7372
19"renesas,r8a7740-sdhi": compatible with R8A7740: certain MMC/SD commands have to
20 wait for the interface to become idle.
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 2c6be0377f55..d2ea4605d078 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -86,6 +86,7 @@ General Properties:
86 86
87Clock Properties: 87Clock Properties:
88 88
89 - fsl,cksel Timer reference clock source.
89 - fsl,tclk-period Timer reference clock period in nanoseconds. 90 - fsl,tclk-period Timer reference clock period in nanoseconds.
90 - fsl,tmr-prsc Prescaler, divides the output clock. 91 - fsl,tmr-prsc Prescaler, divides the output clock.
91 - fsl,tmr-add Frequency compensation value. 92 - fsl,tmr-add Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
97 clock. You must choose these carefully for the clock to work right. 98 clock. You must choose these carefully for the clock to work right.
98 Here is how to figure good values: 99 Here is how to figure good values:
99 100
100 TimerOsc = system clock MHz 101 TimerOsc = selected reference clock MHz
101 tclk_period = desired clock period nanoseconds 102 tclk_period = desired clock period nanoseconds
102 NominalFreq = 1000 / tclk_period MHz 103 NominalFreq = 1000 / tclk_period MHz
103 FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0) 104 FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
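The two relations above are easy to sanity-check by hand. As a minimal, purely illustrative sketch (the 166.67 MHz reference clock is an assumed figure, not taken from this patch; the 10 ns period matches the fsl,tclk-period = <10> used in the example node further down), the following standalone C program evaluates them:

	/* illustrative only -- not kernel code */
	#include <stdio.h>

	int main(void)
	{
		double timer_osc = 166.67;      /* selected reference clock, MHz (assumed) */
		double tclk_period = 10.0;      /* desired timer clock period, ns */
		double nominal_freq = 1000.0 / tclk_period;        /* MHz */
		double freq_div_ratio = timer_osc / nominal_freq;  /* must exceed 1.0 */

		printf("NominalFreq  = %.1f MHz\n", nominal_freq);
		printf("FreqDivRatio = %.4f\n", freq_div_ratio);
		return 0;
	}

Choosing a reference clock and period that keep this ratio above 1.0 is exactly the constraint stated in the parenthesis above.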
@@ -114,6 +115,20 @@ Clock Properties:
114 Pulse Per Second (PPS) signal, since this will be offered to the PPS 115 Pulse Per Second (PPS) signal, since this will be offered to the PPS
115 subsystem to synchronize the Linux clock. 116 subsystem to synchronize the Linux clock.
116 117
 118  The reference clock source is determined by the value held in the
 119  CKSEL bits of the TMR_CTRL register. The "fsl,cksel" property holds
 120  the value that is written directly into those bits; per the
 121  reference manual, the following clock sources can therefore be used:
 122
 123  <0> - external high-precision timer reference clock (the
 124        TSEC_TMR_CLK input is used for this purpose);
 125  <1> - eTSEC system clock;
 126  <2> - eTSEC1 transmit clock;
 127  <3> - RTC clock input.
 128
 129  When this property is omitted, the eTSEC system clock serves as the
 130  IEEE 1588 timer reference clock.
131
117Example: 132Example:
118 133
119 ptp_clock@24E00 { 134 ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
121 reg = <0x24E00 0xB0>; 136 reg = <0x24E00 0xB0>;
122 interrupts = <12 0x8 13 0x8>; 137 interrupts = <12 0x8 13 0x8>;
123 interrupt-parent = < &ipic >; 138 interrupt-parent = < &ipic >;
139 fsl,cksel = <1>;
124 fsl,tclk-period = <10>; 140 fsl,tclk-period = <10>;
125 fsl,tmr-prsc = <100>; 141 fsl,tmr-prsc = <100>;
126 fsl,tmr-add = <0x999999A4>; 142 fsl,tmr-add = <0x999999A4>;
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index eabcb4b5db6e..e216af356847 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -1,4 +1,4 @@
1* Synopsis Designware PCIe interface 1* Synopsys Designware PCIe interface
2 2
3Required properties: 3Required properties:
4- compatible: should contain "snps,dw-pcie" to identify the 4- compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
index c5e032c85bf9..c5e032c85bf9 100644
--- a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1a036cd972fb..fcbb736d55fe 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
480 Format: <io>,<irq>,<mode> 480 Format: <io>,<irq>,<mode>
481 See header of drivers/net/hamradio/baycom_ser_hdx.c. 481 See header of drivers/net/hamradio/baycom_ser_hdx.c.
482 482
483 blkdevparts= Manual partition parsing of block device(s) for
484 embedded devices based on command line input.
485 See Documentation/block/cmdline-partition.txt
486
483 boot_delay= Milliseconds to delay each printk during boot. 487 boot_delay= Milliseconds to delay each printk during boot.
484 Values larger than 10 seconds (10000) are changed to 488 Values larger than 10 seconds (10000) are changed to
485 no delay (0). 489 no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1357 pages. In the event, a node is too small to have both 1361 pages. In the event, a node is too small to have both
1358 kernelcore and Movable pages, kernelcore pages will 1362 kernelcore and Movable pages, kernelcore pages will
1359 take priority and other nodes will have a larger number 1363 take priority and other nodes will have a larger number
1360 of kernelcore pages. The Movable zone is used for the 1364 of Movable pages. The Movable zone is used for the
1361 allocation of pages that may be reclaimed or moved 1365 allocation of pages that may be reclaimed or moved
1362 by the page migration subsystem. This means that 1366 by the page migration subsystem. This means that
1363 HugeTLB pages may not be allocated from this zone. 1367 HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3485 the unplug protocol 3489 the unplug protocol
3486 never -- do not unplug even if version check succeeds 3490 never -- do not unplug even if version check succeeds
3487 3491
3492 xen_nopvspin [X86,XEN]
3493 Disables the ticketlock slowpath using Xen PV
3494 optimizations.
3495
3488 xirc2ps_cs= [NET,PCMCIA] 3496 xirc2ps_cs= [NET,PCMCIA]
3489 Format: 3497 Format:
3490 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] 3498 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index a46ddb85e83a..85c362d8ea34 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -28,6 +28,7 @@ ALC269/270/275/276/28x/29x
28 alc269-dmic Enable ALC269(VA) digital mic workaround 28 alc269-dmic Enable ALC269(VA) digital mic workaround
29 alc271-dmic Enable ALC271X digital mic workaround 29 alc271-dmic Enable ALC271X digital mic workaround
30 inv-dmic Inverted internal mic workaround 30 inv-dmic Inverted internal mic workaround
31 headset-mic Indicates a combined headset (headphone+mic) jack
31 lenovo-dock Enables docking station I/O for some Lenovos 32 lenovo-dock Enables docking station I/O for some Lenovos
32 dell-headset-multi Headset jack, which can also be used as mic-in 33 dell-headset-multi Headset jack, which can also be used as mic-in
33 dell-headset-dock Headset jack (without mic-in), and also dock I/O 34 dell-headset-dock Headset jack (without mic-in), and also dock I/O
@@ -296,6 +297,12 @@ Cirrus Logic CS4206/4207
296 imac27 IMac 27 Inch 297 imac27 IMac 27 Inch
297 auto BIOS setup (default) 298 auto BIOS setup (default)
298 299
300Cirrus Logic CS4208
301===================
302 mba6 MacBook Air 6,1 and 6,2
303 gpio0 Enable GPIO 0 amp
304 auto BIOS setup (default)
305
299VIA VT17xx/VT18xx/VT20xx 306VIA VT17xx/VT18xx/VT20xx
300======================== 307========================
301 auto BIOS setup (default) 308 auto BIOS setup (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e61c2e83fc2b..3438384d270c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -237,11 +237,11 @@ F: drivers/platform/x86/acer-wmi.c
237 237
238ACPI 238ACPI
239M: Len Brown <lenb@kernel.org> 239M: Len Brown <lenb@kernel.org>
240M: Rafael J. Wysocki <rjw@sisk.pl> 240M: Rafael J. Wysocki <rjw@rjwysocki.net>
241L: linux-acpi@vger.kernel.org 241L: linux-acpi@vger.kernel.org
242W: http://www.lesswatts.org/projects/acpi/ 242W: https://01.org/linux-acpi
243Q: http://patchwork.kernel.org/project/linux-acpi/list/ 243Q: https://patchwork.kernel.org/project/linux-acpi/list/
244T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux 244T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
245S: Supported 245S: Supported
246F: drivers/acpi/ 246F: drivers/acpi/
247F: drivers/pnp/pnpacpi/ 247F: drivers/pnp/pnpacpi/
@@ -256,21 +256,21 @@ F: drivers/pci/*/*/*acpi*
256ACPI FAN DRIVER 256ACPI FAN DRIVER
257M: Zhang Rui <rui.zhang@intel.com> 257M: Zhang Rui <rui.zhang@intel.com>
258L: linux-acpi@vger.kernel.org 258L: linux-acpi@vger.kernel.org
259W: http://www.lesswatts.org/projects/acpi/ 259W: https://01.org/linux-acpi
260S: Supported 260S: Supported
261F: drivers/acpi/fan.c 261F: drivers/acpi/fan.c
262 262
263ACPI THERMAL DRIVER 263ACPI THERMAL DRIVER
264M: Zhang Rui <rui.zhang@intel.com> 264M: Zhang Rui <rui.zhang@intel.com>
265L: linux-acpi@vger.kernel.org 265L: linux-acpi@vger.kernel.org
266W: http://www.lesswatts.org/projects/acpi/ 266W: https://01.org/linux-acpi
267S: Supported 267S: Supported
268F: drivers/acpi/*thermal* 268F: drivers/acpi/*thermal*
269 269
270ACPI VIDEO DRIVER 270ACPI VIDEO DRIVER
271M: Zhang Rui <rui.zhang@intel.com> 271M: Zhang Rui <rui.zhang@intel.com>
272L: linux-acpi@vger.kernel.org 272L: linux-acpi@vger.kernel.org
273W: http://www.lesswatts.org/projects/acpi/ 273W: https://01.org/linux-acpi
274S: Supported 274S: Supported
275F: drivers/acpi/video.c 275F: drivers/acpi/video.c
276 276
@@ -824,15 +824,21 @@ S: Maintained
824F: arch/arm/mach-gemini/ 824F: arch/arm/mach-gemini/
825 825
826ARM/CSR SIRFPRIMA2 MACHINE SUPPORT 826ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
827M: Barry Song <baohua.song@csr.com> 827M: Barry Song <baohua@kernel.org>
828L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 828L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
829T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git 829T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
830S: Maintained 830S: Maintained
831F: arch/arm/mach-prima2/ 831F: arch/arm/mach-prima2/
832F: drivers/clk/clk-prima2.c
833F: drivers/clocksource/timer-prima2.c
834F: drivers/clocksource/timer-marco.c
832F: drivers/dma/sirf-dma.c 835F: drivers/dma/sirf-dma.c
833F: drivers/i2c/busses/i2c-sirf.c 836F: drivers/i2c/busses/i2c-sirf.c
837F: drivers/input/misc/sirfsoc-onkey.c
838F: drivers/irqchip/irq-sirfsoc.c
834F: drivers/mmc/host/sdhci-sirf.c 839F: drivers/mmc/host/sdhci-sirf.c
835F: drivers/pinctrl/sirf/ 840F: drivers/pinctrl/sirf/
841F: drivers/rtc/rtc-sirfsoc.c
836F: drivers/spi/spi-sirf.c 842F: drivers/spi/spi-sirf.c
837 843
838ARM/EBSA110 MACHINE SUPPORT 844ARM/EBSA110 MACHINE SUPPORT
@@ -1003,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
1003M: Jason Cooper <jason@lakedaemon.net> 1009M: Jason Cooper <jason@lakedaemon.net>
1004M: Andrew Lunn <andrew@lunn.ch> 1010M: Andrew Lunn <andrew@lunn.ch>
1005M: Gregory Clement <gregory.clement@free-electrons.com> 1011M: Gregory Clement <gregory.clement@free-electrons.com>
1012M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1006L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1013L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1007S: Maintained 1014S: Maintained
1008F: arch/arm/mach-mvebu/ 1015F: arch/arm/mach-mvebu/
@@ -1010,6 +1017,7 @@ F: arch/arm/mach-mvebu/
1010ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support 1017ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
1011M: Jason Cooper <jason@lakedaemon.net> 1018M: Jason Cooper <jason@lakedaemon.net>
1012M: Andrew Lunn <andrew@lunn.ch> 1019M: Andrew Lunn <andrew@lunn.ch>
1020M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1013L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1021L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1014S: Maintained 1022S: Maintained
1015F: arch/arm/mach-dove/ 1023F: arch/arm/mach-dove/
@@ -1142,6 +1150,13 @@ F: drivers/net/ethernet/i825xx/ether1*
1142F: drivers/net/ethernet/seeq/ether3* 1150F: drivers/net/ethernet/seeq/ether3*
1143F: drivers/scsi/arm/ 1151F: drivers/scsi/arm/
1144 1152
1153ARM/Rockchip SoC support
1154M: Heiko Stuebner <heiko@sntech.de>
1155L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1156S: Maintained
1157F: arch/arm/mach-rockchip/
1158F: drivers/*/*rockchip*
1159
1145ARM/SHARK MACHINE SUPPORT 1160ARM/SHARK MACHINE SUPPORT
1146M: Alexander Schulz <alex@shark-linux.de> 1161M: Alexander Schulz <alex@shark-linux.de>
1147W: http://www.shark-linux.de/shark.html 1162W: http://www.shark-linux.de/shark.html
@@ -1785,6 +1800,7 @@ F: include/net/bluetooth/
1785 1800
1786BONDING DRIVER 1801BONDING DRIVER
1787M: Jay Vosburgh <fubar@us.ibm.com> 1802M: Jay Vosburgh <fubar@us.ibm.com>
1803M: Veaceslav Falico <vfalico@redhat.com>
1788M: Andy Gospodarek <andy@greyhouse.net> 1804M: Andy Gospodarek <andy@greyhouse.net>
1789L: netdev@vger.kernel.org 1805L: netdev@vger.kernel.org
1790W: http://sourceforge.net/projects/bonding/ 1806W: http://sourceforge.net/projects/bonding/
@@ -1812,7 +1828,8 @@ S: Supported
1812F: drivers/net/ethernet/broadcom/bnx2x/ 1828F: drivers/net/ethernet/broadcom/bnx2x/
1813 1829
1814BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1830BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
1815M: Christian Daudt <csd@broadcom.com> 1831M: Christian Daudt <bcm@fixthebug.org>
1832L: bcm-kernel-feedback-list@broadcom.com
1816T: git git://git.github.com/broadcom/bcm11351 1833T: git git://git.github.com/broadcom/bcm11351
1817S: Maintained 1834S: Maintained
1818F: arch/arm/mach-bcm/ 1835F: arch/arm/mach-bcm/
@@ -2293,7 +2310,7 @@ S: Maintained
2293F: drivers/net/ethernet/ti/cpmac.c 2310F: drivers/net/ethernet/ti/cpmac.c
2294 2311
2295CPU FREQUENCY DRIVERS 2312CPU FREQUENCY DRIVERS
2296M: Rafael J. Wysocki <rjw@sisk.pl> 2313M: Rafael J. Wysocki <rjw@rjwysocki.net>
2297M: Viresh Kumar <viresh.kumar@linaro.org> 2314M: Viresh Kumar <viresh.kumar@linaro.org>
2298L: cpufreq@vger.kernel.org 2315L: cpufreq@vger.kernel.org
2299L: linux-pm@vger.kernel.org 2316L: linux-pm@vger.kernel.org
@@ -2324,7 +2341,7 @@ S: Maintained
2324F: drivers/cpuidle/cpuidle-big_little.c 2341F: drivers/cpuidle/cpuidle-big_little.c
2325 2342
2326CPUIDLE DRIVERS 2343CPUIDLE DRIVERS
2327M: Rafael J. Wysocki <rjw@sisk.pl> 2344M: Rafael J. Wysocki <rjw@rjwysocki.net>
2328M: Daniel Lezcano <daniel.lezcano@linaro.org> 2345M: Daniel Lezcano <daniel.lezcano@linaro.org>
2329L: linux-pm@vger.kernel.org 2346L: linux-pm@vger.kernel.org
2330S: Maintained 2347S: Maintained
@@ -2639,6 +2656,18 @@ F: include/linux/device-mapper.h
2639F: include/linux/dm-*.h 2656F: include/linux/dm-*.h
2640F: include/uapi/linux/dm-*.h 2657F: include/uapi/linux/dm-*.h
2641 2658
2659DIGI NEO AND CLASSIC PCI PRODUCTS
2660M: Lidza Louina <lidza.louina@gmail.com>
2661L: driverdev-devel@linuxdriverproject.org
2662S: Maintained
2663F: drivers/staging/dgnc/
2664
2665DIGI EPCA PCI PRODUCTS
2666M: Lidza Louina <lidza.louina@gmail.com>
2667L: driverdev-devel@linuxdriverproject.org
2668S: Maintained
2669F: drivers/staging/dgap/
2670
2642DIOLAN U2C-12 I2C DRIVER 2671DIOLAN U2C-12 I2C DRIVER
2643M: Guenter Roeck <linux@roeck-us.net> 2672M: Guenter Roeck <linux@roeck-us.net>
2644L: linux-i2c@vger.kernel.org 2673L: linux-i2c@vger.kernel.org
@@ -2699,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
2699DMA GENERIC OFFLOAD ENGINE SUBSYSTEM 2728DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
2700M: Vinod Koul <vinod.koul@intel.com> 2729M: Vinod Koul <vinod.koul@intel.com>
2701M: Dan Williams <dan.j.williams@intel.com> 2730M: Dan Williams <dan.j.williams@intel.com>
2731L: dmaengine@vger.kernel.org
2732Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
2702S: Supported 2733S: Supported
2703F: drivers/dma/ 2734F: drivers/dma/
2704F: include/linux/dma* 2735F: include/linux/dma*
@@ -2802,7 +2833,7 @@ M: Terje Bergström <tbergstrom@nvidia.com>
2802L: dri-devel@lists.freedesktop.org 2833L: dri-devel@lists.freedesktop.org
2803L: linux-tegra@vger.kernel.org 2834L: linux-tegra@vger.kernel.org
2804T: git git://anongit.freedesktop.org/tegra/linux.git 2835T: git git://anongit.freedesktop.org/tegra/linux.git
2805S: Maintained 2836S: Supported
2806F: drivers/gpu/host1x/ 2837F: drivers/gpu/host1x/
2807F: include/uapi/drm/tegra_drm.h 2838F: include/uapi/drm/tegra_drm.h
2808F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt 2839F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -3534,7 +3565,7 @@ F: fs/freevxfs/
3534 3565
3535FREEZER 3566FREEZER
3536M: Pavel Machek <pavel@ucw.cz> 3567M: Pavel Machek <pavel@ucw.cz>
3537M: "Rafael J. Wysocki" <rjw@sisk.pl> 3568M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
3538L: linux-pm@vger.kernel.org 3569L: linux-pm@vger.kernel.org
3539S: Supported 3570S: Supported
3540F: Documentation/power/freezing-of-tasks.txt 3571F: Documentation/power/freezing-of-tasks.txt
@@ -3605,6 +3636,12 @@ L: linux-scsi@vger.kernel.org
3605S: Odd Fixes (e.g., new signatures) 3636S: Odd Fixes (e.g., new signatures)
3606F: drivers/scsi/fdomain.* 3637F: drivers/scsi/fdomain.*
3607 3638
3639GCOV BASED KERNEL PROFILING
3640M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
3641S: Maintained
3642F: kernel/gcov/
3643F: Documentation/gcov.txt
3644
3608GDT SCSI DISK ARRAY CONTROLLER DRIVER 3645GDT SCSI DISK ARRAY CONTROLLER DRIVER
3609M: Achim Leubner <achim_leubner@adaptec.com> 3646M: Achim Leubner <achim_leubner@adaptec.com>
3610L: linux-scsi@vger.kernel.org 3647L: linux-scsi@vger.kernel.org
@@ -3870,7 +3907,7 @@ F: drivers/video/hgafb.c
3870 3907
3871HIBERNATION (aka Software Suspend, aka swsusp) 3908HIBERNATION (aka Software Suspend, aka swsusp)
3872M: Pavel Machek <pavel@ucw.cz> 3909M: Pavel Machek <pavel@ucw.cz>
3873M: "Rafael J. Wysocki" <rjw@sisk.pl> 3910M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
3874L: linux-pm@vger.kernel.org 3911L: linux-pm@vger.kernel.org
3875S: Supported 3912S: Supported
3876F: arch/x86/power/ 3913F: arch/x86/power/
@@ -4320,7 +4357,7 @@ F: drivers/video/i810/
4320INTEL MENLOW THERMAL DRIVER 4357INTEL MENLOW THERMAL DRIVER
4321M: Sujith Thomas <sujith.thomas@intel.com> 4358M: Sujith Thomas <sujith.thomas@intel.com>
4322L: platform-driver-x86@vger.kernel.org 4359L: platform-driver-x86@vger.kernel.org
4323W: http://www.lesswatts.org/projects/acpi/ 4360W: https://01.org/linux-acpi
4324S: Supported 4361S: Supported
4325F: drivers/platform/x86/intel_menlow.c 4362F: drivers/platform/x86/intel_menlow.c
4326 4363
@@ -4332,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c
4332 4369
4333INTEL I/OAT DMA DRIVER 4370INTEL I/OAT DMA DRIVER
4334M: Dan Williams <dan.j.williams@intel.com> 4371M: Dan Williams <dan.j.williams@intel.com>
4335S: Maintained 4372M: Dave Jiang <dave.jiang@intel.com>
4373L: dmaengine@vger.kernel.org
4374Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
4375S: Supported
4336F: drivers/dma/ioat* 4376F: drivers/dma/ioat*
4337 4377
4338INTEL IOMMU (VT-d) 4378INTEL IOMMU (VT-d)
@@ -4457,6 +4497,13 @@ L: linux-serial@vger.kernel.org
4457S: Maintained 4497S: Maintained
4458F: drivers/tty/serial/ioc3_serial.c 4498F: drivers/tty/serial/ioc3_serial.c
4459 4499
4500IOMMU DRIVERS
4501M: Joerg Roedel <joro@8bytes.org>
4502L: iommu@lists.linux-foundation.org
4503T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
4504S: Maintained
4505F: drivers/iommu/
4506
4460IP MASQUERADING 4507IP MASQUERADING
4461M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> 4508M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
4462S: Maintained 4509S: Maintained
@@ -6595,7 +6642,7 @@ S: Obsolete
6595F: drivers/net/wireless/prism54/ 6642F: drivers/net/wireless/prism54/
6596 6643
6597PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER 6644PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
6598M: Mikael Pettersson <mikpe@it.uu.se> 6645M: Mikael Pettersson <mikpelinux@gmail.com>
6599L: linux-ide@vger.kernel.org 6646L: linux-ide@vger.kernel.org
6600S: Maintained 6647S: Maintained
6601F: drivers/ata/sata_promise.* 6648F: drivers/ata/sata_promise.*
@@ -7258,9 +7305,9 @@ F: include/linux/sched.h
7258F: include/uapi/linux/sched.h 7305F: include/uapi/linux/sched.h
7259 7306
7260SCORE ARCHITECTURE 7307SCORE ARCHITECTURE
7261M: Chen Liqin <liqin.chen@sunplusct.com> 7308M: Chen Liqin <liqin.linux@gmail.com>
7262M: Lennox Wu <lennox.wu@gmail.com> 7309M: Lennox Wu <lennox.wu@gmail.com>
7263W: http://www.sunplusct.com 7310W: http://www.sunplus.com
7264S: Supported 7311S: Supported
7265F: arch/score/ 7312F: arch/score/
7266 7313
@@ -7790,6 +7837,13 @@ F: Documentation/sound/alsa/soc/
7790F: sound/soc/ 7837F: sound/soc/
7791F: include/sound/soc* 7838F: include/sound/soc*
7792 7839
7840SOUND - DMAENGINE HELPERS
7841M: Lars-Peter Clausen <lars@metafoo.de>
7842S: Supported
7843F: include/sound/dmaengine_pcm.h
7844F: sound/core/pcm_dmaengine.c
7845F: sound/soc/soc-generic-dmaengine-pcm.c
7846
7793SPARC + UltraSPARC (sparc/sparc64) 7847SPARC + UltraSPARC (sparc/sparc64)
7794M: "David S. Miller" <davem@davemloft.net> 7848M: "David S. Miller" <davem@davemloft.net>
7795L: sparclinux@vger.kernel.org 7849L: sparclinux@vger.kernel.org
@@ -8069,7 +8123,7 @@ F: drivers/sh/
8069SUSPEND TO RAM 8123SUSPEND TO RAM
8070M: Len Brown <len.brown@intel.com> 8124M: Len Brown <len.brown@intel.com>
8071M: Pavel Machek <pavel@ucw.cz> 8125M: Pavel Machek <pavel@ucw.cz>
8072M: "Rafael J. Wysocki" <rjw@sisk.pl> 8126M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
8073L: linux-pm@vger.kernel.org 8127L: linux-pm@vger.kernel.org
8074S: Supported 8128S: Supported
8075F: Documentation/power/ 8129F: Documentation/power/
@@ -8262,14 +8316,72 @@ L: linux-media@vger.kernel.org
8262S: Maintained 8316S: Maintained
8263F: drivers/media/rc/ttusbir.c 8317F: drivers/media/rc/ttusbir.c
8264 8318
8265TEGRA SUPPORT 8319TEGRA ARCHITECTURE SUPPORT
8266M: Stephen Warren <swarren@wwwdotorg.org> 8320M: Stephen Warren <swarren@wwwdotorg.org>
8321M: Thierry Reding <thierry.reding@gmail.com>
8267L: linux-tegra@vger.kernel.org 8322L: linux-tegra@vger.kernel.org
8268Q: http://patchwork.ozlabs.org/project/linux-tegra/list/ 8323Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
8269T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git 8324T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
8270S: Supported 8325S: Supported
8271N: [^a-z]tegra 8326N: [^a-z]tegra
8272 8327
8328TEGRA ASOC DRIVER
8329M: Stephen Warren <swarren@wwwdotorg.org>
8330S: Supported
8331F: sound/soc/tegra/
8332
8333TEGRA CLOCK DRIVER
8334M: Peter De Schrijver <pdeschrijver@nvidia.com>
8335M: Prashant Gaikwad <pgaikwad@nvidia.com>
8336S: Supported
8337F: drivers/clk/tegra/
8338
8339TEGRA DMA DRIVER
8340M: Laxman Dewangan <ldewangan@nvidia.com>
8341S: Supported
8342F: drivers/dma/tegra20-apb-dma.c
8343
8344TEGRA GPIO DRIVER
8345M: Stephen Warren <swarren@wwwdotorg.org>
8346S: Supported
8347F: drivers/gpio/gpio-tegra.c
8348
8349TEGRA I2C DRIVER
8350M: Laxman Dewangan <ldewangan@nvidia.com>
8351S: Supported
8352F: drivers/i2c/busses/i2c-tegra.c
8353
8354TEGRA IOMMU DRIVERS
8355M: Hiroshi Doyu <hdoyu@nvidia.com>
8356S: Supported
8357F: drivers/iommu/tegra*
8358
8359TEGRA KBC DRIVER
8360M: Rakesh Iyer <riyer@nvidia.com>
8361M: Laxman Dewangan <ldewangan@nvidia.com>
8362S: Supported
8363F: drivers/input/keyboard/tegra-kbc.c
8364
8365TEGRA PINCTRL DRIVER
8366M: Stephen Warren <swarren@wwwdotorg.org>
8367S: Supported
8368F: drivers/pinctrl/pinctrl-tegra*
8369
8370TEGRA PWM DRIVER
8371M: Thierry Reding <thierry.reding@gmail.com>
8372S: Supported
8373F: drivers/pwm/pwm-tegra.c
8374
8375TEGRA SERIAL DRIVER
8376M: Laxman Dewangan <ldewangan@nvidia.com>
8377S: Supported
8378F: drivers/tty/serial/serial-tegra.c
8379
8380TEGRA SPI DRIVER
8381M: Laxman Dewangan <ldewangan@nvidia.com>
8382S: Supported
8383F: drivers/spi/spi-tegra*
8384
8273TEHUTI ETHERNET DRIVER 8385TEHUTI ETHERNET DRIVER
8274M: Andy Gospodarek <andy@greyhouse.net> 8386M: Andy Gospodarek <andy@greyhouse.net>
8275L: netdev@vger.kernel.org 8387L: netdev@vger.kernel.org
@@ -8724,9 +8836,8 @@ F: Documentation/hid/hiddev.txt
8724F: drivers/hid/usbhid/ 8836F: drivers/hid/usbhid/
8725 8837
8726USB/IP DRIVERS 8838USB/IP DRIVERS
8727M: Matt Mooney <mfm@muteddisk.com>
8728L: linux-usb@vger.kernel.org 8839L: linux-usb@vger.kernel.org
8729S: Maintained 8840S: Orphan
8730F: drivers/staging/usbip/ 8841F: drivers/staging/usbip/
8731 8842
8732USB ISP116X DRIVER 8843USB ISP116X DRIVER
@@ -9366,6 +9477,7 @@ F: arch/arm64/include/asm/xen/
9366 9477
9367XEN NETWORK BACKEND DRIVER 9478XEN NETWORK BACKEND DRIVER
9368M: Ian Campbell <ian.campbell@citrix.com> 9479M: Ian Campbell <ian.campbell@citrix.com>
9480M: Wei Liu <wei.liu2@citrix.com>
9369L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 9481L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
9370L: netdev@vger.kernel.org 9482L: netdev@vger.kernel.org
9371S: Supported 9483S: Supported
diff --git a/Makefile b/Makefile
index 8d0668f473ba..868c0eb67b08 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 12 2PATCHLEVEL = 12
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc7
5NAME = One Giant Leap for Frogkind 5NAME = One Giant Leap for Frogkind
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 1feb169274fe..af2cc6eabcc7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
286config HAVE_ARCH_JUMP_LABEL 286config HAVE_ARCH_JUMP_LABEL
287 bool 287 bool
288 288
289config HAVE_ARCH_MUTEX_CPU_RELAX
290 bool
291
292config HAVE_RCU_TABLE_FREE 289config HAVE_RCU_TABLE_FREE
293 bool 290 bool
294 291
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
45 45
46static inline void arch_spin_unlock(arch_spinlock_t *lock) 46static inline void arch_spin_unlock(arch_spinlock_t *lock)
47{ 47{
48 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; 48 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
49
50 __asm__ __volatile__(
51 " ex %0, [%1] \n"
52 : "+r" (tmp)
53 : "r"(&(lock->slock))
54 : "memory");
55
49 smp_mb(); 56 smp_mb();
50} 57}
51 58
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
43 * Because it essentially checks if buffer end is within limit and @len is 43 * Because it essentially checks if buffer end is within limit and @len is
44 * non-ngeative, which implies that buffer start will be within limit too. 44 * non-ngeative, which implies that buffer start will be within limit too.
45 * 45 *
46 * The reason for rewriting being, for majorit yof cases, @len is generally 46 * The reason for rewriting being, for majority of cases, @len is generally
47 * compile time constant, causing first sub-expression to be compile time 47 * compile time constant, causing first sub-expression to be compile time
48 * subsumed. 48 * subsumed.
49 * 49 *
@@ -53,7 +53,7 @@
53 * 53 *
54 */ 54 */
55#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \ 55#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
56 (((addr)+(sz)) <= get_fs())) 56 ((addr) <= (get_fs() - (sz))))
57#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \ 57#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
58 likely(__user_ok((addr), (sz)))) 58 likely(__user_ok((addr), (sz))))
59 59
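Although the hunk above does not spell out the motivation, the new form of the second clause is a standard guard against unsigned wrap-around: the first clause already bounds sz by TASK_SIZE, and get_fs() is never smaller than that, so get_fs() - sz cannot underflow, whereas addr + sz can wrap for a hostile addr. A small user-space illustration of the difference (all values below are invented; uint32_t mimics the 32-bit ARC unsigned long):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t limit = 0x80000000u;  /* stand-in for get_fs() */
		uint32_t addr  = 0xfffffff0u;  /* bogus "user" address */
		uint32_t sz    = 0x100u;       /* small length, sz <= limit */

		/* addr + sz wraps to a tiny value, so this test wrongly passes */
		printf("addr + sz <= limit -> %d\n", addr + sz <= limit);
		/* limit - sz cannot underflow, so this test correctly fails */
		printf("addr <= limit - sz -> %d\n", addr <= limit - sz);
		return 0;
	}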
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 333238564b67..5d76706139dd 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
102 REG_IGNORE_ONE(pad2); 102 REG_IGNORE_ONE(pad2);
103 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ 103 REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
104 REG_IGNORE_ONE(efa); /* efa update invalid */ 104 REG_IGNORE_ONE(efa); /* efa update invalid */
105 REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */ 105 REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
106 106
107 return ret; 107 return ret;
108} 108}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
101{ 101{
102 struct rt_sigframe __user *sf; 102 struct rt_sigframe __user *sf;
103 unsigned int magic; 103 unsigned int magic;
104 int err;
105 struct pt_regs *regs = current_pt_regs(); 104 struct pt_regs *regs = current_pt_regs();
106 105
107 /* Always make any pending restarted system calls return -EINTR */ 106 /* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
119 if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) 118 if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
120 goto badframe; 119 goto badframe;
121 120
122 err = restore_usr_regs(regs, sf); 121 if (__get_user(magic, &sf->sigret_magic))
123 err |= __get_user(magic, &sf->sigret_magic);
124 if (err)
125 goto badframe; 122 goto badframe;
126 123
127 if (unlikely(is_do_ss_needed(magic))) 124 if (unlikely(is_do_ss_needed(magic)))
128 if (restore_altstack(&sf->uc.uc_stack)) 125 if (restore_altstack(&sf->uc.uc_stack))
129 goto badframe; 126 goto badframe;
130 127
128 if (restore_usr_regs(regs, sf))
129 goto badframe;
130
131 /* Don't restart from sigreturn */ 131 /* Don't restart from sigreturn */
132 syscall_wont_restart(regs); 132 syscall_wont_restart(regs);
133 133
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
191 return 1; 191 return 1;
192 192
193 /* 193 /*
194 * w/o SA_SIGINFO, struct ucontext is partially populated (only
195 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
196 * during signal handler execution. This works for SA_SIGINFO as well
197 * although the semantics are now overloaded (the same reg state can be
198 * inspected by userland: but are they allowed to fiddle with it ?
199 */
200 err |= stash_usr_regs(sf, regs, set);
201
202 /*
194 * SA_SIGINFO requires 3 args to signal handler: 203 * SA_SIGINFO requires 3 args to signal handler:
195 * #1: sig-no (common to any handler) 204 * #1: sig-no (common to any handler)
196 * #2: struct siginfo 205 * #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
213 magic = MAGIC_SIGALTSTK; 222 magic = MAGIC_SIGALTSTK;
214 } 223 }
215 224
216 /*
217 * w/o SA_SIGINFO, struct ucontext is partially populated (only
218 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
219 * during signal handler execution. This works for SA_SIGINFO as well
220 * although the semantics are now overloaded (the same reg state can be
221 * inspected by userland: but are they allowed to fiddle with it ?
222 */
223 err |= stash_usr_regs(sf, regs, set);
224 err |= __put_user(magic, &sf->sigret_magic); 225 err |= __put_user(magic, &sf->sigret_magic);
225 if (err) 226 if (err)
226 return err; 227 return err;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
227{ 227{
228 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); 228 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
229 229
230 clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
231
232 clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
233 clk->cpumask = cpumask_of(cpu); 230 clk->cpumask = cpumask_of(cpu);
234 231 clockevents_config_and_register(clk, arc_get_core_freq(),
235 clockevents_register_device(clk); 232 0, ARC_TIMER_MAX);
236 233
237 /* 234 /*
238 * setup the per-cpu timer IRQ handler - for all cpus 235 * setup the per-cpu timer IRQ handler - for all cpus
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
245 regs->status32 &= ~STATUS_DE_MASK; 245 regs->status32 &= ~STATUS_DE_MASK;
246 } else { 246 } else {
247 regs->ret += state.instr_len; 247 regs->ret += state.instr_len;
248
249 /* handle zero-overhead-loop */
250 if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
251 regs->ret = regs->lp_start;
252 regs->lp_count--;
253 }
248 } 254 }
249 255
250 return 0; 256 return 0;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 3f7714d8d2d2..1ad6fb6c094d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2217,8 +2217,7 @@ config NEON
2217 2217
2218config KERNEL_MODE_NEON 2218config KERNEL_MODE_NEON
2219 bool "Support for NEON in kernel mode" 2219 bool "Support for NEON in kernel mode"
2220 default n 2220 depends on NEON && AEABI
2221 depends on NEON
2222 help 2221 help
2223 Say Y to include support for NEON in kernel mode. 2222 Say Y to include support for NEON in kernel mode.
2224 2223
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a2..db50b626be98 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
296# Convert bzImage to zImage 296# Convert bzImage to zImage
297bzImage: zImage 297bzImage: zImage
298 298
299zImage Image xipImage bootpImage uImage: vmlinux 299BOOT_TARGETS = zImage Image xipImage bootpImage uImage
300INSTALL_TARGETS = zinstall uinstall install
301
302PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
303
304$(BOOT_TARGETS): vmlinux
300 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ 305 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
301 306
302zinstall uinstall install: vmlinux 307$(INSTALL_TARGETS):
303 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ 308 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
304 309
305%.dtb: | scripts 310%.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07ed..ec2f8065f955 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
95 @test "$(INITRD)" != "" || \ 95 @test "$(INITRD)" != "" || \
96 (echo You must specify INITRD; exit -1) 96 (echo You must specify INITRD; exit -1)
97 97
98install: $(obj)/Image 98install:
99 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 99 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
100 $(obj)/Image System.map "$(INSTALL_PATH)" 100 $(obj)/Image System.map "$(INSTALL_PATH)"
101 101
102zinstall: $(obj)/zImage 102zinstall:
103 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 103 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
104 $(obj)/zImage System.map "$(INSTALL_PATH)" 104 $(obj)/zImage System.map "$(INSTALL_PATH)"
105 105
106uinstall: $(obj)/uImage 106uinstall:
107 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 107 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
108 $(obj)/uImage System.map "$(INSTALL_PATH)" 108 $(obj)/uImage System.map "$(INSTALL_PATH)"
109 109
110zi: 110zi:
111 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 111 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
112 $(obj)/zImage System.map "$(INSTALL_PATH)" 112 $(obj)/zImage System.map "$(INSTALL_PATH)"
113 113
114i: 114i:
115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
116 $(obj)/Image System.map "$(INSTALL_PATH)" 116 $(obj)/Image System.map "$(INSTALL_PATH)"
117 117
118subdir- := bootp compressed dts 118subdir- := bootp compressed dts
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index e95af3f5433b..802720e3e8fd 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -41,6 +41,8 @@ dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb 41dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb 42dtb-$(CONFIG_ARCH_AT91) += sama5d35ek.dtb
43 43
44dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
45
44dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb 46dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
45dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \ 47dtb-$(CONFIG_ARCH_BCM) += bcm11351-brt.dtb \
46 bcm28155-ap.dtb 48 bcm28155-ap.dtb
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 05e4485a8225..8ac2ac1f69cc 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -27,6 +27,25 @@
27 }; 27 };
28 28
29 soc { 29 soc {
30 ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
31 MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
32
33 pcie-controller {
34 status = "okay";
35
36 /* Connected to Marvell SATA controller */
37 pcie@1,0 {
38 /* Port 0, Lane 0 */
39 status = "okay";
40 };
41
42 /* Connected to FL1009 USB 3.0 controller */
43 pcie@2,0 {
44 /* Port 1, Lane 0 */
45 status = "okay";
46 };
47 };
48
30 internal-regs { 49 internal-regs {
31 serial@12000 { 50 serial@12000 {
32 clock-frequency = <200000000>; 51 clock-frequency = <200000000>;
@@ -57,6 +76,11 @@
57 marvell,pins = "mpp56"; 76 marvell,pins = "mpp56";
58 marvell,function = "gpio"; 77 marvell,function = "gpio";
59 }; 78 };
79
80 poweroff: poweroff {
81 marvell,pins = "mpp8";
82 marvell,function = "gpio";
83 };
60 }; 84 };
61 85
62 mdio { 86 mdio {
@@ -89,22 +113,6 @@
89 pwm_polarity = <0>; 113 pwm_polarity = <0>;
90 }; 114 };
91 }; 115 };
92
93 pcie-controller {
94 status = "okay";
95
96 /* Connected to Marvell SATA controller */
97 pcie@1,0 {
98 /* Port 0, Lane 0 */
99 status = "okay";
100 };
101
102 /* Connected to FL1009 USB 3.0 controller */
103 pcie@2,0 {
104 /* Port 1, Lane 0 */
105 status = "okay";
106 };
107 };
108 }; 116 };
109 }; 117 };
110 118
@@ -160,7 +168,7 @@
160 button@1 { 168 button@1 {
161 label = "Power Button"; 169 label = "Power Button";
162 linux,code = <116>; /* KEY_POWER */ 170 linux,code = <116>; /* KEY_POWER */
163 gpios = <&gpio1 30 1>; 171 gpios = <&gpio1 30 0>;
164 }; 172 };
165 173
166 button@2 { 174 button@2 {
@@ -176,4 +184,11 @@
176 }; 184 };
177 }; 185 };
178 186
187 gpio_poweroff {
188 compatible = "gpio-poweroff";
189 pinctrl-0 = <&poweroff>;
190 pinctrl-names = "default";
191 gpios = <&gpio0 8 1>;
192 };
193
179}; 194};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index def125c0eeaa..3058522f5aad 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -70,6 +70,8 @@
70 70
71 timer@20300 { 71 timer@20300 {
72 compatible = "marvell,armada-xp-timer"; 72 compatible = "marvell,armada-xp-timer";
73 clocks = <&coreclk 2>, <&refclk>;
74 clock-names = "nbclk", "fixed";
73 }; 75 };
74 76
75 coreclk: mvebu-sar@18230 { 77 coreclk: mvebu-sar@18230 {
@@ -169,4 +171,13 @@
169 }; 171 };
170 }; 172 };
171 }; 173 };
174
175 clocks {
176 /* 25 MHz reference crystal */
177 refclk: oscillator {
178 compatible = "fixed-clock";
179 #clock-cells = <0>;
180 clock-frequency = <25000000>;
181 };
182 };
172}; 183};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index cf78ac0b04b1..e74dc15efa9d 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -190,12 +190,12 @@
190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */ 190 AT91_PIOA 8 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* PA8 periph A */
191 }; 191 };
192 192
193 pinctrl_uart2_rts: uart2_rts-0 { 193 pinctrl_usart2_rts: usart2_rts-0 {
194 atmel,pins = 194 atmel,pins =
195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */ 195 <AT91_PIOB 0 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB0 periph B */
196 }; 196 };
197 197
198 pinctrl_uart2_cts: uart2_cts-0 { 198 pinctrl_usart2_cts: usart2_cts-0 {
199 atmel,pins = 199 atmel,pins =
200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */ 200 <AT91_PIOB 1 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PB1 periph B */
201 }; 201 };
@@ -556,6 +556,7 @@
556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>; 556 interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>; 557 dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(0)>;
558 dma-names = "rxtx"; 558 dma-names = "rxtx";
559 pinctrl-names = "default";
559 #address-cells = <1>; 560 #address-cells = <1>;
560 #size-cells = <0>; 561 #size-cells = <0>;
561 status = "disabled"; 562 status = "disabled";
@@ -567,6 +568,7 @@
567 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>; 568 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
568 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>; 569 dmas = <&dma1 1 AT91_DMA_CFG_PER_ID(0)>;
569 dma-names = "rxtx"; 570 dma-names = "rxtx";
571 pinctrl-names = "default";
570 #address-cells = <1>; 572 #address-cells = <1>;
571 #size-cells = <0>; 573 #size-cells = <0>;
572 status = "disabled"; 574 status = "disabled";
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 8678e0c11119..6db4f81d4795 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -181,6 +181,8 @@
181 interrupts = <17>; 181 interrupts = <17>;
182 fifosize = <128>; 182 fifosize = <128>;
183 clocks = <&clks 13>; 183 clocks = <&clks 13>;
184 sirf,uart-dma-rx-channel = <21>;
185 sirf,uart-dma-tx-channel = <2>;
184 }; 186 };
185 187
186 uart1: uart@b0060000 { 188 uart1: uart@b0060000 {
@@ -199,6 +201,8 @@
199 interrupts = <19>; 201 interrupts = <19>;
200 fifosize = <128>; 202 fifosize = <128>;
201 clocks = <&clks 15>; 203 clocks = <&clks 15>;
204 sirf,uart-dma-rx-channel = <6>;
205 sirf,uart-dma-tx-channel = <7>;
202 }; 206 };
203 207
204 usp0: usp@b0080000 { 208 usp0: usp@b0080000 {
@@ -206,7 +210,10 @@
206 compatible = "sirf,prima2-usp"; 210 compatible = "sirf,prima2-usp";
207 reg = <0xb0080000 0x10000>; 211 reg = <0xb0080000 0x10000>;
208 interrupts = <20>; 212 interrupts = <20>;
213 fifosize = <128>;
209 clocks = <&clks 28>; 214 clocks = <&clks 28>;
215 sirf,usp-dma-rx-channel = <17>;
216 sirf,usp-dma-tx-channel = <18>;
210 }; 217 };
211 218
212 usp1: usp@b0090000 { 219 usp1: usp@b0090000 {
@@ -214,7 +221,10 @@
214 compatible = "sirf,prima2-usp"; 221 compatible = "sirf,prima2-usp";
215 reg = <0xb0090000 0x10000>; 222 reg = <0xb0090000 0x10000>;
216 interrupts = <21>; 223 interrupts = <21>;
224 fifosize = <128>;
217 clocks = <&clks 29>; 225 clocks = <&clks 29>;
226 sirf,usp-dma-rx-channel = <14>;
227 sirf,usp-dma-tx-channel = <15>;
218 }; 228 };
219 229
220 dmac0: dma-controller@b00b0000 { 230 dmac0: dma-controller@b00b0000 {
@@ -237,6 +247,8 @@
237 compatible = "sirf,prima2-vip"; 247 compatible = "sirf,prima2-vip";
238 reg = <0xb00C0000 0x10000>; 248 reg = <0xb00C0000 0x10000>;
239 clocks = <&clks 31>; 249 clocks = <&clks 31>;
250 interrupts = <14>;
251 sirf,vip-dma-rx-channel = <16>;
240 }; 252 };
241 253
242 spi0: spi@b00d0000 { 254 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 7d7cc777ff7b..bbac42a78ce5 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -96,6 +96,11 @@
96 <1 14 0xf08>, 96 <1 14 0xf08>,
97 <1 11 0xf08>, 97 <1 11 0xf08>,
98 <1 10 0xf08>; 98 <1 10 0xf08>;
99 /* Unfortunately we need this since some versions of U-Boot
100 * on Exynos don't set the CNTFRQ register, so we need the
101 * value from DT.
102 */
103 clock-frequency = <24000000>;
99 }; 104 };
100 105
101 mct@101C0000 { 106 mct@101C0000 {
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0ee043..72693a69f830 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,11 +9,6 @@
9 model = "ARM Integrator/CP"; 9 model = "ARM Integrator/CP";
10 compatible = "arm,integrator-cp"; 10 compatible = "arm,integrator-cp";
11 11
12 aliases {
13 arm,timer-primary = &timer2;
14 arm,timer-secondary = &timer1;
15 };
16
17 chosen { 12 chosen {
18 bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk"; 13 bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
19 }; 14 };
@@ -24,14 +19,18 @@
24 }; 19 };
25 20
26 timer0: timer@13000000 { 21 timer0: timer@13000000 {
22 /* TIMER0 runs @ 25MHz */
27 compatible = "arm,integrator-cp-timer"; 23 compatible = "arm,integrator-cp-timer";
24 status = "disabled";
28 }; 25 };
29 26
30 timer1: timer@13000100 { 27 timer1: timer@13000100 {
28 /* TIMER1 runs @ 1MHz */
31 compatible = "arm,integrator-cp-timer"; 29 compatible = "arm,integrator-cp-timer";
32 }; 30 };
33 31
34 timer2: timer@13000200 { 32 timer2: timer@13000200 {
33 /* TIMER2 runs @ 1MHz */
35 compatible = "arm,integrator-cp-timer"; 34 compatible = "arm,integrator-cp-timer";
36 }; 35 };
37 36
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index cf7aeaf89e9c..1335b2e1bed4 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -13,6 +13,7 @@
13 cpu@0 { 13 cpu@0 {
14 device_type = "cpu"; 14 device_type = "cpu";
15 compatible = "marvell,feroceon"; 15 compatible = "marvell,feroceon";
16 reg = <0>;
16 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>; 17 clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
17 clock-names = "cpu_clk", "ddrclk", "powersave"; 18 clock-names = "cpu_clk", "ddrclk", "powersave";
18 }; 19 };
@@ -167,7 +168,7 @@
167 xor@60900 { 168 xor@60900 {
168 compatible = "marvell,orion-xor"; 169 compatible = "marvell,orion-xor";
169 reg = <0x60900 0x100 170 reg = <0x60900 0x100
170 0xd0B00 0x100>; 171 0x60B00 0x100>;
171 status = "okay"; 172 status = "okay";
172 clocks = <&gate_clk 16>; 173 clocks = <&gate_clk 16>;
173 174
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 0c514dc8460c..2816bf612672 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
11 11
12/ { 12/ {
13 model = "TI OMAP3 BeagleBoard xM"; 13 model = "TI OMAP3 BeagleBoard xM";
14 compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3"; 14 compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
15 15
16 cpus { 16 cpus {
17 cpu@0 { 17 cpu@0 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 7d95cda1fae4..b41bd57f4328 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -108,7 +108,7 @@
108 #address-cells = <1>; 108 #address-cells = <1>;
109 #size-cells = <0>; 109 #size-cells = <0>;
110 pinctrl-single,register-width = <16>; 110 pinctrl-single,register-width = <16>;
111 pinctrl-single,function-mask = <0x7f1f>; 111 pinctrl-single,function-mask = <0xff1f>;
112 }; 112 };
113 113
114 omap3_pmx_wkup: pinmux@0x48002a00 { 114 omap3_pmx_wkup: pinmux@0x48002a00 {
@@ -117,7 +117,7 @@
117 #address-cells = <1>; 117 #address-cells = <1>;
118 #size-cells = <0>; 118 #size-cells = <0>;
119 pinctrl-single,register-width = <16>; 119 pinctrl-single,register-width = <16>;
120 pinctrl-single,function-mask = <0x7f1f>; 120 pinctrl-single,function-mask = <0xff1f>;
121 }; 121 };
122 122
123 gpio1: gpio@48310000 { 123 gpio1: gpio@48310000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index bbeb623fc2c6..27ed9f5144bc 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -171,7 +171,8 @@
171 compatible = "simple-bus"; 171 compatible = "simple-bus";
172 #address-cells = <1>; 172 #address-cells = <1>;
173 #size-cells = <1>; 173 #size-cells = <1>;
174 ranges = <0xb0000000 0xb0000000 0x180000>; 174 ranges = <0xb0000000 0xb0000000 0x180000>,
175 <0x56000000 0x56000000 0x1b00000>;
175 176
176 timer@b0020000 { 177 timer@b0020000 {
177 compatible = "sirf,prima2-tick"; 178 compatible = "sirf,prima2-tick";
@@ -196,25 +197,32 @@
196 uart0: uart@b0050000 { 197 uart0: uart@b0050000 {
197 cell-index = <0>; 198 cell-index = <0>;
198 compatible = "sirf,prima2-uart"; 199 compatible = "sirf,prima2-uart";
199 reg = <0xb0050000 0x10000>; 200 reg = <0xb0050000 0x1000>;
200 interrupts = <17>; 201 interrupts = <17>;
202 fifosize = <128>;
201 clocks = <&clks 13>; 203 clocks = <&clks 13>;
204 sirf,uart-dma-rx-channel = <21>;
205 sirf,uart-dma-tx-channel = <2>;
202 }; 206 };
203 207
204 uart1: uart@b0060000 { 208 uart1: uart@b0060000 {
205 cell-index = <1>; 209 cell-index = <1>;
206 compatible = "sirf,prima2-uart"; 210 compatible = "sirf,prima2-uart";
207 reg = <0xb0060000 0x10000>; 211 reg = <0xb0060000 0x1000>;
208 interrupts = <18>; 212 interrupts = <18>;
213 fifosize = <32>;
209 clocks = <&clks 14>; 214 clocks = <&clks 14>;
210 }; 215 };
211 216
212 uart2: uart@b0070000 { 217 uart2: uart@b0070000 {
213 cell-index = <2>; 218 cell-index = <2>;
214 compatible = "sirf,prima2-uart"; 219 compatible = "sirf,prima2-uart";
215 reg = <0xb0070000 0x10000>; 220 reg = <0xb0070000 0x1000>;
216 interrupts = <19>; 221 interrupts = <19>;
222 fifosize = <128>;
217 clocks = <&clks 15>; 223 clocks = <&clks 15>;
224 sirf,uart-dma-rx-channel = <6>;
225 sirf,uart-dma-tx-channel = <7>;
218 }; 226 };
219 227
220 usp0: usp@b0080000 { 228 usp0: usp@b0080000 {
@@ -222,7 +230,10 @@
222 compatible = "sirf,prima2-usp"; 230 compatible = "sirf,prima2-usp";
223 reg = <0xb0080000 0x10000>; 231 reg = <0xb0080000 0x10000>;
224 interrupts = <20>; 232 interrupts = <20>;
233 fifosize = <128>;
225 clocks = <&clks 28>; 234 clocks = <&clks 28>;
235 sirf,usp-dma-rx-channel = <17>;
236 sirf,usp-dma-tx-channel = <18>;
226 }; 237 };
227 238
228 usp1: usp@b0090000 { 239 usp1: usp@b0090000 {
@@ -230,7 +241,10 @@
230 compatible = "sirf,prima2-usp"; 241 compatible = "sirf,prima2-usp";
231 reg = <0xb0090000 0x10000>; 242 reg = <0xb0090000 0x10000>;
232 interrupts = <21>; 243 interrupts = <21>;
244 fifosize = <128>;
233 clocks = <&clks 29>; 245 clocks = <&clks 29>;
246 sirf,usp-dma-rx-channel = <14>;
247 sirf,usp-dma-tx-channel = <15>;
234 }; 248 };
235 249
236 usp2: usp@b00a0000 { 250 usp2: usp@b00a0000 {
@@ -238,7 +252,10 @@
238 compatible = "sirf,prima2-usp"; 252 compatible = "sirf,prima2-usp";
239 reg = <0xb00a0000 0x10000>; 253 reg = <0xb00a0000 0x10000>;
240 interrupts = <22>; 254 interrupts = <22>;
255 fifosize = <128>;
241 clocks = <&clks 30>; 256 clocks = <&clks 30>;
257 sirf,usp-dma-rx-channel = <10>;
258 sirf,usp-dma-tx-channel = <11>;
242 }; 259 };
243 260
244 dmac0: dma-controller@b00b0000 { 261 dmac0: dma-controller@b00b0000 {
@@ -261,6 +278,8 @@
261 compatible = "sirf,prima2-vip"; 278 compatible = "sirf,prima2-vip";
262 reg = <0xb00C0000 0x10000>; 279 reg = <0xb00C0000 0x10000>;
263 clocks = <&clks 31>; 280 clocks = <&clks 31>;
281 interrupts = <14>;
282 sirf,vip-dma-rx-channel = <16>;
264 }; 283 };
265 284
266 spi0: spi@b00d0000 { 285 spi0: spi@b00d0000 {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 6c26caa880f2..658fcc537576 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -193,7 +193,7 @@
193 }; 193 };
194 194
195 sdhi0: sdhi@ee100000 { 195 sdhi0: sdhi@ee100000 {
196 compatible = "renesas,r8a73a4-sdhi"; 196 compatible = "renesas,sdhi-r8a73a4";
197 reg = <0 0xee100000 0 0x100>; 197 reg = <0 0xee100000 0 0x100>;
198 interrupt-parent = <&gic>; 198 interrupt-parent = <&gic>;
199 interrupts = <0 165 4>; 199 interrupts = <0 165 4>;
@@ -202,7 +202,7 @@
202 }; 202 };
203 203
204 sdhi1: sdhi@ee120000 { 204 sdhi1: sdhi@ee120000 {
205 compatible = "renesas,r8a73a4-sdhi"; 205 compatible = "renesas,sdhi-r8a73a4";
206 reg = <0 0xee120000 0 0x100>; 206 reg = <0 0xee120000 0 0x100>;
207 interrupt-parent = <&gic>; 207 interrupt-parent = <&gic>;
208 interrupts = <0 166 4>; 208 interrupts = <0 166 4>;
@@ -211,7 +211,7 @@
211 }; 211 };
212 212
213 sdhi2: sdhi@ee140000 { 213 sdhi2: sdhi@ee140000 {
214 compatible = "renesas,r8a73a4-sdhi"; 214 compatible = "renesas,sdhi-r8a73a4";
215 reg = <0 0xee140000 0 0x100>; 215 reg = <0 0xee140000 0 0x100>;
216 interrupt-parent = <&gic>; 216 interrupt-parent = <&gic>;
217 interrupts = <0 167 4>; 217 interrupts = <0 167 4>;
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 45ac404ab6d8..3577aba82583 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -96,6 +96,5 @@
96 pfc: pfc@fffc0000 { 96 pfc: pfc@fffc0000 {
97 compatible = "renesas,pfc-r8a7778"; 97 compatible = "renesas,pfc-r8a7778";
98 reg = <0xfffc000 0x118>; 98 reg = <0xfffc000 0x118>;
99 #gpio-range-cells = <3>;
100 }; 99 };
101}; 100};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 23a62447359c..ebbe507fcbfa 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -188,7 +188,6 @@
188 pfc: pfc@fffc0000 { 188 pfc: pfc@fffc0000 {
189 compatible = "renesas,pfc-r8a7779"; 189 compatible = "renesas,pfc-r8a7779";
190 reg = <0xfffc0000 0x23c>; 190 reg = <0xfffc0000 0x23c>;
191 #gpio-range-cells = <3>;
192 }; 191 };
193 192
194 thermal@ffc48000 { 193 thermal@ffc48000 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 3b879e7c697c..413b4c29e782 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -148,11 +148,10 @@
148 pfc: pfc@e6060000 { 148 pfc: pfc@e6060000 {
149 compatible = "renesas,pfc-r8a7790"; 149 compatible = "renesas,pfc-r8a7790";
150 reg = <0 0xe6060000 0 0x250>; 150 reg = <0 0xe6060000 0 0x250>;
151 #gpio-range-cells = <3>;
152 }; 151 };
153 152
154 sdhi0: sdhi@ee100000 { 153 sdhi0: sdhi@ee100000 {
155 compatible = "renesas,r8a7790-sdhi"; 154 compatible = "renesas,sdhi-r8a7790";
156 reg = <0 0xee100000 0 0x100>; 155 reg = <0 0xee100000 0 0x100>;
157 interrupt-parent = <&gic>; 156 interrupt-parent = <&gic>;
158 interrupts = <0 165 4>; 157 interrupts = <0 165 4>;
@@ -161,7 +160,7 @@
161 }; 160 };
162 161
163 sdhi1: sdhi@ee120000 { 162 sdhi1: sdhi@ee120000 {
164 compatible = "renesas,r8a7790-sdhi"; 163 compatible = "renesas,sdhi-r8a7790";
165 reg = <0 0xee120000 0 0x100>; 164 reg = <0 0xee120000 0 0x100>;
166 interrupt-parent = <&gic>; 165 interrupt-parent = <&gic>;
167 interrupts = <0 166 4>; 166 interrupts = <0 166 4>;
@@ -170,7 +169,7 @@
170 }; 169 };
171 170
172 sdhi2: sdhi@ee140000 { 171 sdhi2: sdhi@ee140000 {
173 compatible = "renesas,r8a7790-sdhi"; 172 compatible = "renesas,sdhi-r8a7790";
174 reg = <0 0xee140000 0 0x100>; 173 reg = <0 0xee140000 0 0x100>;
175 interrupt-parent = <&gic>; 174 interrupt-parent = <&gic>;
176 interrupts = <0 167 4>; 175 interrupts = <0 167 4>;
@@ -179,7 +178,7 @@
179 }; 178 };
180 179
181 sdhi3: sdhi@ee160000 { 180 sdhi3: sdhi@ee160000 {
182 compatible = "renesas,r8a7790-sdhi"; 181 compatible = "renesas,sdhi-r8a7790";
183 reg = <0 0xee160000 0 0x100>; 182 reg = <0 0xee160000 0 0x100>;
184 interrupt-parent = <&gic>; 183 interrupt-parent = <&gic>;
185 interrupts = <0 168 4>; 184 interrupts = <0 168 4>;
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index ba59a5875a10..3955c7606a6f 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -196,7 +196,7 @@
196 }; 196 };
197 197
198 sdhi0: sdhi@ee100000 { 198 sdhi0: sdhi@ee100000 {
199 compatible = "renesas,r8a7740-sdhi"; 199 compatible = "renesas,sdhi-r8a7740";
200 reg = <0xee100000 0x100>; 200 reg = <0xee100000 0x100>;
201 interrupt-parent = <&gic>; 201 interrupt-parent = <&gic>;
202 interrupts = <0 83 4 202 interrupts = <0 83 4
@@ -208,7 +208,7 @@
208 208
209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */ 209 /* SDHI1 and SDHI2 have no CD pins, no need for CD IRQ */
210 sdhi1: sdhi@ee120000 { 210 sdhi1: sdhi@ee120000 {
211 compatible = "renesas,r8a7740-sdhi"; 211 compatible = "renesas,sdhi-r8a7740";
212 reg = <0xee120000 0x100>; 212 reg = <0xee120000 0x100>;
213 interrupt-parent = <&gic>; 213 interrupt-parent = <&gic>;
214 interrupts = <0 88 4 214 interrupts = <0 88 4
@@ -219,7 +219,7 @@
219 }; 219 };
220 220
221 sdhi2: sdhi@ee140000 { 221 sdhi2: sdhi@ee140000 {
222 compatible = "renesas,r8a7740-sdhi"; 222 compatible = "renesas,sdhi-r8a7740";
223 reg = <0xee140000 0x100>; 223 reg = <0xee140000 0x100>;
224 interrupt-parent = <&gic>; 224 interrupt-parent = <&gic>;
225 interrupts = <0 104 4 225 interrupts = <0 104 4
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8e..2a45092a40e3 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
20# $4 - default install path (blank if root directory) 20# $4 - default install path (blank if root directory)
21# 21#
22 22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
23# User may have a custom install script 37# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 117f955a2a06..8e1a0245907f 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -269,6 +269,11 @@ static const struct edmacc_param dummy_paramset = {
269 .ccnt = 1, 269 .ccnt = 1,
270}; 270};
271 271
272static const struct of_device_id edma_of_ids[] = {
273 { .compatible = "ti,edma3", },
274 {}
275};
276
272/*****************************************************************************/ 277/*****************************************************************************/
273 278
274static void map_dmach_queue(unsigned ctlr, unsigned ch_no, 279static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
@@ -560,14 +565,38 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
560static int prepare_unused_channel_list(struct device *dev, void *data) 565static int prepare_unused_channel_list(struct device *dev, void *data)
561{ 566{
562 struct platform_device *pdev = to_platform_device(dev); 567 struct platform_device *pdev = to_platform_device(dev);
563 int i, ctlr; 568 int i, count, ctlr;
569 struct of_phandle_args dma_spec;
564 570
571 if (dev->of_node) {
572 count = of_property_count_strings(dev->of_node, "dma-names");
573 if (count < 0)
574 return 0;
575 for (i = 0; i < count; i++) {
576 if (of_parse_phandle_with_args(dev->of_node, "dmas",
577 "#dma-cells", i,
578 &dma_spec))
579 continue;
580
581 if (!of_match_node(edma_of_ids, dma_spec.np)) {
582 of_node_put(dma_spec.np);
583 continue;
584 }
585
586 clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
587 edma_cc[0]->edma_unused);
588 of_node_put(dma_spec.np);
589 }
590 return 0;
591 }
592
593 /* For non-OF case */
565 for (i = 0; i < pdev->num_resources; i++) { 594 for (i = 0; i < pdev->num_resources; i++) {
566 if ((pdev->resource[i].flags & IORESOURCE_DMA) && 595 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
567 (int)pdev->resource[i].start >= 0) { 596 (int)pdev->resource[i].start >= 0) {
568 ctlr = EDMA_CTLR(pdev->resource[i].start); 597 ctlr = EDMA_CTLR(pdev->resource[i].start);
569 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), 598 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
570 edma_cc[ctlr]->edma_unused); 599 edma_cc[ctlr]->edma_unused);
571 } 600 }
572 } 601 }
573 602
@@ -1762,11 +1791,6 @@ static int edma_probe(struct platform_device *pdev)
1762 return 0; 1791 return 0;
1763} 1792}
1764 1793
1765static const struct of_device_id edma_of_ids[] = {
1766 { .compatible = "ti,edma3", },
1767 {}
1768};
1769
1770static struct platform_driver edma_driver = { 1794static struct platform_driver edma_driver = {
1771 .driver = { 1795 .driver = {
1772 .name = "edma", 1796 .name = "edma",
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 370236dd1a03..990250965f2c 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
51{ 51{
52 phys_reset_t phys_reset; 52 phys_reset_t phys_reset;
53 53
54 BUG_ON(!platform_ops); 54 if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
55 return;
55 BUG_ON(!irqs_disabled()); 56 BUG_ON(!irqs_disabled());
56 57
57 /* 58 /*
@@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
93{ 94{
94 phys_reset_t phys_reset; 95 phys_reset_t phys_reset;
95 96
96 BUG_ON(!platform_ops); 97 if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
98 return;
97 BUG_ON(!irqs_disabled()); 99 BUG_ON(!irqs_disabled());
98 100
99 /* Very similar to mcpm_cpu_power_down() */ 101 /* Very similar to mcpm_cpu_power_down() */
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
index d56c932580eb..025f6ce38596 100644
--- a/arch/arm/common/sharpsl_param.c
+++ b/arch/arm/common/sharpsl_param.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <asm/mach/sharpsl_param.h> 17#include <asm/mach/sharpsl_param.h>
18#include <asm/memory.h>
18 19
19/* 20/*
20 * Certain hardware parameters determined at the time of device manufacture, 21 * Certain hardware parameters determined at the time of device manufacture,
@@ -25,8 +26,10 @@
25 */ 26 */
26#ifdef CONFIG_ARCH_SA1100 27#ifdef CONFIG_ARCH_SA1100
27#define PARAM_BASE 0xe8ffc000 28#define PARAM_BASE 0xe8ffc000
29#define param_start(x) (void *)(x)
28#else 30#else
29#define PARAM_BASE 0xa0000a00 31#define PARAM_BASE 0xa0000a00
32#define param_start(x) __va(x)
30#endif 33#endif
31#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a ) 34#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
32 35
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
41 44
42void sharpsl_save_param(void) 45void sharpsl_save_param(void)
43{ 46{
44 memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info)); 47 memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
45 48
46 if (sharpsl_param.comadj_keyword != COMADJ_MAGIC) 49 if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
47 sharpsl_param.comadj=-1; 50 sharpsl_param.comadj=-1;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index f3935b46df29..119fc378fc52 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -135,6 +135,7 @@ CONFIG_MMC=y
135CONFIG_MMC_ARMMMCI=y 135CONFIG_MMC_ARMMMCI=y
136CONFIG_MMC_SDHCI=y 136CONFIG_MMC_SDHCI=y
137CONFIG_MMC_SDHCI_PLTFM=y 137CONFIG_MMC_SDHCI_PLTFM=y
138CONFIG_MMC_SDHCI_ESDHC_IMX=y
138CONFIG_MMC_SDHCI_TEGRA=y 139CONFIG_MMC_SDHCI_TEGRA=y
139CONFIG_MMC_SDHCI_SPEAR=y 140CONFIG_MMC_SDHCI_SPEAR=y
140CONFIG_MMC_OMAP=y 141CONFIG_MMC_OMAP=y
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 19d6cd6f29f9..3a14ea8fe97e 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -148,7 +148,7 @@ AES_Te:
148@ const AES_KEY *key) { 148@ const AES_KEY *key) {
149.align 5 149.align 5
150ENTRY(AES_encrypt) 150ENTRY(AES_encrypt)
151 sub r3,pc,#8 @ AES_encrypt 151 adr r3,AES_encrypt
152 stmdb sp!,{r1,r4-r12,lr} 152 stmdb sp!,{r1,r4-r12,lr}
153 mov r12,r0 @ inp 153 mov r12,r0 @ inp
154 mov r11,r2 154 mov r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
381.align 5 381.align 5
382ENTRY(private_AES_set_encrypt_key) 382ENTRY(private_AES_set_encrypt_key)
383_armv4_AES_set_encrypt_key: 383_armv4_AES_set_encrypt_key:
384 sub r3,pc,#8 @ AES_set_encrypt_key 384 adr r3,_armv4_AES_set_encrypt_key
385 teq r0,#0 385 teq r0,#0
386 moveq r0,#-1 386 moveq r0,#-1
387 beq .Labrt 387 beq .Labrt
@@ -843,7 +843,7 @@ AES_Td:
843@ const AES_KEY *key) { 843@ const AES_KEY *key) {
844.align 5 844.align 5
845ENTRY(AES_decrypt) 845ENTRY(AES_decrypt)
846 sub r3,pc,#8 @ AES_decrypt 846 adr r3,AES_decrypt
847 stmdb sp!,{r1,r4-r12,lr} 847 stmdb sp!,{r1,r4-r12,lr}
848 mov r12,r0 @ inp 848 mov r12,r0 @ inp
849 mov r11,r2 849 mov r11,r2
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..59ceae8f3c95 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -31,5 +31,4 @@ generic-y += termbits.h
31generic-y += termios.h 31generic-y += termios.h
32generic-y += timex.h 32generic-y += timex.h
33generic-y += trace_clock.h 33generic-y += trace_clock.h
34generic-y += types.h
35generic-y += unaligned.h 34generic-y += unaligned.h
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..863c892b4aaa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
16 16
17static __always_inline bool arch_static_branch(struct static_key *key) 17static __always_inline bool arch_static_branch(struct static_key *key)
18{ 18{
19 asm goto("1:\n\t" 19 asm_volatile_goto("1:\n\t"
20 JUMP_LABEL_NOP "\n\t" 20 JUMP_LABEL_NOP "\n\t"
21 ".pushsection __jump_table, \"aw\"\n\t" 21 ".pushsection __jump_table, \"aw\"\n\t"
22 ".word 1b, %l[l_yes], %c0\n\t" 22 ".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a5..fc82a88f5b69 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
76 * 76 *
77 * This must be called with interrupts disabled. 77 * This must be called with interrupts disabled.
78 * 78 *
79 * This does not return. Re-entry in the kernel is expected via 79 * On success this does not return. Re-entry in the kernel is expected
80 * mcpm_entry_point. 80 * via mcpm_entry_point.
81 *
82 * This will return if mcpm_platform_register() has not been called
83 * previously, in which case the caller should take appropriate action.
81 */ 84 */
82void mcpm_cpu_power_down(void); 85void mcpm_cpu_power_down(void);
83 86
@@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
98 * 101 *
99 * This must be called with interrupts disabled. 102 * This must be called with interrupts disabled.
100 * 103 *
101 * This does not return. Re-entry in the kernel is expected via 104 * On success this does not return. Re-entry in the kernel is expected
102 * mcpm_entry_point. 105 * via mcpm_entry_point.
106 *
107 * This will return if mcpm_platform_register() has not been called
108 * previously, in which case the caller should take appropriate action.
103 */ 109 */
104void mcpm_cpu_suspend(u64 expected_residency); 110void mcpm_cpu_suspend(u64 expected_residency);
105 111
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e8092..73ddd7239b33 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
57 unsigned int i, unsigned int n, 57 unsigned int i, unsigned int n,
58 unsigned long *args) 58 unsigned long *args)
59{ 59{
60 if (n == 0)
61 return;
62
60 if (i + n > SYSCALL_MAX_ARGS) { 63 if (i + n > SYSCALL_MAX_ARGS) {
61 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; 64 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
62 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; 65 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
81 unsigned int i, unsigned int n, 84 unsigned int i, unsigned int n,
82 const unsigned long *args) 85 const unsigned long *args)
83{ 86{
87 if (n == 0)
88 return;
89
84 if (i + n > SYSCALL_MAX_ARGS) { 90 if (i + n > SYSCALL_MAX_ARGS) {
85 pr_warning("%s called with max args %d, handling only %d\n", 91 pr_warning("%s called with max args %d, handling only %d\n",
86 __func__, i + n, SYSCALL_MAX_ARGS); 92 __func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7e1f76027f66..72abdc541f38 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,6 +19,13 @@
19#include <asm/unified.h> 19#include <asm/unified.h>
20#include <asm/compiler.h> 20#include <asm/compiler.h>
21 21
22#if __LINUX_ARM_ARCH__ < 6
23#include <asm-generic/uaccess-unaligned.h>
24#else
25#define __get_user_unaligned __get_user
26#define __put_user_unaligned __put_user
27#endif
28
22#define VERIFY_READ 0 29#define VERIFY_READ 0
23#define VERIFY_WRITE 1 30#define VERIFY_WRITE 1
24 31
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 74ad15d1a065..bc6bd9683ba4 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
443 443
444 add r1, sp, #S_OFF 444 add r1, sp, #S_OFF
445 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 4452: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back 446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
447 bcs arm_syscall 447 bcs arm_syscall
4482: mov why, #0 @ no longer a real syscall 448 mov why, #0 @ no longer a real syscall
449 b sys_ni_syscall @ not private func 449 b sys_ni_syscall @ not private func
450 450
451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) 451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index de23a9beed13..39f89fbd5111 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -329,10 +329,10 @@
329#ifdef CONFIG_CONTEXT_TRACKING 329#ifdef CONFIG_CONTEXT_TRACKING
330 .if \save 330 .if \save
331 stmdb sp!, {r0-r3, ip, lr} 331 stmdb sp!, {r0-r3, ip, lr}
332 bl user_exit 332 bl context_tracking_user_exit
333 ldmia sp!, {r0-r3, ip, lr} 333 ldmia sp!, {r0-r3, ip, lr}
334 .else 334 .else
335 bl user_exit 335 bl context_tracking_user_exit
336 .endif 336 .endif
337#endif 337#endif
338 .endm 338 .endm
@@ -341,10 +341,10 @@
341#ifdef CONFIG_CONTEXT_TRACKING 341#ifdef CONFIG_CONTEXT_TRACKING
342 .if \save 342 .if \save
343 stmdb sp!, {r0-r3, ip, lr} 343 stmdb sp!, {r0-r3, ip, lr}
344 bl user_enter 344 bl context_tracking_user_enter
345 ldmia sp!, {r0-r3, ip, lr} 345 ldmia sp!, {r0-r3, ip, lr}
346 .else 346 .else
347 bl user_enter 347 bl context_tracking_user_enter
348 .endif 348 .endif
349#endif 349#endif
350 .endm 350 .endm
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c7cc1e03473..476de57dcef2 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -487,7 +487,26 @@ __fixup_smp:
487 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR 487 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
488 and r0, r0, #0xc0000000 @ multiprocessing extensions and 488 and r0, r0, #0xc0000000 @ multiprocessing extensions and
489 teq r0, #0x80000000 @ not part of a uniprocessor system? 489 teq r0, #0x80000000 @ not part of a uniprocessor system?
490 moveq pc, lr @ yes, assume SMP 490 bne __fixup_smp_on_up @ no, assume UP
491
492 @ Core indicates it is SMP. Check for Aegis SOC where a single
493 @ Cortex-A9 CPU is present but SMP operations fault.
494 mov r4, #0x41000000
495 orr r4, r4, #0x0000c000
496 orr r4, r4, #0x00000090
497 teq r3, r4 @ Check for ARM Cortex-A9
498 movne pc, lr @ Not ARM Cortex-A9,
499
500 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
501 @ below address check will need to be #ifdef'd or equivalent
502 @ for the Aegis platform.
503 mrc p15, 4, r0, c15, c0 @ get SCU base address
504 teq r0, #0x0 @ '0' on actual UP A9 hardware
505 beq __fixup_smp_on_up @ So it's an A9 UP
506 ldr r0, [r0, #4] @ read SCU Config
507 and r0, r0, #0x3 @ number of CPUs
508 teq r0, #0x0 @ is 1?
509 movne pc, lr
491 510
492__fixup_smp_on_up: 511__fixup_smp_on_up:
493 adr r0, 1f 512 adr r0, 1f
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 71e08baee209..c02ba4af599f 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
58 */ 58 */
59int kvm_reset_vcpu(struct kvm_vcpu *vcpu) 59int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
60{ 60{
61 struct kvm_regs *cpu_reset; 61 struct kvm_regs *reset_regs;
62 const struct kvm_irq_level *cpu_vtimer_irq; 62 const struct kvm_irq_level *cpu_vtimer_irq;
63 63
64 switch (vcpu->arch.target) { 64 switch (vcpu->arch.target) {
65 case KVM_ARM_TARGET_CORTEX_A15: 65 case KVM_ARM_TARGET_CORTEX_A15:
66 if (vcpu->vcpu_id > a15_max_cpu_idx) 66 if (vcpu->vcpu_id > a15_max_cpu_idx)
67 return -EINVAL; 67 return -EINVAL;
68 cpu_reset = &a15_regs_reset; 68 reset_regs = &a15_regs_reset;
69 vcpu->arch.midr = read_cpuid_id(); 69 vcpu->arch.midr = read_cpuid_id();
70 cpu_vtimer_irq = &a15_vtimer_irq; 70 cpu_vtimer_irq = &a15_vtimer_irq;
71 break; 71 break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
74 } 74 }
75 75
76 /* Reset core registers */ 76 /* Reset core registers */
77 memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); 77 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
78 78
79 /* Reset CP15 registers */ 79 /* Reset CP15 registers */
80 kvm_reset_coprocs(vcpu); 80 kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 180b3024bec3..f607deb40f4d 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -93,7 +93,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
93 93
94static struct irqaction at91rm9200_timer_irq = { 94static struct irqaction at91rm9200_timer_irq = {
95 .name = "at91_tick", 95 .name = "at91_tick",
96 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 96 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
97 .handler = at91rm9200_timer_interrupt, 97 .handler = at91rm9200_timer_interrupt,
98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 98 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
99}; 99};
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 3a4bc2e1a65e..bb392320a0dd 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -171,7 +171,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
171 171
172static struct irqaction at91sam926x_pit_irq = { 172static struct irqaction at91sam926x_pit_irq = {
173 .name = "at91_tick", 173 .name = "at91_tick",
174 .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 174 .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
175 .handler = at91sam926x_pit_interrupt, 175 .handler = at91sam926x_pit_interrupt,
176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS, 176 .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
177}; 177};
diff --git a/arch/arm/mach-at91/at91sam9g45_reset.S b/arch/arm/mach-at91/at91sam9g45_reset.S
index 721a1a34dd1d..c40c1e2ef80f 100644
--- a/arch/arm/mach-at91/at91sam9g45_reset.S
+++ b/arch/arm/mach-at91/at91sam9g45_reset.S
@@ -16,11 +16,17 @@
16#include "at91_rstc.h" 16#include "at91_rstc.h"
17 .arm 17 .arm
18 18
19/*
20 * at91_ramc_base is an array of void*,
21 * initialized to NULL if only one DDR controller is present (board file or DT)
22 */
19 .globl at91sam9g45_restart 23 .globl at91sam9g45_restart
20 24
21at91sam9g45_restart: 25at91sam9g45_restart:
22 ldr r5, =at91_ramc_base @ preload constants 26 ldr r5, =at91_ramc_base @ preload constants
23 ldr r0, [r5] 27 ldr r0, [r5]
28 ldr r5, [r5, #4] @ ddr1
29 cmp r5, #0
24 ldr r4, =at91_rstc_base 30 ldr r4, =at91_rstc_base
25 ldr r1, [r4] 31 ldr r1, [r4]
26 32
@@ -30,6 +36,8 @@ at91sam9g45_restart:
30 36
31 .balign 32 @ align to cache line 37 .balign 32 @ align to cache line
32 38
39 strne r2, [r5, #AT91_DDRSDRC_RTR] @ disable DDR1 access
40 strne r3, [r5, #AT91_DDRSDRC_LPR] @ power down DDR1
33 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access 41 str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access
34 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0 42 str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0
35 str r4, [r1, #AT91_RSTC_CR] @ reset processor 43 str r4, [r1, #AT91_RSTC_CR] @ reset processor
diff --git a/arch/arm/mach-at91/at91x40_time.c b/arch/arm/mach-at91/at91x40_time.c
index 2919eba41ff4..c0e637adf65d 100644
--- a/arch/arm/mach-at91/at91x40_time.c
+++ b/arch/arm/mach-at91/at91x40_time.c
@@ -57,7 +57,7 @@ static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id)
57 57
58static struct irqaction at91x40_timer_irq = { 58static struct irqaction at91x40_timer_irq = {
59 .name = "at91_tick", 59 .name = "at91_tick",
60 .flags = IRQF_DISABLED | IRQF_TIMER, 60 .flags = IRQF_TIMER,
61 .handler = at91x40_timer_interrupt 61 .handler = at91x40_timer_interrupt
62}; 62};
63 63
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 92b7f770615a..4078ba93776b 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -176,7 +176,7 @@ static struct at24_platform_data eeprom_info = {
176 .context = (void *)0x7f00, 176 .context = (void *)0x7f00,
177}; 177};
178 178
179static struct snd_platform_data dm365_evm_snd_data = { 179static struct snd_platform_data dm365_evm_snd_data __maybe_unused = {
180 .asp_chan_q = EVENTQ_3, 180 .asp_chan_q = EVENTQ_3,
181}; 181};
182 182
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index 52b8571b2e70..ce402cd21fa0 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,8 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18#include <linux/platform_device.h>
19
20#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) 18#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
21#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) 19#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
22#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800) 20#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
@@ -39,6 +37,8 @@
39#define UART_DM646X_SCR_TX_WATERMARK 0x08 37#define UART_DM646X_SCR_TX_WATERMARK 0x08
40 38
41#ifndef __ASSEMBLY__ 39#ifndef __ASSEMBLY__
40#include <linux/platform_device.h>
41
42extern int davinci_serial_init(struct platform_device *); 42extern int davinci_serial_init(struct platform_device *);
43#endif 43#endif
44 44
diff --git a/arch/arm/mach-integrator/pci_v3.h b/arch/arm/mach-integrator/pci_v3.h
index 755fd29fed4a..06a9e2e7d007 100644
--- a/arch/arm/mach-integrator/pci_v3.h
+++ b/arch/arm/mach-integrator/pci_v3.h
@@ -1,2 +1,9 @@
1/* Simple oneliner include to the PCIv3 early init */ 1/* Simple oneliner include to the PCIv3 early init */
2#ifdef CONFIG_PCI
2extern int pci_v3_early_init(void); 3extern int pci_v3_early_init(void);
4#else
5static inline int pci_v3_early_init(void)
6{
7 return 0;
8}
9#endif
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 4c24303ec481..58adf2fd9cfc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -140,6 +140,7 @@ int __init coherency_init(void)
140 coherency_base = of_iomap(np, 0); 140 coherency_base = of_iomap(np, 0);
141 coherency_cpu_base = of_iomap(np, 1); 141 coherency_cpu_base = of_iomap(np, 1);
142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0); 142 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
143 of_node_put(np);
143 } 144 }
144 145
145 return 0; 146 return 0;
@@ -147,9 +148,14 @@ int __init coherency_init(void)
147 148
148static int __init coherency_late_init(void) 149static int __init coherency_late_init(void)
149{ 150{
150 if (of_find_matching_node(NULL, of_coherency_table)) 151 struct device_node *np;
152
153 np = of_find_matching_node(NULL, of_coherency_table);
154 if (np) {
151 bus_register_notifier(&platform_bus_type, 155 bus_register_notifier(&platform_bus_type,
152 &mvebu_hwcc_platform_nb); 156 &mvebu_hwcc_platform_nb);
157 of_node_put(np);
158 }
153 return 0; 159 return 0;
154} 160}
155 161
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 3cc4bef6401c..27fc4f049474 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -67,6 +67,7 @@ int __init armada_370_xp_pmsu_init(void)
67 pr_info("Initializing Power Management Service Unit\n"); 67 pr_info("Initializing Power Management Service Unit\n");
68 pmsu_mp_base = of_iomap(np, 0); 68 pmsu_mp_base = of_iomap(np, 0);
69 pmsu_reset_base = of_iomap(np, 1); 69 pmsu_reset_base = of_iomap(np, 1);
70 of_node_put(np);
70 } 71 }
71 72
72 return 0; 73 return 0;
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index f875124ff4f9..5175083cdb34 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -98,6 +98,7 @@ static int __init mvebu_system_controller_init(void)
98 BUG_ON(!match); 98 BUG_ON(!match);
99 system_controller_base = of_iomap(np, 0); 99 system_controller_base = of_iomap(np, 0);
100 mvebu_sc = (struct mvebu_system_controller *)match->data; 100 mvebu_sc = (struct mvebu_system_controller *)match->data;
101 of_node_put(np);
101 } 102 }
102 103
103 return 0; 104 return 0;
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 39c78387ddec..87162e1b94a5 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
129 .restart = omap3xxx_restart, 129 .restart = omap3xxx_restart,
130MACHINE_END 130MACHINE_END
131 131
132static const char *omap36xx_boards_compat[] __initdata = {
133 "ti,omap36xx",
134 NULL,
135};
136
137DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
138 .reserve = omap_reserve,
139 .map_io = omap3_map_io,
140 .init_early = omap3630_init_early,
141 .init_irq = omap_intc_of_init,
142 .handle_irq = omap3_intc_handle_irq,
143 .init_machine = omap_generic_init,
144 .init_late = omap3_init_late,
145 .init_time = omap3_sync32k_timer_init,
146 .dt_compat = omap36xx_boards_compat,
147 .restart = omap3xxx_restart,
148MACHINE_END
149
132static const char *omap3_gp_boards_compat[] __initdata = { 150static const char *omap3_gp_boards_compat[] __initdata = {
133 "ti,omap3-beagle", 151 "ti,omap3-beagle",
134 "timll,omap3-devkit8000", 152 "timll,omap3-devkit8000",
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c3270c0f1fce..f6fe388af989 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
167 .name = "lp5523:kb1", 167 .name = "lp5523:kb1",
168 .chan_nr = 0, 168 .chan_nr = 0,
169 .led_current = 50, 169 .led_current = 50,
170 .max_current = 100,
170 }, { 171 }, {
171 .name = "lp5523:kb2", 172 .name = "lp5523:kb2",
172 .chan_nr = 1, 173 .chan_nr = 1,
173 .led_current = 50, 174 .led_current = 50,
175 .max_current = 100,
174 }, { 176 }, {
175 .name = "lp5523:kb3", 177 .name = "lp5523:kb3",
176 .chan_nr = 2, 178 .chan_nr = 2,
177 .led_current = 50, 179 .led_current = 50,
180 .max_current = 100,
178 }, { 181 }, {
179 .name = "lp5523:kb4", 182 .name = "lp5523:kb4",
180 .chan_nr = 3, 183 .chan_nr = 3,
181 .led_current = 50, 184 .led_current = 50,
185 .max_current = 100,
182 }, { 186 }, {
183 .name = "lp5523:b", 187 .name = "lp5523:b",
184 .chan_nr = 4, 188 .chan_nr = 4,
185 .led_current = 50, 189 .led_current = 50,
190 .max_current = 100,
186 }, { 191 }, {
187 .name = "lp5523:g", 192 .name = "lp5523:g",
188 .chan_nr = 5, 193 .chan_nr = 5,
189 .led_current = 50, 194 .led_current = 50,
195 .max_current = 100,
190 }, { 196 }, {
191 .name = "lp5523:r", 197 .name = "lp5523:r",
192 .chan_nr = 6, 198 .chan_nr = 6,
193 .led_current = 50, 199 .led_current = 50,
200 .max_current = 100,
194 }, { 201 }, {
195 .name = "lp5523:kb5", 202 .name = "lp5523:kb5",
196 .chan_nr = 7, 203 .chan_nr = 7,
197 .led_current = 50, 204 .led_current = 50,
205 .max_current = 100,
198 }, { 206 }, {
199 .name = "lp5523:kb6", 207 .name = "lp5523:kb6",
200 .chan_nr = 8, 208 .chan_nr = 8,
201 .led_current = 50, 209 .led_current = 50,
210 .max_current = 100,
202 } 211 }
203}; 212};
204 213
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 64b5a8346982..8b6876c98ce1 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
272 struct gpmc_timings t; 272 struct gpmc_timings t;
273 int ret; 273 int ret;
274 274
275 if (gpmc_onenand_data->of_node) 275 if (gpmc_onenand_data->of_node) {
276 gpmc_read_settings_dt(gpmc_onenand_data->of_node, 276 gpmc_read_settings_dt(gpmc_onenand_data->of_node,
277 &onenand_async); 277 &onenand_async);
278 if (onenand_async.sync_read || onenand_async.sync_write) {
279 if (onenand_async.sync_write)
280 gpmc_onenand_data->flags |=
281 ONENAND_SYNC_READWRITE;
282 else
283 gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
284 onenand_async.sync_read = false;
285 onenand_async.sync_write = false;
286 }
287 }
278 288
279 omap2_onenand_set_async_mode(onenand_base); 289 omap2_onenand_set_async_mode(onenand_base);
280 290
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 5d2080ef7923..16f78a990d04 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -28,7 +28,7 @@
28#define OMAP_PULL_UP (1 << 4) 28#define OMAP_PULL_UP (1 << 4)
29#define OMAP_ALTELECTRICALSEL (1 << 5) 29#define OMAP_ALTELECTRICALSEL (1 << 5)
30 30
31/* 34xx specific mux bit defines */ 31/* omap3/4/5 specific mux bit defines */
32#define OMAP_INPUT_EN (1 << 8) 32#define OMAP_INPUT_EN (1 << 8)
33#define OMAP_OFF_EN (1 << 9) 33#define OMAP_OFF_EN (1 << 9)
34#define OMAP_OFFOUT_EN (1 << 10) 34#define OMAP_OFFOUT_EN (1 << 10)
@@ -36,8 +36,6 @@
36#define OMAP_OFF_PULL_EN (1 << 12) 36#define OMAP_OFF_PULL_EN (1 << 12)
37#define OMAP_OFF_PULL_UP (1 << 13) 37#define OMAP_OFF_PULL_UP (1 << 13)
38#define OMAP_WAKEUP_EN (1 << 14) 38#define OMAP_WAKEUP_EN (1 << 14)
39
40/* 44xx specific mux bit defines */
41#define OMAP_WAKEUP_EVENT (1 << 15) 39#define OMAP_WAKEUP_EVENT (1 << 15)
42 40
43/* Active pin states */ 41/* Active pin states */
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index fa74a0625da1..ead48fa5715e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
628#endif /* CONFIG_HAVE_ARM_TWD */ 628#endif /* CONFIG_HAVE_ARM_TWD */
629#endif /* CONFIG_ARCH_OMAP4 */ 629#endif /* CONFIG_ARCH_OMAP4 */
630 630
631#ifdef CONFIG_SOC_OMAP5 631#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
632void __init omap5_realtime_timer_init(void) 632void __init omap5_realtime_timer_init(void)
633{ 633{
634 omap4_sync32k_timer_init(); 634 omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
636 636
637 clocksource_of_init(); 637 clocksource_of_init();
638} 638}
639#endif /* CONFIG_SOC_OMAP5 */ 639#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
640 640
641/** 641/**
642 * omap_timer_init - build and register timer device with an 642 * omap_timer_init - build and register timer device with an
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 5bd1479d3deb..7f8f6076d360 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1108,9 +1108,9 @@ static const struct pinctrl_map eva_pinctrl_map[] = {
1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740", 1108 PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740",
1109 "fsib_mclk_in", "fsib"), 1109 "fsib_mclk_in", "fsib"),
1110 /* GETHER */ 1110 /* GETHER */
1111 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1111 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1112 "gether_mii", "gether"), 1112 "gether_mii", "gether"),
1113 PIN_MAP_MUX_GROUP_DEFAULT("sh-eth", "pfc-r8a7740", 1113 PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
1114 "gether_int", "gether"), 1114 "gether_int", "gether"),
1115 /* HDMI */ 1115 /* HDMI */
1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740", 1116 PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740",
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index ffb6f0ac7606..5930af8d434f 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -29,6 +29,7 @@
29#include <linux/pinctrl/machine.h> 29#include <linux/pinctrl/machine.h>
30#include <linux/platform_data/gpio-rcar.h> 30#include <linux/platform_data/gpio-rcar.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/phy.h>
32#include <linux/regulator/fixed.h> 33#include <linux/regulator/fixed.h>
33#include <linux/regulator/machine.h> 34#include <linux/regulator/machine.h>
34#include <linux/sh_eth.h> 35#include <linux/sh_eth.h>
@@ -155,6 +156,30 @@ static void __init lager_add_standard_devices(void)
155 &ether_pdata, sizeof(ether_pdata)); 156 &ether_pdata, sizeof(ether_pdata));
156} 157}
157 158
159/*
160 * Ether LEDs on the Lager board are named LINK and ACTIVE, which correspond
161 * to the non-default 01 setting of the Micrel KSZ8041 PHY control register 1,
162 * bits 14-15. We have to set them back to 01 from the default 00 value each
163 * time the PHY is reset. This also matters because the PHY's LED0 signal is
164 * connected to the SoC's ETH_LINK signal and in the PHY's default mode it will
165 * bounce on and off after each packet, which we apparently want to avoid.
166 */
167static int lager_ksz8041_fixup(struct phy_device *phydev)
168{
169 u16 phyctrl1 = phy_read(phydev, 0x1e);
170
171 phyctrl1 &= ~0xc000;
172 phyctrl1 |= 0x4000;
173 return phy_write(phydev, 0x1e, phyctrl1);
174}
175
176static void __init lager_init(void)
177{
178 lager_add_standard_devices();
179
180 phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
181}
182
158static const char *lager_boards_compat_dt[] __initdata = { 183static const char *lager_boards_compat_dt[] __initdata = {
159 "renesas,lager", 184 "renesas,lager",
160 NULL, 185 NULL,
@@ -163,6 +188,6 @@ static const char *lager_boards_compat_dt[] __initdata = {
163DT_MACHINE_START(LAGER_DT, "lager") 188DT_MACHINE_START(LAGER_DT, "lager")
164 .init_early = r8a7790_init_delay, 189 .init_early = r8a7790_init_delay,
165 .init_time = r8a7790_timer_init, 190 .init_time = r8a7790_timer_init,
166 .init_machine = lager_add_standard_devices, 191 .init_machine = lager_init,
167 .dt_compat = lager_boards_compat_dt, 192 .dt_compat = lager_boards_compat_dt,
168MACHINE_END 193MACHINE_END
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index 7aeb5d60e484..e6eb48192912 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -131,6 +131,16 @@ static void tc2_pm_down(u64 residency)
131 } else 131 } else
132 BUG(); 132 BUG();
133 133
134 /*
135 * If the CPU is committed to power down, make sure
136 * the power controller will be in charge of waking it
137 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU IF
138 * to the CPU by disabling the GIC CPU IF, to prevent wfi
139 * from completing execution behind the power controller's back
140 */
141 if (!skip_wfi)
142 gic_cpu_if_down();
143
134 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 144 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
135 arch_spin_unlock(&tc2_pm_lock); 145 arch_spin_unlock(&tc2_pm_lock);
136 146
@@ -231,7 +241,6 @@ static void tc2_pm_suspend(u64 residency)
231 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); 241 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
232 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); 242 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
233 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); 243 ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
234 gic_cpu_if_down();
235 tc2_pm_down(residency); 244 tc2_pm_down(residency);
236} 245}
237 246
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..1272ed202dde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1232 break; 1232 break;
1233 1233
1234 len = (j - i) << PAGE_SHIFT; 1234 len = (j - i) << PAGE_SHIFT;
1235 ret = iommu_map(mapping->domain, iova, phys, len, 0); 1235 ret = iommu_map(mapping->domain, iova, phys, len,
1236 IOMMU_READ|IOMMU_WRITE);
1236 if (ret < 0) 1237 if (ret < 0)
1237 goto fail; 1238 goto fail;
1238 iova += len; 1239 iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1431 GFP_KERNEL); 1432 GFP_KERNEL);
1432} 1433}
1433 1434
1435static int __dma_direction_to_prot(enum dma_data_direction dir)
1436{
1437 int prot;
1438
1439 switch (dir) {
1440 case DMA_BIDIRECTIONAL:
1441 prot = IOMMU_READ | IOMMU_WRITE;
1442 break;
1443 case DMA_TO_DEVICE:
1444 prot = IOMMU_READ;
1445 break;
1446 case DMA_FROM_DEVICE:
1447 prot = IOMMU_WRITE;
1448 break;
1449 default:
1450 prot = 0;
1451 }
1452
1453 return prot;
1454}
1455
1434/* 1456/*
1435 * Map a part of the scatter-gather list into contiguous io address space 1457 * Map a part of the scatter-gather list into contiguous io address space
1436 */ 1458 */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1444 int ret = 0; 1466 int ret = 0;
1445 unsigned int count; 1467 unsigned int count;
1446 struct scatterlist *s; 1468 struct scatterlist *s;
1469 int prot;
1447 1470
1448 size = PAGE_ALIGN(size); 1471 size = PAGE_ALIGN(size);
1449 *handle = DMA_ERROR_CODE; 1472 *handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1460 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 1483 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1461 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 1484 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1462 1485
1463 ret = iommu_map(mapping->domain, iova, phys, len, 0); 1486 prot = __dma_direction_to_prot(dir);
1487
1488 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1464 if (ret < 0) 1489 if (ret < 0)
1465 goto fail; 1490 goto fail;
1466 count += len >> PAGE_SHIFT; 1491 count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
1665 if (dma_addr == DMA_ERROR_CODE) 1690 if (dma_addr == DMA_ERROR_CODE)
1666 return dma_addr; 1691 return dma_addr;
1667 1692
1668 switch (dir) { 1693 prot = __dma_direction_to_prot(dir);
1669 case DMA_BIDIRECTIONAL:
1670 prot = IOMMU_READ | IOMMU_WRITE;
1671 break;
1672 case DMA_TO_DEVICE:
1673 prot = IOMMU_READ;
1674 break;
1675 case DMA_FROM_DEVICE:
1676 prot = IOMMU_WRITE;
1677 break;
1678 default:
1679 prot = 0;
1680 }
1681 1694
1682 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 1695 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
1683 if (ret < 0) 1696 if (ret < 0)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57b..18ec4c504abf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,7 +17,6 @@
17#include <linux/nodemask.h> 17#include <linux/nodemask.h>
18#include <linux/initrd.h> 18#include <linux/initrd.h>
19#include <linux/of_fdt.h> 19#include <linux/of_fdt.h>
20#include <linux/of_reserved_mem.h>
21#include <linux/highmem.h> 20#include <linux/highmem.h>
22#include <linux/gfp.h> 21#include <linux/gfp.h>
23#include <linux/memblock.h> 22#include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
379 if (mdesc->reserve) 378 if (mdesc->reserve)
380 mdesc->reserve(); 379 mdesc->reserve();
381 380
382 early_init_dt_scan_reserved_mem();
383
384 /* 381 /*
385 * reserve memory for DMA contigouos allocations, 382 * reserve memory for DMA contigouos allocations,
386 * must come from DMA area inside low memory 383 * must come from DMA area inside low memory
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f50d223a0bd3..99b44e0e8d86 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
930{ 930{
931 if (fp->bpf_func != sk_run_filter) 931 if (fp->bpf_func != sk_run_filter)
932 module_free(NULL, fp->bpf_func); 932 module_free(NULL, fp->bpf_func);
933 kfree(fp);
933} 934}
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe954d49..835c559786bd 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,13 +6,6 @@ config FRAME_POINTER
6 bool 6 bool
7 default y 7 default y
8 8
9config DEBUG_STACK_USAGE
10 bool "Enable stack utilization instrumentation"
11 depends on DEBUG_KERNEL
12 help
13 Enables the display of the minimum amount of free stack which each
14 task has ever had available in the sysrq-T output.
15
16config EARLY_PRINTK 9config EARLY_PRINTK
17 bool "Early printk support" 10 bool "Early printk support"
18 default y 11 default y
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5b3e83217b03..31c81e9b792e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -42,7 +42,7 @@ CONFIG_IP_PNP_BOOTP=y
42# CONFIG_WIRELESS is not set 42# CONFIG_WIRELESS is not set
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_DEVTMPFS=y 44CONFIG_DEVTMPFS=y
45# CONFIG_BLK_DEV is not set 45CONFIG_BLK_DEV=y
46CONFIG_SCSI=y 46CONFIG_SCSI=y
47# CONFIG_SCSI_PROC_FS is not set 47# CONFIG_SCSI_PROC_FS is not set
48CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
@@ -72,6 +72,7 @@ CONFIG_LOGO=y
72# CONFIG_IOMMU_SUPPORT is not set 72# CONFIG_IOMMU_SUPPORT is not set
73CONFIG_EXT2_FS=y 73CONFIG_EXT2_FS=y
74CONFIG_EXT3_FS=y 74CONFIG_EXT3_FS=y
75CONFIG_EXT4_FS=y
75# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 76# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
76# CONFIG_EXT3_FS_XATTR is not set 77# CONFIG_EXT3_FS_XATTR is not set
77CONFIG_FUSE_FS=y 78CONFIG_FUSE_FS=y
@@ -90,3 +91,5 @@ CONFIG_DEBUG_KERNEL=y
90CONFIG_DEBUG_INFO=y 91CONFIG_DEBUG_INFO=y
91# CONFIG_FTRACE is not set 92# CONFIG_FTRACE is not set
92CONFIG_ATOMIC64_SELFTEST=y 93CONFIG_ATOMIC64_SELFTEST=y
94CONFIG_VIRTIO_MMIO=y
95CONFIG_VIRTIO_BLK=y
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index edb3d5c73a32..7ecc2b23882e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,9 +166,10 @@ do { \
166 166
167#define get_user(x, ptr) \ 167#define get_user(x, ptr) \
168({ \ 168({ \
169 __typeof__(*(ptr)) __user *__p = (ptr); \
169 might_fault(); \ 170 might_fault(); \
170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \ 171 access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
171 __get_user((x), (ptr)) : \ 172 __get_user((x), __p) : \
172 ((x) = 0, -EFAULT); \ 173 ((x) = 0, -EFAULT); \
173}) 174})
174 175
@@ -227,9 +228,10 @@ do { \
227 228
228#define put_user(x, ptr) \ 229#define put_user(x, ptr) \
229({ \ 230({ \
231 __typeof__(*(ptr)) __user *__p = (ptr); \
230 might_fault(); \ 232 might_fault(); \
231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 233 access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
232 __put_user((x), (ptr)) : \ 234 __put_user((x), __p) : \
233 -EFAULT; \ 235 -EFAULT; \
234}) 236})
235 237
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 1f2e4d5a5c0f..bb785d23dbde 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -80,8 +80,10 @@ void fpsimd_thread_switch(struct task_struct *next)
80 80
81void fpsimd_flush_thread(void) 81void fpsimd_flush_thread(void)
82{ 82{
83 preempt_disable();
83 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); 84 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
84 fpsimd_load_state(&current->thread.fpsimd_state); 85 fpsimd_load_state(&current->thread.fpsimd_state);
86 preempt_enable();
85} 87}
86 88
87#ifdef CONFIG_KERNEL_MODE_NEON 89#ifdef CONFIG_KERNEL_MODE_NEON
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
index 8ae80a18e8ec..19da91e0cd27 100644
--- a/arch/arm64/mm/tlb.S
+++ b/arch/arm64/mm/tlb.S
@@ -35,7 +35,7 @@
35 */ 35 */
36ENTRY(__cpu_flush_user_tlb_range) 36ENTRY(__cpu_flush_user_tlb_range)
37 vma_vm_mm x3, x2 // get vma->vm_mm 37 vma_vm_mm x3, x2 // get vma->vm_mm
38 mmid x3, x3 // get vm_mm->context.id 38 mmid w3, x3 // get vm_mm->context.id
39 dsb sy 39 dsb sy
40 lsr x0, x0, #12 // align address 40 lsr x0, x0, #12 // align address
41 lsr x1, x1, #12 41 lsr x1, x1, #12
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index d22af851f3f6..fd7980743890 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,5 +1,19 @@
1 1
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += cputime.h
4generic-y += delay.h
5generic-y += device.h
6generic-y += div64.h
7generic-y += emergency-restart.h
3generic-y += exec.h 8generic-y += exec.h
4generic-y += trace_clock.h 9generic-y += futex.h
10generic-y += irq_regs.h
5generic-y += param.h 11generic-y += param.h
12generic-y += local.h
13generic-y += local64.h
14generic-y += percpu.h
15generic-y += scatterlist.h
16generic-y += sections.h
17generic-y += topology.h
18generic-y += trace_clock.h
19generic-y += xor.h
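Each generic-y += foo.h line added above asks Kbuild to emit a one-line wrapper under arch/avr32/include/generated/asm/ at build time, which is why the hand-written wrapper headers in the deletions that follow become redundant. A sketch of what the generated file looks like, assuming the usual scripts/Makefile.asm-generic behaviour (the exact generated banner, if any, may differ):

/* arch/avr32/include/generated/asm/cputime.h -- produced at build time by
 * Kbuild from the "generic-y += cputime.h" line above (sketch only);
 * equivalent to the deleted arch/avr32/include/asm/cputime.h wrapper. */
#include <asm-generic/cputime.h>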
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644
index e87e0f81cbeb..000000000000
--- a/arch/avr32/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_CPUTIME_H
2#define __ASM_AVR32_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/avr32/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/avr32/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#include <asm-generic/device.h>
7
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644
index d7ddd4fdeca6..000000000000
--- a/arch/avr32/include/asm/div64.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_DIV64_H
2#define __ASM_AVR32_DIV64_H
3
4#include <asm-generic/div64.h>
5
6#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644
index 3e7e014776ba..000000000000
--- a/arch/avr32/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
2#define __ASM_AVR32_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644
index 10419f14a68a..000000000000
--- a/arch/avr32/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_FUTEX_H
2#define __ASM_AVR32_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/avr32/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644
index 1c1619694da3..000000000000
--- a/arch/avr32/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_LOCAL_H
2#define __ASM_AVR32_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/avr32/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644
index 69227b4cd0d4..000000000000
--- a/arch/avr32/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_PERCPU_H
2#define __ASM_AVR32_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644
index a5902d9834e8..000000000000
--- a/arch/avr32/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SCATTERLIST_H
2#define __ASM_AVR32_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644
index aa14252e4181..000000000000
--- a/arch/avr32/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_SECTIONS_H
2#define __ASM_AVR32_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644
index 5b766cbb4806..000000000000
--- a/arch/avr32/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_AVR32_TOPOLOGY_H
2#define __ASM_AVR32_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644
index 99c87aa0af4f..000000000000
--- a/arch/avr32/include/asm/xor.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_XOR_H
2#define _ASM_XOR_H
3
4#include <asm-generic/xor.h>
5
6#endif
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index c2731003edef..42a53e740a7e 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
289 memset(childregs, 0, sizeof(struct pt_regs)); 289 memset(childregs, 0, sizeof(struct pt_regs));
290 p->thread.cpu_context.r0 = arg; 290 p->thread.cpu_context.r0 = arg;
291 p->thread.cpu_context.r1 = usp; /* fn */ 291 p->thread.cpu_context.r1 = usp; /* fn */
292 p->thread.cpu_context.r2 = syscall_return; 292 p->thread.cpu_context.r2 = (unsigned long)syscall_return;
293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread; 293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
294 childregs->sr = MODE_SUPERVISOR; 294 childregs->sr = MODE_SUPERVISOR;
295 } else { 295 } else {
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 869a1c6ffeee..12f828ad5058 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
98 case CLOCK_EVT_MODE_SHUTDOWN: 98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0); 99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name); 100 pr_debug("%s: stop\n", evdev->name);
101 cpu_idle_poll_ctrl(false); 101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
102 break; 109 break;
103 default: 110 default:
104 BUG(); 111 BUG();
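The added mode check keeps the cpu_idle_poll_ctrl() calls balanced: the comparator forces idle polling on when the clockevent enters ONESHOT or RESUME, so the disable on SHUTDOWN is only valid if one of those modes was active, as the new comment says. A toy userspace sketch of that invariant (hypothetical idle_poll_ctrl(); the real kernel helper keeps a force count and, as I recall, warns rather than asserts on underflow):

#include <assert.h>
#include <stdio.h>

static int force_count;         /* stands in for the idle-poll force count */

static void idle_poll_ctrl(int enable)
{
        if (enable) {
                force_count++;
        } else {
                assert(force_count > 0 && "unbalanced disable");
                force_count--;
        }
}

int main(void)
{
        idle_poll_ctrl(1);      /* entering ONESHOT forces polling on ...   */
        idle_poll_ctrl(0);      /* ... so SHUTDOWN after ONESHOT may undo it */

        /* A SHUTDOWN that never followed ONESHOT/RESUME must not call
         * idle_poll_ctrl(0): that is the imbalance the mode check avoids. */
        printf("force_count = %d\n", force_count);
        return 0;
}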
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 4a9baa9f6330..9969dbab19e3 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -276,7 +276,7 @@ static struct platform_device mtx1_pci_host = {
276 .resource = alchemy_pci_host_res, 276 .resource = alchemy_pci_host_res,
277}; 277};
278 278
279static struct __initdata platform_device * mtx1_devs[] = { 279static struct platform_device *mtx1_devs[] __initdata = {
280 &mtx1_pci_host, 280 &mtx1_pci_host,
281 &mtx1_gpio_leds, 281 &mtx1_gpio_leds,
282 &mtx1_wdt, 282 &mtx1_wdt,
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 51680d15ca8e..d445d060e346 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -187,7 +187,7 @@
187 187
188/* 188/*
189 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other 189 * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
190 * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and 190 * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
191 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels 191 * has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
192 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ. 192 * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
193 */ 193 */
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9d..e194f957ca8c 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
22 22
23static __always_inline bool arch_static_branch(struct static_key *key) 23static __always_inline bool arch_static_branch(struct static_key *key)
24{ 24{
25 asm goto("1:\tnop\n\t" 25 asm_volatile_goto("1:\tnop\n\t"
26 "nop\n\t" 26 "nop\n\t"
27 ".pushsection __jump_table, \"aw\"\n\t" 27 ".pushsection __jump_table, \"aw\"\n\t"
28 WORD_INSN " 1b, %l[l_yes], %0\n\t" 28 WORD_INSN " 1b, %l[l_yes], %0\n\t"
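The asm goto to asm_volatile_goto switch above (and its powerpc counterpart later in this series) works around a GCC "asm goto" miscompilation; as I recall, the compiler-gcc.h workaround added alongside these changes simply emits an empty asm after the asm goto. A compilable sketch under that assumption:

#include <stdio.h>

/* Assumed workaround definition -- check include/linux/compiler-gcc.h
 * in your tree for the authoritative version. */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

static int branch_taken(void)
{
        /* Empty template, so this never jumps and simply falls through. */
        asm_volatile_goto("" : : : : l_yes);
        return 0;
l_yes:
        return 1;
}

int main(void)
{
        printf("branch_taken() = %d\n", branch_taken());
        return 0;
}

Built with GCC (4.5 or later) or a recent Clang, branch_taken() returns 0 because the empty template never transfers control to l_yes; the trailing empty asm is what keeps affected GCC versions from reordering code around the jump entry.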
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 4204d76af854..029e002a4ea0 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -73,7 +73,7 @@
733: 733:
74 74
75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
76 PTR_L t8, __stack_chk_guard 76 PTR_LA t8, __stack_chk_guard
77 LONG_L t9, TASK_STACK_CANARY(a1) 77 LONG_L t9, TASK_STACK_CANARY(a1)
78 LONG_S t9, 0(t8) 78 LONG_S t9, 0(t8)
79#endif 79#endif
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 38af83f84c4a..20b7b040e76f 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -67,7 +67,7 @@ LEAF(resume)
671: 671:
68 68
69#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 69#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
70 PTR_L t8, __stack_chk_guard 70 PTR_LA t8, __stack_chk_guard
71 LONG_L t9, TASK_STACK_CANARY(a1) 71 LONG_L t9, TASK_STACK_CANARY(a1)
72 LONG_S t9, 0(t8) 72 LONG_S t9, 0(t8)
73#endif 73#endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 921238a6bd26..078de5eaca8f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -69,7 +69,7 @@
691: 691:
70 70
71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
72 PTR_L t8, __stack_chk_guard 72 PTR_LA t8, __stack_chk_guard
73 LONG_L t9, TASK_STACK_CANARY(a1) 73 LONG_L t9, TASK_STACK_CANARY(a1)
74 LONG_S t9, 0(t8) 74 LONG_S t9, 0(t8)
75#endif 75#endif
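All three stack-protector fixes above change PTR_L (load the word stored at the symbol) to PTR_LA (load the symbol's address): the following LONG_S t9, 0(t8) stores through t8, so t8 must hold the address of __stack_chk_guard, not its current value. A small C model of the difference (the real distinction lives in the MIPS assembler macros, of course; stack_chk_guard here is just a stand-in):

#include <stdio.h>

static unsigned long stack_chk_guard = 0xdeadbeefUL;   /* models __stack_chk_guard */

int main(void)
{
        unsigned long *t8_la = &stack_chk_guard;        /* PTR_LA: the address   */
        unsigned long  t8_l  = stack_chk_guard;         /* PTR_L: the stored word */

        printf("PTR_LA would store through %p\n", (void *)t8_la);
        printf("PTR_L  would store through %#lx (a bogus address)\n", t8_l);

        *t8_la = 0x1234UL;      /* what LONG_S t9, 0(t8) does in the fixed code */
        printf("guard updated to %#lx\n", stack_chk_guard);
        return 0;
}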
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 627883bc6d5f..bc6f96fcb529 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -609,6 +609,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
609 r4k_blast_scache(); 609 r4k_blast_scache();
610 else 610 else
611 blast_scache_range(addr, addr + size); 611 blast_scache_range(addr, addr + size);
612 preempt_enable();
612 __sync(); 613 __sync();
613 return; 614 return;
614 } 615 }
@@ -650,6 +651,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
650 */ 651 */
651 blast_inv_scache_range(addr, addr + size); 652 blast_inv_scache_range(addr, addr + size);
652 } 653 }
654 preempt_enable();
653 __sync(); 655 __sync();
654 return; 656 return;
655 } 657 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f25a7e9f8cbc..5f8b95512580 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
308{ 308{
309 int i; 309 int i;
310 310
311 /* Make sure that gcc doesn't leave the empty loop body. */ 311 if (cpu_needs_post_dma_flush(dev))
312 for (i = 0; i < nelems; i++, sg++) { 312 for (i = 0; i < nelems; i++, sg++)
313 if (cpu_needs_post_dma_flush(dev))
314 __dma_sync(sg_page(sg), sg->offset, sg->length, 313 __dma_sync(sg_page(sg), sg->offset, sg->length,
315 direction); 314 direction);
316 }
317} 315}
318 316
319static void mips_dma_sync_sg_for_device(struct device *dev, 317static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
321{ 319{
322 int i; 320 int i;
323 321
324 /* Make sure that gcc doesn't leave the empty loop body. */ 322 if (!plat_device_is_coherent(dev))
325 for (i = 0; i < nelems; i++, sg++) { 323 for (i = 0; i < nelems; i++, sg++)
326 if (!plat_device_is_coherent(dev))
327 __dma_sync(sg_page(sg), sg->offset, sg->length, 324 __dma_sync(sg_page(sg), sg->offset, sg->length,
328 direction); 325 direction);
329 }
330} 326}
331 327
332int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 328int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index eb59bfe23e85..93c9980e1b6b 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -14,53 +14,9 @@
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 */ 16 */
17
18#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
19
20#ifndef _ASM_OPENRISC_PROM_H 17#ifndef _ASM_OPENRISC_PROM_H
21#define _ASM_OPENRISC_PROM_H 18#define _ASM_OPENRISC_PROM_H
22#ifdef __KERNEL__
23#ifndef __ASSEMBLY__
24 19
25#include <linux/types.h>
26#include <asm/irq.h>
27#include <linux/irqdomain.h>
28#include <linux/atomic.h>
29#include <linux/of_irq.h>
30#include <linux/of_fdt.h>
31#include <linux/of_address.h>
32#include <linux/proc_fs.h>
33#include <linux/platform_device.h>
34#define HAVE_ARCH_DEVTREE_FIXUPS 20#define HAVE_ARCH_DEVTREE_FIXUPS
35 21
36/* Other Prototypes */
37extern int early_uartlite_console(void);
38
39/* Parse the ibm,dma-window property of an OF node into the busno, phys and
40 * size parameters.
41 */
42void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
43 unsigned long *busno, unsigned long *phys, unsigned long *size);
44
45extern void kdump_move_device_tree(void);
46
47/* Get the MAC address */
48extern const void *of_get_mac_address(struct device_node *np);
49
50/**
51 * of_irq_map_pci - Resolve the interrupt for a PCI device
52 * @pdev: the device whose interrupt is to be resolved
53 * @out_irq: structure of_irq filled by this function
54 *
55 * This function resolves the PCI interrupt for a given PCI device. If a
56 * device-node exists for a given pci_dev, it will use normal OF tree
57 * walking. If not, it will implement standard swizzling and walk up the
58 * PCI tree until an device-node is found, at which point it will finish
59 * resolving using the OF tree walking.
60 */
61struct pci_dev;
62extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
63
64#endif /* __ASSEMBLY__ */
65#endif /* __KERNEL__ */
66#endif /* _ASM_OPENRISC_PROM_H */ 22#endif /* _ASM_OPENRISC_PROM_H */
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index 0f90569b9d85..9387cc2693f6 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
40CONFIG_LLC2=m 40CONFIG_LLC2=m
41CONFIG_NET_PKTGEN=m 41CONFIG_NET_PKTGEN=m
42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
43CONFIG_DEVTMPFS=y
44CONFIG_DEVTMPFS_MOUNT=y
43# CONFIG_STANDALONE is not set 45# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 46# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45CONFIG_PARPORT=y 47CONFIG_PARPORT=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index b647b182dacc..90025322b75e 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
79CONFIG_LLC2=m 79CONFIG_LLC2=m
80CONFIG_NET_PKTGEN=m 80CONFIG_NET_PKTGEN=m
81CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 81CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
82CONFIG_DEVTMPFS=y
83CONFIG_DEVTMPFS_MOUNT=y
82# CONFIG_STANDALONE is not set 84# CONFIG_STANDALONE is not set
83# CONFIG_PREVENT_FIRMWARE_BUILD is not set 85# CONFIG_PREVENT_FIRMWARE_BUILD is not set
84CONFIG_BLK_DEV_UMEM=m 86CONFIG_BLK_DEV_UMEM=m
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index e289f5bf3148..f1a0c25bef8d 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
4CONFIG_IKCONFIG_PROC=y 4CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=16 5CONFIG_LOG_BUF_SHIFT=16
6CONFIG_SYSFS_DEPRECATED_V2=y 6CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_BLK_DEV_INITRD=y
7CONFIG_SLAB=y 8CONFIG_SLAB=y
8CONFIG_MODULES=y 9CONFIG_MODULES=y
9CONFIG_MODVERSIONS=y 10CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
27# CONFIG_INET_LRO is not set 28# CONFIG_INET_LRO is not set
28CONFIG_IPV6=y 29CONFIG_IPV6=y
29CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 30CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
31CONFIG_DEVTMPFS=y
32CONFIG_DEVTMPFS_MOUNT=y
30# CONFIG_PREVENT_FIRMWARE_BUILD is not set 33# CONFIG_PREVENT_FIRMWARE_BUILD is not set
31CONFIG_PARPORT=y 34CONFIG_PARPORT=y
32CONFIG_PARPORT_PC=y 35CONFIG_PARPORT_PC=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 311ca367b622..ec1b014952b6 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=16 6CONFIG_LOG_BUF_SHIFT=16
7CONFIG_SYSFS_DEPRECATED_V2=y 7CONFIG_SYSFS_DEPRECATED_V2=y
8CONFIG_BLK_DEV_INITRD=y
8# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
9CONFIG_EXPERT=y 10CONFIG_EXPERT=y
10CONFIG_KALLSYMS_ALL=y 11CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
39CONFIG_IP_NF_QUEUE=m 40CONFIG_IP_NF_QUEUE=m
40CONFIG_NET_PKTGEN=m 41CONFIG_NET_PKTGEN=m
41CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
43CONFIG_DEVTMPFS=y
44CONFIG_DEVTMPFS_MOUNT=y
42# CONFIG_STANDALONE is not set 45# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 46# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44CONFIG_BLK_DEV_UMEM=m 47CONFIG_BLK_DEV_UMEM=m
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index f11006361297..e1c8d2015c89 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -62,6 +62,8 @@ CONFIG_TIPC=m
62CONFIG_LLC2=m 62CONFIG_LLC2=m
63CONFIG_DNS_RESOLVER=y 63CONFIG_DNS_RESOLVER=y
64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
65CONFIG_DEVTMPFS=y
66CONFIG_DEVTMPFS_MOUNT=y
65# CONFIG_STANDALONE is not set 67# CONFIG_STANDALONE is not set
66CONFIG_PARPORT=y 68CONFIG_PARPORT=y
67CONFIG_PARPORT_PC=y 69CONFIG_PARPORT_PC=y
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index dfe88f6c95c4..ba61495e1fa4 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
49CONFIG_INET6_IPCOMP=y 49CONFIG_INET6_IPCOMP=y
50CONFIG_LLC2=m 50CONFIG_LLC2=m
51CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 51CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
52CONFIG_DEVTMPFS=y
53CONFIG_DEVTMPFS_MOUNT=y
52# CONFIG_STANDALONE is not set 54# CONFIG_STANDALONE is not set
53# CONFIG_PREVENT_FIRMWARE_BUILD is not set 55# CONFIG_PREVENT_FIRMWARE_BUILD is not set
54CONFIG_PARPORT=y 56CONFIG_PARPORT=y
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 1945f995f2df..4736020ba5ea 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -6,7 +6,7 @@ struct pt_regs;
6 6
7/* traps.c */ 7/* traps.c */
8void parisc_terminate(char *msg, struct pt_regs *regs, 8void parisc_terminate(char *msg, struct pt_regs *regs,
9 int code, unsigned long offset); 9 int code, unsigned long offset) __noreturn __cold;
10 10
11/* mm/fault.c */ 11/* mm/fault.c */
12void do_page_fault(struct pt_regs *regs, unsigned long code, 12void do_page_fault(struct pt_regs *regs, unsigned long code,
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fbb..d2d58258aea6 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
195 ldw MEM_PDC_HI(%r0),%r6 195 ldw MEM_PDC_HI(%r0),%r6
196 depd %r6, 31, 32, %r3 /* move to upper word */ 196 depd %r6, 31, 32, %r3 /* move to upper word */
197 197
198 mfctl %cr30,%r6 /* PCX-W2 firmware bug */
199
198 ldo PDC_PSW(%r0),%arg0 /* 21 */ 200 ldo PDC_PSW(%r0),%arg0 /* 21 */
199 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ 201 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
200 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */ 202 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
203 copy %r0,%arg3 205 copy %r0,%arg3
204 206
205stext_pdc_ret: 207stext_pdc_ret:
208 mtctl %r6,%cr30 /* restore task thread info */
209
206 /* restore rfi target address*/ 210 /* restore rfi target address*/
207 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 211 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
208 tophys_r1 %r10 212 tophys_r1 %r10
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 8a252f2d6c08..2b96602e812f 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -72,7 +72,6 @@ enum ipi_message_type {
72 IPI_NOP=0, 72 IPI_NOP=0,
73 IPI_RESCHEDULE=1, 73 IPI_RESCHEDULE=1,
74 IPI_CALL_FUNC, 74 IPI_CALL_FUNC,
75 IPI_CALL_FUNC_SINGLE,
76 IPI_CPU_START, 75 IPI_CPU_START,
77 IPI_CPU_STOP, 76 IPI_CPU_STOP,
78 IPI_CPU_TEST 77 IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
164 generic_smp_call_function_interrupt(); 163 generic_smp_call_function_interrupt();
165 break; 164 break;
166 165
167 case IPI_CALL_FUNC_SINGLE:
168 smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
169 generic_smp_call_function_single_interrupt();
170 break;
171
172 case IPI_CPU_START: 166 case IPI_CPU_START:
173 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); 167 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
174 break; 168 break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
260 254
261void arch_send_call_function_single_ipi(int cpu) 255void arch_send_call_function_single_ipi(int cpu)
262{ 256{
263 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); 257 send_IPI_single(cpu, IPI_CALL_FUNC);
264} 258}
265 259
266/* 260/*
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a4562..1cd1d0c83b6d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
291 do_exit(SIGSEGV); 291 do_exit(SIGSEGV);
292} 292}
293 293
294int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
295{
296 return syscall(regs);
297}
298
299/* gdb uses break 4,8 */ 294/* gdb uses break 4,8 */
300#define GDB_BREAK_INSN 0x10004 295#define GDB_BREAK_INSN 0x10004
301static void handle_gdb_break(struct pt_regs *regs, int wot) 296static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
805 else { 800 else {
806 801
807 /* 802 /*
808 * The kernel should never fault on its own address space. 803 * The kernel should never fault on its own address space,
804 * unless pagefault_disable() was called before.
809 */ 805 */
810 806
811 if (fault_space == 0) 807 if (fault_space == 0 && !in_atomic())
812 { 808 {
813 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 809 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
814 parisc_terminate("Kernel Fault", regs, code, fault_address); 810 parisc_terminate("Kernel Fault", regs, code, fault_address);
815
816 } 811 }
817 } 812 }
818 813
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index ac4370b1ca40..b5507ec06b84 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -56,7 +56,7 @@
56#ifdef __KERNEL__ 56#ifdef __KERNEL__
57#include <linux/module.h> 57#include <linux/module.h>
58#include <linux/compiler.h> 58#include <linux/compiler.h>
59#include <asm/uaccess.h> 59#include <linux/uaccess.h>
60#define s_space "%%sr1" 60#define s_space "%%sr1"
61#define d_space "%%sr2" 61#define d_space "%%sr2"
62#else 62#else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
524EXPORT_SYMBOL(copy_from_user); 524EXPORT_SYMBOL(copy_from_user);
525EXPORT_SYMBOL(copy_in_user); 525EXPORT_SYMBOL(copy_in_user);
526EXPORT_SYMBOL(memcpy); 526EXPORT_SYMBOL(memcpy);
527
528long probe_kernel_read(void *dst, const void *src, size_t size)
529{
530 unsigned long addr = (unsigned long)src;
531
532 if (size < 0 || addr < PAGE_SIZE)
533 return -EFAULT;
534
535 /* check for I/O space F_EXTEND(0xfff00000) access as well? */
536
537 return __probe_kernel_read(dst, src, size);
538}
539
527#endif 540#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index d10d27a720c0..0293588d5b8c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -171,17 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
171 unsigned long address) 171 unsigned long address)
172{ 172{
173 struct vm_area_struct *vma, *prev_vma; 173 struct vm_area_struct *vma, *prev_vma;
174 struct task_struct *tsk = current; 174 struct task_struct *tsk;
175 struct mm_struct *mm = tsk->mm; 175 struct mm_struct *mm;
176 unsigned long acc_type; 176 unsigned long acc_type;
177 int fault; 177 int fault;
178 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 178 unsigned int flags;
179 179
180 if (in_atomic() || !mm) 180 if (in_atomic())
181 goto no_context; 181 goto no_context;
182 182
183 tsk = current;
184 mm = tsk->mm;
185 if (!mm)
186 goto no_context;
187
188 flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
183 if (user_mode(regs)) 189 if (user_mode(regs))
184 flags |= FAULT_FLAG_USER; 190 flags |= FAULT_FLAG_USER;
191
192 acc_type = parisc_acctyp(code, regs->iir);
185 if (acc_type & VM_WRITE) 193 if (acc_type & VM_WRITE)
186 flags |= FAULT_FLAG_WRITE; 194 flags |= FAULT_FLAG_WRITE;
187retry: 195retry:
@@ -196,8 +204,6 @@ retry:
196 204
197good_area: 205good_area:
198 206
199 acc_type = parisc_acctyp(code,regs->iir);
200
201 if ((vma->vm_flags & acc_type) != acc_type) 207 if ((vma->vm_flags & acc_type) != acc_type)
202 goto bad_area; 208 goto bad_area;
203 209
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 6a15c968d214..15ca2255f438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c 74src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c 75src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
76 76
77src-plat-y := of.c 77src-plat-y := of.c epapr.c
78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \ 78src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
79 treeboot-walnut.c cuboot-acadia.c \ 79 treeboot-walnut.c cuboot-acadia.c \
80 cuboot-kilauea.c simpleboot.c \ 80 cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
97 prpmc2800.c 97 prpmc2800.c
98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c 98src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c 99src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c 100src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
101 101
102src-wlib := $(sort $(src-wlib-y)) 102src-wlib := $(sort $(src-wlib-y))
103src-plat := $(sort $(src-plat-y)) 103src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644
index 000000000000..c10191006673
--- /dev/null
+++ b/arch/powerpc/boot/epapr-wrapper.c
@@ -0,0 +1,9 @@
1extern void epapr_platform_init(unsigned long r3, unsigned long r4,
2 unsigned long r5, unsigned long r6,
3 unsigned long r7);
4
5void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
6 unsigned long r6, unsigned long r7)
7{
8 epapr_platform_init(r3, r4, r5, r6, r7);
9}
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
index 06c1961bd124..02e91aa2194a 100644
--- a/arch/powerpc/boot/epapr.c
+++ b/arch/powerpc/boot/epapr.c
@@ -48,8 +48,8 @@ static void platform_fixups(void)
48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); 48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
49} 49}
50 50
51void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, 51void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
52 unsigned long r6, unsigned long r7) 52 unsigned long r6, unsigned long r7)
53{ 53{
54 epapr_magic = r6; 54 epapr_magic = r6;
55 ima_size = r7; 55 ima_size = r7;
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 61d9899aa0d0..62e2f43ec1df 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -26,6 +26,9 @@
26 26
27static unsigned long claim_base; 27static unsigned long claim_base;
28 28
29void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
30 unsigned long r6, unsigned long r7);
31
29static void *of_try_claim(unsigned long size) 32static void *of_try_claim(unsigned long size)
30{ 33{
31 unsigned long addr = 0; 34 unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
61 } 64 }
62} 65}
63 66
64void platform_init(unsigned long a1, unsigned long a2, void *promptr) 67static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
65{ 68{
66 platform_ops.image_hdr = of_image_hdr; 69 platform_ops.image_hdr = of_image_hdr;
67 platform_ops.malloc = of_try_claim; 70 platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
81 loader_info.initrd_size = a2; 84 loader_info.initrd_size = a2;
82 } 85 }
83} 86}
87
88void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
89 unsigned long r6, unsigned long r7)
90{
91 /* Detect OF vs. ePAPR boot */
92 if (r5)
93 of_platform_init(r3, r4, (void *)r5);
94 else
95 epapr_platform_init(r3, r4, r5, r6, r7);
96}
97
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6761c746048d..cd7af841ba05 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -148,18 +148,18 @@ make_space=y
148 148
149case "$platform" in 149case "$platform" in
150pseries) 150pseries)
151 platformo=$object/of.o 151 platformo="$object/of.o $object/epapr.o"
152 link_address='0x4000000' 152 link_address='0x4000000'
153 ;; 153 ;;
154maple) 154maple)
155 platformo=$object/of.o 155 platformo="$object/of.o $object/epapr.o"
156 link_address='0x400000' 156 link_address='0x400000'
157 ;; 157 ;;
158pmac|chrp) 158pmac|chrp)
159 platformo=$object/of.o 159 platformo="$object/of.o $object/epapr.o"
160 ;; 160 ;;
161coff) 161coff)
162 platformo="$object/crt0.o $object/of.o" 162 platformo="$object/crt0.o $object/of.o $object/epapr.o"
163 lds=$object/zImage.coff.lds 163 lds=$object/zImage.coff.lds
164 link_address='0x500000' 164 link_address='0x500000'
165 pie= 165 pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
253 platformo="$object/treeboot-iss4xx.o" 253 platformo="$object/treeboot-iss4xx.o"
254 ;; 254 ;;
255epapr) 255epapr)
256 platformo="$object/epapr.o $object/epapr-wrapper.o"
256 link_address='0x20000000' 257 link_address='0x20000000'
257 pie=-pie 258 pie=-pie
258 ;; 259 ;;
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
69 69
70extern void irq_ctx_init(void); 70extern void irq_ctx_init(void);
71extern void call_do_softirq(struct thread_info *tp); 71extern void call_do_softirq(struct thread_info *tp);
72extern int call_handle_irq(int irq, void *p1, 72extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
73 struct thread_info *tp, void *func);
74extern void do_IRQ(struct pt_regs *regs); 73extern void do_IRQ(struct pt_regs *regs);
74extern void __do_irq(struct pt_regs *regs);
75 75
76int irq_choose_cpu(const struct cpumask *mask); 76int irq_choose_cpu(const struct cpumask *mask);
77 77
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
19 19
20static __always_inline bool arch_static_branch(struct static_key *key) 20static __always_inline bool arch_static_branch(struct static_key *key)
21{ 21{
22 asm goto("1:\n\t" 22 asm_volatile_goto("1:\n\t"
23 "nop\n\t" 23 "nop\n\t"
24 ".pushsection __jump_table, \"aw\"\n\t" 24 ".pushsection __jump_table, \"aw\"\n\t"
25 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" 25 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e378cccfca55..ce4de5aed7b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -149,8 +149,6 @@ typedef struct {
149 149
150struct thread_struct { 150struct thread_struct {
151 unsigned long ksp; /* Kernel stack pointer */ 151 unsigned long ksp; /* Kernel stack pointer */
152 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
153
154#ifdef CONFIG_PPC64 152#ifdef CONFIG_PPC64
155 unsigned long ksp_vsid; 153 unsigned long ksp_vsid;
156#endif 154#endif
@@ -162,6 +160,7 @@ struct thread_struct {
162#endif 160#endif
163#ifdef CONFIG_PPC32 161#ifdef CONFIG_PPC32
164 void *pgdir; /* root of page-table tree */ 162 void *pgdir; /* root of page-table tree */
163 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
165#endif 164#endif
166#ifdef CONFIG_PPC_ADV_DEBUG_REGS 165#ifdef CONFIG_PPC_ADV_DEBUG_REGS
167 /* 166 /*
@@ -321,7 +320,6 @@ struct thread_struct {
321#else 320#else
322#define INIT_THREAD { \ 321#define INIT_THREAD { \
323 .ksp = INIT_SP, \ 322 .ksp = INIT_SP, \
324 .ksp_limit = INIT_SP_LIMIT, \
325 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ 323 .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
326 .fs = KERNEL_DS, \ 324 .fs = KERNEL_DS, \
327 .fpr = {{0}}, \ 325 .fpr = {{0}}, \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d8958be5f31a..502c7a4e73f7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -80,10 +80,11 @@ int main(void)
80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr)); 80 DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
81#else 81#else
82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 82 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
83 DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
84 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
83#endif /* CONFIG_PPC64 */ 85#endif /* CONFIG_PPC64 */
84 86
85 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 87 DEFINE(KSP, offsetof(struct thread_struct, ksp));
86 DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
87 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 88 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
88#ifdef CONFIG_BOOKE 89#ifdef CONFIG_BOOKE
89 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); 90 DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0adab06ce5c0..572bb5b95f35 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
661 /* number of bytes needed for the bitmap */ 661 /* number of bytes needed for the bitmap */
662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); 662 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
663 663
664 page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); 664 page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
665 if (!page) 665 if (!page)
666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz); 666 panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
667 tbl->it_map = page_address(page); 667 tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..c7cb8c232d2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
441} 441}
442#endif 442#endif
443 443
444static inline void handle_one_irq(unsigned int irq)
445{
446 struct thread_info *curtp, *irqtp;
447 unsigned long saved_sp_limit;
448 struct irq_desc *desc;
449
450 desc = irq_to_desc(irq);
451 if (!desc)
452 return;
453
454 /* Switch to the irq stack to handle this */
455 curtp = current_thread_info();
456 irqtp = hardirq_ctx[smp_processor_id()];
457
458 if (curtp == irqtp) {
459 /* We're already on the irq stack, just handle it */
460 desc->handle_irq(irq, desc);
461 return;
462 }
463
464 saved_sp_limit = current->thread.ksp_limit;
465
466 irqtp->task = curtp->task;
467 irqtp->flags = 0;
468
469 /* Copy the softirq bits in preempt_count so that the
470 * softirq checks work in the hardirq context. */
471 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
472 (curtp->preempt_count & SOFTIRQ_MASK);
473
474 current->thread.ksp_limit = (unsigned long)irqtp +
475 _ALIGN_UP(sizeof(struct thread_info), 16);
476
477 call_handle_irq(irq, desc, irqtp, desc->handle_irq);
478 current->thread.ksp_limit = saved_sp_limit;
479 irqtp->task = NULL;
480
481 /* Set any flag that may have been set on the
482 * alternate stack
483 */
484 if (irqtp->flags)
485 set_bits(irqtp->flags, &curtp->flags);
486}
487
488static inline void check_stack_overflow(void) 444static inline void check_stack_overflow(void)
489{ 445{
490#ifdef CONFIG_DEBUG_STACKOVERFLOW 446#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
501#endif 457#endif
502} 458}
503 459
504void do_IRQ(struct pt_regs *regs) 460void __do_irq(struct pt_regs *regs)
505{ 461{
506 struct pt_regs *old_regs = set_irq_regs(regs); 462 struct irq_desc *desc;
507 unsigned int irq; 463 unsigned int irq;
508 464
509 irq_enter(); 465 irq_enter();
@@ -519,18 +475,57 @@ void do_IRQ(struct pt_regs *regs)
519 */ 475 */
520 irq = ppc_md.get_irq(); 476 irq = ppc_md.get_irq();
521 477
522 /* We can hard enable interrupts now */ 478 /* We can hard enable interrupts now to allow perf interrupts */
523 may_hard_irq_enable(); 479 may_hard_irq_enable();
524 480
525 /* And finally process it */ 481 /* And finally process it */
526 if (irq != NO_IRQ) 482 if (unlikely(irq == NO_IRQ))
527 handle_one_irq(irq);
528 else
529 __get_cpu_var(irq_stat).spurious_irqs++; 483 __get_cpu_var(irq_stat).spurious_irqs++;
484 else {
485 desc = irq_to_desc(irq);
486 if (likely(desc))
487 desc->handle_irq(irq, desc);
488 }
530 489
531 trace_irq_exit(regs); 490 trace_irq_exit(regs);
532 491
533 irq_exit(); 492 irq_exit();
493}
494
495void do_IRQ(struct pt_regs *regs)
496{
497 struct pt_regs *old_regs = set_irq_regs(regs);
498 struct thread_info *curtp, *irqtp, *sirqtp;
499
500 /* Switch to the irq stack to handle this */
501 curtp = current_thread_info();
502 irqtp = hardirq_ctx[raw_smp_processor_id()];
503 sirqtp = softirq_ctx[raw_smp_processor_id()];
504
505 /* Already there ? */
506 if (unlikely(curtp == irqtp || curtp == sirqtp)) {
507 __do_irq(regs);
508 set_irq_regs(old_regs);
509 return;
510 }
511
512 /* Prepare the thread_info in the irq stack */
513 irqtp->task = curtp->task;
514 irqtp->flags = 0;
515
516 /* Copy the preempt_count so that the [soft]irq checks work. */
517 irqtp->preempt_count = curtp->preempt_count;
518
519 /* Switch stack and call */
520 call_do_irq(regs, irqtp);
521
522 /* Restore stack limit */
523 irqtp->task = NULL;
524
525 /* Copy back updates to the thread_info */
526 if (irqtp->flags)
527 set_bits(irqtp->flags, &curtp->flags);
528
534 set_irq_regs(old_regs); 529 set_irq_regs(old_regs);
535} 530}
536 531
@@ -592,28 +587,22 @@ void irq_ctx_init(void)
592 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 587 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
593 tp = softirq_ctx[i]; 588 tp = softirq_ctx[i];
594 tp->cpu = i; 589 tp->cpu = i;
595 tp->preempt_count = 0;
596 590
597 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 591 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
598 tp = hardirq_ctx[i]; 592 tp = hardirq_ctx[i];
599 tp->cpu = i; 593 tp->cpu = i;
600 tp->preempt_count = HARDIRQ_OFFSET;
601 } 594 }
602} 595}
603 596
604static inline void do_softirq_onstack(void) 597static inline void do_softirq_onstack(void)
605{ 598{
606 struct thread_info *curtp, *irqtp; 599 struct thread_info *curtp, *irqtp;
607 unsigned long saved_sp_limit = current->thread.ksp_limit;
608 600
609 curtp = current_thread_info(); 601 curtp = current_thread_info();
610 irqtp = softirq_ctx[smp_processor_id()]; 602 irqtp = softirq_ctx[smp_processor_id()];
611 irqtp->task = curtp->task; 603 irqtp->task = curtp->task;
612 irqtp->flags = 0; 604 irqtp->flags = 0;
613 current->thread.ksp_limit = (unsigned long)irqtp +
614 _ALIGN_UP(sizeof(struct thread_info), 16);
615 call_do_softirq(irqtp); 605 call_do_softirq(irqtp);
616 current->thread.ksp_limit = saved_sp_limit;
617 irqtp->task = NULL; 606 irqtp->task = NULL;
618 607
619 /* Set any flag that may have been set on the 608 /* Set any flag that may have been set on the
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..2b0ad9845363 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -36,26 +36,41 @@
36 36
37 .text 37 .text
38 38
39/*
40 * We store the saved ksp_limit in the unused part
41 * of the STACK_FRAME_OVERHEAD
42 */
39_GLOBAL(call_do_softirq) 43_GLOBAL(call_do_softirq)
40 mflr r0 44 mflr r0
41 stw r0,4(r1) 45 stw r0,4(r1)
46 lwz r10,THREAD+KSP_LIMIT(r2)
47 addi r11,r3,THREAD_INFO_GAP
42 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) 48 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
43 mr r1,r3 49 mr r1,r3
50 stw r10,8(r1)
51 stw r11,THREAD+KSP_LIMIT(r2)
44 bl __do_softirq 52 bl __do_softirq
53 lwz r10,8(r1)
45 lwz r1,0(r1) 54 lwz r1,0(r1)
46 lwz r0,4(r1) 55 lwz r0,4(r1)
56 stw r10,THREAD+KSP_LIMIT(r2)
47 mtlr r0 57 mtlr r0
48 blr 58 blr
49 59
50_GLOBAL(call_handle_irq) 60_GLOBAL(call_do_irq)
51 mflr r0 61 mflr r0
52 stw r0,4(r1) 62 stw r0,4(r1)
53 mtctr r6 63 lwz r10,THREAD+KSP_LIMIT(r2)
54 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 64 addi r11,r3,THREAD_INFO_GAP
55 mr r1,r5 65 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
56 bctrl 66 mr r1,r4
67 stw r10,8(r1)
68 stw r11,THREAD+KSP_LIMIT(r2)
69 bl __do_irq
70 lwz r10,8(r1)
57 lwz r1,0(r1) 71 lwz r1,0(r1)
58 lwz r0,4(r1) 72 lwz r0,4(r1)
73 stw r10,THREAD+KSP_LIMIT(r2)
59 mtlr r0 74 mtlr r0
60 blr 75 blr
61 76
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
40 mtlr r0 40 mtlr r0
41 blr 41 blr
42 42
43_GLOBAL(call_handle_irq) 43_GLOBAL(call_do_irq)
44 ld r8,0(r6)
45 mflr r0 44 mflr r0
46 std r0,16(r1) 45 std r0,16(r1)
47 mtctr r8 46 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
48 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5) 47 mr r1,r4
49 mr r1,r5 48 bl .__do_irq
50 bctrl
51 ld r1,0(r1) 49 ld r1,0(r1)
52 ld r0,16(r1) 50 ld r0,16(r1)
53 mtlr r0 51 mtlr r0
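The do_IRQ() rework above splits interrupt handling into __do_irq() and has do_IRQ() switch to the per-CPU hardirq stack through the new call_do_irq() helper, instead of juggling ksp_limit around the old call_handle_irq(). As a rough userspace analogy only (ucontext(3) on glibc, nothing kernel-specific), running a handler on a separately allocated stack looks like this:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, irq_ctx;
static char irq_stack[64 * 1024];          /* stands in for hardirq_ctx[cpu] */

static void fake_irq_handler(void)         /* stands in for __do_irq() */
{
        int local;
        printf("handler local lives at %p (separate stack)\n", (void *)&local);
}                                          /* returning follows uc_link back */

int main(void)
{
        int local;

        getcontext(&irq_ctx);
        irq_ctx.uc_stack.ss_sp = irq_stack;
        irq_ctx.uc_stack.ss_size = sizeof(irq_stack);
        irq_ctx.uc_link = &main_ctx;                      /* where to resume */
        makecontext(&irq_ctx, fake_irq_handler, 0);

        printf("main local lives at    %p\n", (void *)&local);
        swapcontext(&main_ctx, &irq_ctx);  /* "call_do_irq": switch stacks, run handler */
        printf("back on the original stack\n");
        return 0;
}

The two printed addresses come from different stacks, which is the whole point of the kernel change: the handler's frames live on the dedicated IRQ stack, and the original context is restored when it finishes.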
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6f428da53e20..96d2fdf3aa9e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1000 kregs = (struct pt_regs *) sp; 1000 kregs = (struct pt_regs *) sp;
1001 sp -= STACK_FRAME_OVERHEAD; 1001 sp -= STACK_FRAME_OVERHEAD;
1002 p->thread.ksp = sp; 1002 p->thread.ksp = sp;
1003#ifdef CONFIG_PPC32
1003 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + 1004 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1004 _ALIGN_UP(sizeof(struct thread_info), 16); 1005 _ALIGN_UP(sizeof(struct thread_info), 16);
1005 1006#endif
1006#ifdef CONFIG_HAVE_HW_BREAKPOINT 1007#ifdef CONFIG_HAVE_HW_BREAKPOINT
1007 p->thread.ptrace_bps[0] = NULL; 1008 p->thread.ptrace_bps[0] = NULL;
1008#endif 1009#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 12e656ffe60e..5fe2842e8bab 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
196 196
197static cell_t __initdata regbuf[1024]; 197static cell_t __initdata regbuf[1024];
198 198
199static bool rtas_has_query_cpu_stopped;
200
199 201
200/* 202/*
201 * Error results ... some OF calls will return "-1" on error, some 203 * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
1574 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1576 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1575 &val, sizeof(val)); 1577 &val, sizeof(val));
1576 1578
1579 /* Check if it supports "query-cpu-stopped-state" */
1580 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1581 &val, sizeof(val)) != PROM_ERROR)
1582 rtas_has_query_cpu_stopped = true;
1583
1577#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__) 1584#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1578 /* PowerVN takeover hack */ 1585 /* PowerVN takeover hack */
1579 prom_rtas_data = base; 1586 prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
1815 = (void *) LOW_ADDR(__secondary_hold_acknowledge); 1822 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1816 unsigned long secondary_hold = LOW_ADDR(__secondary_hold); 1823 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1817 1824
1825 /*
1826 * On pseries, if RTAS supports "query-cpu-stopped-state",
1827 * we skip this stage, the CPUs will be started by the
1828 * kernel using RTAS.
1829 */
1830 if ((of_platform == PLATFORM_PSERIES ||
1831 of_platform == PLATFORM_PSERIES_LPAR) &&
1832 rtas_has_query_cpu_stopped) {
1833 prom_printf("prom_hold_cpus: skipped\n");
1834 return;
1835 }
1836
1818 prom_debug("prom_hold_cpus: start...\n"); 1837 prom_debug("prom_hold_cpus: start...\n");
1819 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); 1838 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1820 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); 1839 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3011 * On non-powermacs, put all CPUs in spin-loops. 3030 * On non-powermacs, put all CPUs in spin-loops.
3012 * 3031 *
3013 * PowerMacs use a different mechanism to spin CPUs 3032 * PowerMacs use a different mechanism to spin CPUs
3033 *
3034 * (This must be done after instanciating RTAS)
3014 */ 3035 */
3015 if (of_platform != PLATFORM_POWERMAC && 3036 if (of_platform != PLATFORM_POWERMAC &&
3016 of_platform != PLATFORM_OPAL) 3037 of_platform != PLATFORM_OPAL)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 27a90b99ef67..b4e667663d9b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
17#include <asm/machdep.h> 17#include <asm/machdep.h>
18#include <asm/smp.h> 18#include <asm/smp.h>
19#include <asm/pmc.h> 19#include <asm/pmc.h>
20#include <asm/firmware.h>
20 21
21#include "cacheinfo.h" 22#include "cacheinfo.h"
22 23
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
179SYSFS_PMCSETUP(dscr, SPRN_DSCR); 180SYSFS_PMCSETUP(dscr, SPRN_DSCR);
180SYSFS_PMCSETUP(pir, SPRN_PIR); 181SYSFS_PMCSETUP(pir, SPRN_PIR);
181 182
183/*
184 Lets only enable read for phyp resources and
185 enable write when needed with a separate function.
186 Lets be conservative and default to pseries.
187*/
182static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); 188static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
183static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); 189static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
184static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); 190static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
185static DEVICE_ATTR(purr, 0600, show_purr, store_purr); 191static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
186static DEVICE_ATTR(pir, 0400, show_pir, NULL); 192static DEVICE_ATTR(pir, 0400, show_pir, NULL);
187 193
188unsigned long dscr_default = 0; 194unsigned long dscr_default = 0;
189EXPORT_SYMBOL(dscr_default); 195EXPORT_SYMBOL(dscr_default);
190 196
197static void add_write_permission_dev_attr(struct device_attribute *attr)
198{
199 attr->attr.mode |= 0200;
200}
201
191static ssize_t show_dscr_default(struct device *dev, 202static ssize_t show_dscr_default(struct device *dev,
192 struct device_attribute *attr, char *buf) 203 struct device_attribute *attr, char *buf)
193{ 204{
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
394 if (cpu_has_feature(CPU_FTR_MMCRA)) 405 if (cpu_has_feature(CPU_FTR_MMCRA))
395 device_create_file(s, &dev_attr_mmcra); 406 device_create_file(s, &dev_attr_mmcra);
396 407
397 if (cpu_has_feature(CPU_FTR_PURR)) 408 if (cpu_has_feature(CPU_FTR_PURR)) {
409 if (!firmware_has_feature(FW_FEATURE_LPAR))
410 add_write_permission_dev_attr(&dev_attr_purr);
398 device_create_file(s, &dev_attr_purr); 411 device_create_file(s, &dev_attr_purr);
412 }
399 413
400 if (cpu_has_feature(CPU_FTR_SPURR)) 414 if (cpu_has_feature(CPU_FTR_SPURR))
401 device_create_file(s, &dev_attr_spurr); 415 device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7b60b9851469..cd809eaa8b5c 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
79 TABORT(R3) 79 TABORT(R3)
80 blr 80 blr
81 81
82 .section ".toc","aw"
83DSCR_DEFAULT:
84 .tc dscr_default[TC],dscr_default
85
86 .section ".text"
82 87
83/* void tm_reclaim(struct thread_struct *thread, 88/* void tm_reclaim(struct thread_struct *thread,
84 * unsigned long orig_msr, 89 * unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
123 mr r15, r14 128 mr r15, r14
124 ori r15, r15, MSR_FP 129 ori r15, r15, MSR_FP
125 li r16, MSR_RI 130 li r16, MSR_RI
131 ori r16, r16, MSR_EE /* IRQs hard off */
126 andc r15, r15, r16 132 andc r15, r15, r16
127 oris r15, r15, MSR_VEC@h 133 oris r15, r15, MSR_VEC@h
128#ifdef CONFIG_VSX 134#ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
187 std r1, PACATMSCRATCH(r13) 193 std r1, PACATMSCRATCH(r13)
188 ld r1, PACAR1(r13) 194 ld r1, PACAR1(r13)
189 195
196 /* Store the PPR in r11 and reset to decent value */
197 std r11, GPR11(r1) /* Temporary stash */
198 mfspr r11, SPRN_PPR
199 HMT_MEDIUM
200
190 /* Now get some more GPRS free */ 201 /* Now get some more GPRS free */
191 std r7, GPR7(r1) /* Temporary stash */ 202 std r7, GPR7(r1) /* Temporary stash */
192 std r12, GPR12(r1) /* '' '' '' */ 203 std r12, GPR12(r1) /* '' '' '' */
193 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ 204 ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */
194 205
206 std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */
207
195 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ 208 addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
196 209
197 /* Make r7 look like an exception frame so that we 210 /* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
203 SAVE_GPR(0, r7) /* user r0 */ 216 SAVE_GPR(0, r7) /* user r0 */
204 SAVE_GPR(2, r7) /* user r2 */ 217 SAVE_GPR(2, r7) /* user r2 */
205 SAVE_4GPRS(3, r7) /* user r3-r6 */ 218 SAVE_4GPRS(3, r7) /* user r3-r6 */
206 SAVE_4GPRS(8, r7) /* user r8-r11 */ 219 SAVE_GPR(8, r7) /* user r8 */
220 SAVE_GPR(9, r7) /* user r9 */
221 SAVE_GPR(10, r7) /* user r10 */
207 ld r3, PACATMSCRATCH(r13) /* user r1 */ 222 ld r3, PACATMSCRATCH(r13) /* user r1 */
208 ld r4, GPR7(r1) /* user r7 */ 223 ld r4, GPR7(r1) /* user r7 */
209 ld r5, GPR12(r1) /* user r12 */ 224 ld r5, GPR11(r1) /* user r11 */
210 GET_SCRATCH0(6) /* user r13 */ 225 ld r6, GPR12(r1) /* user r12 */
226 GET_SCRATCH0(8) /* user r13 */
211 std r3, GPR1(r7) 227 std r3, GPR1(r7)
212 std r4, GPR7(r7) 228 std r4, GPR7(r7)
213 std r5, GPR12(r7) 229 std r5, GPR11(r7)
214 std r6, GPR13(r7) 230 std r6, GPR12(r7)
231 std r8, GPR13(r7)
215 232
216 SAVE_NVGPRS(r7) /* user r14-r31 */ 233 SAVE_NVGPRS(r7) /* user r14-r31 */
217 234
@@ -234,14 +251,12 @@ dont_backup_fp:
234 std r6, _XER(r7) 251 std r6, _XER(r7)
235 252
236 253
237 /* ******************** TAR, PPR, DSCR ********** */ 254 /* ******************** TAR, DSCR ********** */
238 mfspr r3, SPRN_TAR 255 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR 256 mfspr r4, SPRN_DSCR
240 mfspr r5, SPRN_DSCR
241 257
242 std r3, THREAD_TM_TAR(r12) 258 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12) 259 std r4, THREAD_TM_DSCR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245 260
246 /* MSR and flags: We don't change CRs, and we don't need to alter 261 /* MSR and flags: We don't change CRs, and we don't need to alter
247 * MSR. 262 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
258 std r3, THREAD_TM_TFHAR(r12) 273 std r3, THREAD_TM_TFHAR(r12)
259 std r4, THREAD_TM_TFIAR(r12) 274 std r4, THREAD_TM_TFIAR(r12)
260 275
261 /* AMR and PPR are checkpointed too, but are unsupported by Linux. */ 276 /* AMR is checkpointed too, but is unsupported by Linux. */
262 277
263 /* Restore original MSR/IRQ state & clear TM mode */ 278 /* Restore original MSR/IRQ state & clear TM mode */
264 ld r14, TM_FRAME_L0(r1) /* Orig MSR */ 279 ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
274 mtcr r4 289 mtcr r4
275 mtlr r0 290 mtlr r0
276 ld r2, 40(r1) 291 ld r2, 40(r1)
292
293 /* Load system default DSCR */
294 ld r4, DSCR_DEFAULT@toc(r2)
295 ld r0, 0(r4)
296 mtspr SPRN_DSCR, r0
297
277 blr 298 blr
278 299
279 300
@@ -358,25 +379,24 @@ dont_restore_fp:
358 379
359restore_gprs: 380restore_gprs:
360 381
361 /* ******************** TAR, PPR, DSCR ********** */ 382 /* ******************** CR,LR,CCR,MSR ********** */
362 ld r4, THREAD_TM_TAR(r3) 383 ld r4, _CTR(r7)
363 ld r5, THREAD_TM_PPR(r3) 384 ld r5, _LINK(r7)
364 ld r6, THREAD_TM_DSCR(r3) 385 ld r6, _CCR(r7)
386 ld r8, _XER(r7)
365 387
366 mtspr SPRN_TAR, r4 388 mtctr r4
367 mtspr SPRN_PPR, r5 389 mtlr r5
368 mtspr SPRN_DSCR, r6 390 mtcr r6
391 mtxer r8
369 392
370 /* ******************** CR,LR,CCR,MSR ********** */ 393 /* ******************** TAR ******************** */
371 ld r3, _CTR(r7) 394 ld r4, THREAD_TM_TAR(r3)
372 ld r4, _LINK(r7) 395 mtspr SPRN_TAR, r4
373 ld r5, _CCR(r7)
374 ld r6, _XER(r7)
375 396
376 mtctr r3 397 /* Load up the PPR and DSCR in GPRs only at this stage */
377 mtlr r4 398 ld r5, THREAD_TM_DSCR(r3)
378 mtcr r5 399 ld r6, THREAD_TM_PPR(r3)
379 mtxer r6
380 400
381 /* Clear the MSR RI since we are about to change R1. EE is already off 401 /* Clear the MSR RI since we are about to change R1. EE is already off
382 */ 402 */
@@ -384,19 +404,26 @@ restore_gprs:
384 mtmsrd r4, 1 404 mtmsrd r4, 1
385 405
386 REST_4GPRS(0, r7) /* GPR0-3 */ 406 REST_4GPRS(0, r7) /* GPR0-3 */
387 REST_GPR(4, r7) /* GPR4-6 */ 407 REST_GPR(4, r7) /* GPR4 */
388 REST_GPR(5, r7)
389 REST_GPR(6, r7)
390 REST_4GPRS(8, r7) /* GPR8-11 */ 408 REST_4GPRS(8, r7) /* GPR8-11 */
391 REST_2GPRS(12, r7) /* GPR12-13 */ 409 REST_2GPRS(12, r7) /* GPR12-13 */
392 410
393 REST_NVGPRS(r7) /* GPR14-31 */ 411 REST_NVGPRS(r7) /* GPR14-31 */
394 412
395 ld r7, GPR7(r7) /* GPR7 */ 413 /* Load up PPR and DSCR here so we don't run with user values for long
414 */
415 mtspr SPRN_DSCR, r5
416 mtspr SPRN_PPR, r6
417
418 REST_GPR(5, r7) /* GPR5-7 */
419 REST_GPR(6, r7)
420 ld r7, GPR7(r7)
396 421
397 /* Commit register state as checkpointed state: */ 422 /* Commit register state as checkpointed state: */
398 TRECHKPT 423 TRECHKPT
399 424
425 HMT_MEDIUM
426
400 /* Our transactional state has now changed. 427 /* Our transactional state has now changed.
401 * 428 *
402 * Now just get out of here. Transactional (current) state will be 429 * Now just get out of here. Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
419 mtcr r4 446 mtcr r4
420 mtlr r0 447 mtlr r0
421 ld r2, 40(r1) 448 ld r2, 40(r1)
449
450 /* Load system default DSCR */
451 ld r4, DSCR_DEFAULT@toc(r2)
452 ld r0, 0(r4)
453 mtspr SPRN_DSCR, r0
454
422 blr 455 blr
423 456
424 /* ****************************************************************** */ 457 /* ****************************************************************** */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 78a350670de3..d38cc08b16c7 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1530 const char *cp; 1530 const char *cp;
1531 1531
1532 dn = dev->of_node; 1532 dn = dev->of_node;
1533 if (!dn) 1533 if (!dn) {
1534 return -ENODEV; 1534 strcat(buf, "\n");
1535 return strlen(buf);
1536 }
1535 cp = of_get_property(dn, "compatible", NULL); 1537 cp = of_get_property(dn, "compatible", NULL);
1536 if (!cp) 1538 if (!cp) {
1537 return -ENODEV; 1539 strcat(buf, "\n");
1540 return strlen(buf);
1541 }
1538 1542
1539 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); 1543 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1540} 1544}
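
The vio.c change above turns a missing device-tree node or missing "compatible" property from a hard -ENODEV into an empty modalias line, so userspace reading the sysfs attribute gets a bare newline instead of a read error. A minimal userspace sketch of that fallback, assuming the caller hands in a zero-initialized buffer the way sysfs does; the function name and the sample values are invented:

        #include <stdio.h>
        #include <string.h>
        #include <sys/types.h>

        static ssize_t modalias_format(char *buf, const char *type, const char *compat)
        {
                if (!type || !compat) {
                        strcat(buf, "\n");              /* fallback: an empty modalias line */
                        return (ssize_t)strlen(buf);
                }
                return sprintf(buf, "vio:T%sS%s\n", type, compat);
        }

        int main(void)
        {
                char with[64] = "", without[64] = "";

                modalias_format(with, "vnet", "IBM,vnet");      /* sample values, invented */
                modalias_format(without, NULL, NULL);           /* fallback path */
                printf("%s%s", with, without);
                return 0;
        }
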
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cdd..c71103b8a748 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1066BEGIN_FTR_SECTION 1066BEGIN_FTR_SECTION
1067 mfspr r8, SPRN_DSCR 1067 mfspr r8, SPRN_DSCR
1068 ld r7, HSTATE_DSCR(r13) 1068 ld r7, HSTATE_DSCR(r13)
1069 std r8, VCPU_DSCR(r7) 1069 std r8, VCPU_DSCR(r9)
1070 mtspr SPRN_DSCR, r7 1070 mtspr SPRN_DSCR, r7
1071END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1071END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1072 1072
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..c65593abae8e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
332 unsigned long hva; 332 unsigned long hva;
333 int pfnmap = 0; 333 int pfnmap = 0;
334 int tsize = BOOK3E_PAGESZ_4K; 334 int tsize = BOOK3E_PAGESZ_4K;
335 int ret = 0;
336 unsigned long mmu_seq;
337 struct kvm *kvm = vcpu_e500->vcpu.kvm;
338
339 /* used to check for invalidations in progress */
340 mmu_seq = kvm->mmu_notifier_seq;
341 smp_rmb();
335 342
336 /* 343 /*
337 * Translate guest physical to true physical, acquiring 344 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
449 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 456 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
450 } 457 }
451 458
459 spin_lock(&kvm->mmu_lock);
460 if (mmu_notifier_retry(kvm, mmu_seq)) {
461 ret = -EAGAIN;
462 goto out;
463 }
464
452 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 465 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
453 466
454 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 467 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
457 /* Clear i-cache for new pages */ 470 /* Clear i-cache for new pages */
458 kvmppc_mmu_flush_icache(pfn); 471 kvmppc_mmu_flush_icache(pfn);
459 472
473out:
474 spin_unlock(&kvm->mmu_lock);
475
460 /* Drop refcount on page, so that mmu notifiers can clear it */ 476 /* Drop refcount on page, so that mmu notifiers can clear it */
461 kvm_release_pfn_clean(pfn); 477 kvm_release_pfn_clean(pfn);
462 478
463 return 0; 479 return ret;
464} 480}
465 481
466/* XXX only map the one-one case, for now use TLB0 */ 482/* XXX only map the one-one case, for now use TLB0 */
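
The e500_mmu_host.c hunks wrap the shadow-TLB setup in the usual mmu-notifier race check: sample kvm->mmu_notifier_seq before translating, then re-check it under kvm->mmu_lock and bail out with -EAGAIN if an invalidation ran in between. A hedged userspace model of that retry pattern, with a pthread mutex standing in for mmu_lock and an invented map_state/invalidate_seq pair standing in for the KVM structures:

        #include <errno.h>
        #include <pthread.h>
        #include <stdio.h>

        struct map_state {
                pthread_mutex_t lock;
                unsigned long invalidate_seq;   /* bumped by the invalidation side */
        };

        static int install_mapping(struct map_state *s)
        {
                unsigned long seq = __atomic_load_n(&s->invalidate_seq, __ATOMIC_ACQUIRE);

                /* ... slow address translation done without the lock ... */

                pthread_mutex_lock(&s->lock);
                if (__atomic_load_n(&s->invalidate_seq, __ATOMIC_RELAXED) != seq) {
                        /* an invalidation raced with us: drop the work and retry */
                        pthread_mutex_unlock(&s->lock);
                        return -EAGAIN;
                }
                /* ... commit the shadow TLB entry while still holding the lock ... */
                pthread_mutex_unlock(&s->lock);
                return 0;
        }

        int main(void)
        {
                struct map_state s = { PTHREAD_MUTEX_INITIALIZER, 0 };

                printf("install_mapping: %d\n", install_mapping(&s));
                return 0;
        }
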
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
226 blr 226 blr
227 227
228 228
229 .macro source 229 .macro srcnr
230100: 230100:
231 .section __ex_table,"a" 231 .section __ex_table,"a"
232 .align 3 232 .align 3
233 .llong 100b,.Lsrc_error 233 .llong 100b,.Lsrc_error_nr
234 .previous 234 .previous
235 .endm 235 .endm
236 236
237 .macro dest 237 .macro source
238150:
239 .section __ex_table,"a"
240 .align 3
241 .llong 150b,.Lsrc_error
242 .previous
243 .endm
244
245 .macro dstnr
238200: 246200:
239 .section __ex_table,"a" 247 .section __ex_table,"a"
240 .align 3 248 .align 3
241 .llong 200b,.Ldest_error 249 .llong 200b,.Ldest_error_nr
250 .previous
251 .endm
252
253 .macro dest
254250:
255 .section __ex_table,"a"
256 .align 3
257 .llong 250b,.Ldest_error
242 .previous 258 .previous
243 .endm 259 .endm
244 260
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
269 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ 285 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
270 beq .Lcopy_aligned 286 beq .Lcopy_aligned
271 287
272 li r7,4 288 li r9,4
273 sub r6,r7,r6 289 sub r6,r9,r6
274 mtctr r6 290 mtctr r6
275 291
2761: 2921:
277source; lhz r6,0(r3) /* align to doubleword */ 293srcnr; lhz r6,0(r3) /* align to doubleword */
278 subi r5,r5,2 294 subi r5,r5,2
279 addi r3,r3,2 295 addi r3,r3,2
280 adde r0,r0,r6 296 adde r0,r0,r6
281dest; sth r6,0(r4) 297dstnr; sth r6,0(r4)
282 addi r4,r4,2 298 addi r4,r4,2
283 bdnz 1b 299 bdnz 1b
284 300
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
392 408
393 mtctr r6 409 mtctr r6
3943: 4103:
395source; ld r6,0(r3) 411srcnr; ld r6,0(r3)
396 addi r3,r3,8 412 addi r3,r3,8
397 adde r0,r0,r6 413 adde r0,r0,r6
398dest; std r6,0(r4) 414dstnr; std r6,0(r4)
399 addi r4,r4,8 415 addi r4,r4,8
400 bdnz 3b 416 bdnz 3b
401 417
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
405 srdi. r6,r5,2 421 srdi. r6,r5,2
406 beq .Lcopy_tail_halfword 422 beq .Lcopy_tail_halfword
407 423
408source; lwz r6,0(r3) 424srcnr; lwz r6,0(r3)
409 addi r3,r3,4 425 addi r3,r3,4
410 adde r0,r0,r6 426 adde r0,r0,r6
411dest; stw r6,0(r4) 427dstnr; stw r6,0(r4)
412 addi r4,r4,4 428 addi r4,r4,4
413 subi r5,r5,4 429 subi r5,r5,4
414 430
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
416 srdi. r6,r5,1 432 srdi. r6,r5,1
417 beq .Lcopy_tail_byte 433 beq .Lcopy_tail_byte
418 434
419source; lhz r6,0(r3) 435srcnr; lhz r6,0(r3)
420 addi r3,r3,2 436 addi r3,r3,2
421 adde r0,r0,r6 437 adde r0,r0,r6
422dest; sth r6,0(r4) 438dstnr; sth r6,0(r4)
423 addi r4,r4,2 439 addi r4,r4,2
424 subi r5,r5,2 440 subi r5,r5,2
425 441
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
427 andi. r6,r5,1 443 andi. r6,r5,1
428 beq .Lcopy_finish 444 beq .Lcopy_finish
429 445
430source; lbz r6,0(r3) 446srcnr; lbz r6,0(r3)
431 sldi r9,r6,8 /* Pad the byte out to 16 bits */ 447 sldi r9,r6,8 /* Pad the byte out to 16 bits */
432 adde r0,r0,r9 448 adde r0,r0,r9
433dest; stb r6,0(r4) 449dstnr; stb r6,0(r4)
434 450
435.Lcopy_finish: 451.Lcopy_finish:
436 addze r0,r0 /* add in final carry */ 452 addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
440 blr 456 blr
441 457
442.Lsrc_error: 458.Lsrc_error:
459 ld r14,STK_REG(R14)(r1)
460 ld r15,STK_REG(R15)(r1)
461 ld r16,STK_REG(R16)(r1)
462 addi r1,r1,STACKFRAMESIZE
463.Lsrc_error_nr:
443 cmpdi 0,r7,0 464 cmpdi 0,r7,0
444 beqlr 465 beqlr
445 li r6,-EFAULT 466 li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
447 blr 468 blr
448 469
449.Ldest_error: 470.Ldest_error:
471 ld r14,STK_REG(R14)(r1)
472 ld r15,STK_REG(R15)(r1)
473 ld r16,STK_REG(R16)(r1)
474 addi r1,r1,STACKFRAMESIZE
475.Ldest_error_nr:
450 cmpdi 0,r8,0 476 cmpdi 0,r8,0
451 beqlr 477 beqlr
452 li r6,-EFAULT 478 li r6,-EFAULT
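
The checksum_64.S changes split each exception-table macro in two: srcnr/dstnr register faults that can occur before the routine has saved r14-r16 and adjusted r1, so their handlers (.Lsrc_error_nr/.Ldest_error_nr) skip the unwind, while the original source/dest handlers now restore those registers and pop the frame first. A rough C model of why two error paths are needed; the struct and helpers are invented purely for illustration:

        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct frame { bool saved; };

        static void save_frame(struct frame *f)    { f->saved = true;  }
        static void restore_frame(struct frame *f) { f->saved = false; }

        static int copy_with_faults(struct frame *f, bool fault_early, bool fault_late)
        {
                if (fault_early)
                        goto err_nr;    /* cf. .Lsrc_error_nr: nothing to unwind yet */

                save_frame(f);          /* cf. saving r14-r16 and adjusting r1 */

                if (fault_late)
                        goto err;       /* cf. .Lsrc_error: unwind before reporting */

                restore_frame(f);
                return 0;

        err:
                restore_frame(f);       /* cf. ld r14-r16 + addi r1,r1,STACKFRAMESIZE */
        err_nr:
                return -EFAULT;
        }

        int main(void)
        {
                struct frame f = { false };

                printf("%d %d %d\n",
                       copy_with_faults(&f, false, false),
                       copy_with_faults(&f, true, false),
                       copy_with_faults(&f, false, true));
                return 0;
        }
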
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a7ee978fb860..b1faa1593c90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1505 */ 1505 */
1506 if ((ra == 1) && !(regs->msr & MSR_PR) \ 1506 if ((ra == 1) && !(regs->msr & MSR_PR) \
1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) { 1507 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1508#ifdef CONFIG_PPC32
1508 /* 1509 /*
1509 * Check if we will touch kernel stack overflow 1510 * Check if we will touch kernel stack overflow
1510 */ 1511 */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1513 err = -EINVAL; 1514 err = -EINVAL;
1514 break; 1515 break;
1515 } 1516 }
1516 1517#endif /* CONFIG_PPC32 */
1517 /* 1518 /*
1518 * Check if we already set since that means we'll 1519 * Check if we already set since that means we'll
1519 * lose the previous value. 1520 * lose the previous value.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0cd9e4c6837..8ed035d2edb5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
300{ 300{
301} 301}
302 302
303void register_page_bootmem_memmap(unsigned long section_nr,
304 struct page *start_page, unsigned long size)
305{
306}
303#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 307#endif /* CONFIG_SPARSEMEM_VMEMMAP */
304 308
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1cf9c5b67f24..3fa93dc7fe75 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -297,12 +297,21 @@ void __init paging_init(void)
297} 297}
298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */ 298#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
299 299
300static void __init register_page_bootmem_info(void)
301{
302 int i;
303
304 for_each_online_node(i)
305 register_page_bootmem_info_node(NODE_DATA(i));
306}
307
300void __init mem_init(void) 308void __init mem_init(void)
301{ 309{
302#ifdef CONFIG_SWIOTLB 310#ifdef CONFIG_SWIOTLB
303 swiotlb_init(0); 311 swiotlb_init(0);
304#endif 312#endif
305 313
314 register_page_bootmem_info();
306 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 315 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
307 set_max_mapnr(max_pfn); 316 set_max_mapnr(max_pfn);
308 free_all_bootmem(); 317 free_all_bootmem();
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index bf56e33f8257..2345bdb4d917 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
691{ 691{
692 if (fp->bpf_func != sk_run_filter) 692 if (fp->bpf_func != sk_run_filter)
693 module_free(NULL, fp->bpf_func); 693 module_free(NULL, fp->bpf_func);
694 kfree(fp);
694} 695}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 2ee4a707f0df..a3f7abd2f13f 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -199,6 +199,7 @@
199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) 199#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) 200#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) 201#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
202#define MMCR1_FAB_SHIFT 36
202#define MMCR1_DC_QUAL_SHIFT 47 203#define MMCR1_DC_QUAL_SHIFT 47
203#define MMCR1_IC_QUAL_SHIFT 46 204#define MMCR1_IC_QUAL_SHIFT 46
204 205
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
388 * the threshold bits are used for the match value. 389 * the threshold bits are used for the match value.
389 */ 390 */
390 if (event_is_fab_match(event[i])) { 391 if (event_is_fab_match(event[i])) {
391 mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) & 392 mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
392 EVENT_THR_CTL_MASK; 393 EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
393 } else { 394 } else {
394 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK; 395 val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
395 mmcra |= val << MMCRA_THR_CTL_SHIFT; 396 mmcra |= val << MMCRA_THR_CTL_SHIFT;
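
The power8-pmu.c fix above places the event's threshold-control bits into the fabric-match field of MMCR1 by shifting them up by the new MMCR1_FAB_SHIFT (36) instead of OR-ing them in at bit 0. A small standalone sketch of the bit placement; MMCR1_FAB_SHIFT matches the hunk, but the EVENT_THR_CTL_* values and the sample event below are placeholders, not the real POWER8 encodings:

        #include <stdio.h>

        #define EVENT_THR_CTL_SHIFT  32                 /* placeholder position */
        #define EVENT_THR_CTL_MASK   0xffull            /* placeholder width */
        #define MMCR1_FAB_SHIFT      36                 /* from the hunk */

        int main(void)
        {
                unsigned long long event = 0xabull << EVENT_THR_CTL_SHIFT;
                unsigned long long mmcr1 = 0;

                mmcr1 |= ((event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK)
                                << MMCR1_FAB_SHIFT;

                printf("mmcr1 = %#llx\n", mmcr1);       /* 0xab lands at bits 36..43 */
                return 0;
        }
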
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1c1771a40250..24f58cb0a543 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
233 233
234 alloc_bootmem_cpumask_var(&of_spin_mask); 234 alloc_bootmem_cpumask_var(&of_spin_mask);
235 235
236 /* Mark threads which are still spinning in hold loops. */ 236 /*
237 if (cpu_has_feature(CPU_FTR_SMT)) { 237 * Mark threads which are still spinning in hold loops
238 for_each_present_cpu(i) { 238 *
239 if (cpu_thread_in_core(i) == 0) 239 * We know prom_init will not have started them if RTAS supports
240 cpumask_set_cpu(i, of_spin_mask); 240 * query-cpu-stopped-state.
241 } 241 */
242 } else { 242 if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
243 cpumask_copy(of_spin_mask, cpu_present_mask); 243 if (cpu_has_feature(CPU_FTR_SMT)) {
244 for_each_present_cpu(i) {
245 if (cpu_thread_in_core(i) == 0)
246 cpumask_set_cpu(i, of_spin_mask);
247 }
248 } else
249 cpumask_copy(of_spin_mask, cpu_present_mask);
250
251 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
244 } 252 }
245 253
246 cpumask_clear_cpu(boot_cpuid, of_spin_mask);
247
248 /* Non-lpar has additional take/give timebase */ 254 /* Non-lpar has additional take/give timebase */
249 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { 255 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
250 smp_ops->give_timebase = rtas_give_timebase; 256 smp_ops->give_timebase = rtas_give_timebase;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dcc6ac2d8026..7143793859fa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -93,6 +93,7 @@ config S390
93 select ARCH_INLINE_WRITE_UNLOCK_IRQ 93 select ARCH_INLINE_WRITE_UNLOCK_IRQ
94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 94 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
96 select ARCH_USE_CMPXCHG_LOCKREF
96 select ARCH_WANT_IPC_PARSE_VERSION 97 select ARCH_WANT_IPC_PARSE_VERSION
97 select BUILDTIME_EXTABLE_SORT 98 select BUILDTIME_EXTABLE_SORT
98 select CLONE_BACKWARDS2 99 select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
102 select GENERIC_TIME_VSYSCALL_OLD 103 select GENERIC_TIME_VSYSCALL_OLD
103 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 104 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
104 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 105 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
105 select HAVE_ARCH_MUTEX_CPU_RELAX
106 select HAVE_ARCH_SECCOMP_FILTER 106 select HAVE_ARCH_SECCOMP_FILTER
107 select HAVE_ARCH_TRACEHOOK 107 select HAVE_ARCH_TRACEHOOK
108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
15 15
16static __always_inline bool arch_static_branch(struct static_key *key) 16static __always_inline bool arch_static_branch(struct static_key *key)
17{ 17{
18 asm goto("0: brcl 0,0\n" 18 asm_volatile_goto("0: brcl 0,0\n"
19 ".pushsection __jump_table, \"aw\"\n" 19 ".pushsection __jump_table, \"aw\"\n"
20 ASM_ALIGN "\n" 20 ASM_ALIGN "\n"
21 ASM_PTR " 0b, %l[label], %0\n" 21 ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 688271f5f2e4..458c1f7fbc18 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,5 +7,3 @@
7 */ 7 */
8 8
9#include <asm-generic/mutex-dec.h> 9#include <asm-generic/mutex-dec.h>
10
11#define arch_mutex_cpu_relax() barrier()
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9b60a36c348d..2204400d0bd5 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
748 748
749static inline void pgste_set_pte(pte_t *ptep, pte_t entry) 749static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
750{ 750{
751 if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) { 751 if (!MACHINE_HAS_ESOP &&
752 (pte_val(entry) & _PAGE_PRESENT) &&
753 (pte_val(entry) & _PAGE_WRITE)) {
752 /* 754 /*
753 * Without enhanced suppression-on-protection force 755 * Without enhanced suppression-on-protection force
754 * the dirty bit on for all writable ptes. 756 * the dirty bit on for all writable ptes.
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 0eb37505cab1..ca7821f07260 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
198 barrier(); 198 barrier();
199} 199}
200 200
201#define arch_mutex_cpu_relax() barrier()
202
201static inline void psw_set_key(unsigned int key) 203static inline void psw_set_key(unsigned int key)
202{ 204{
203 asm volatile("spka 0(%0)" : : "d" (key)); 205 asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 701fe8c59e1f..83e5d216105e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
44extern int arch_spin_trylock_retry(arch_spinlock_t *); 44extern int arch_spin_trylock_retry(arch_spinlock_t *);
45extern void arch_spin_relax(arch_spinlock_t *lock); 45extern void arch_spin_relax(arch_spinlock_t *lock);
46 46
47static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
48{
49 return lock.owner_cpu == 0;
50}
51
47static inline void arch_spin_lock(arch_spinlock_t *lp) 52static inline void arch_spin_lock(arch_spinlock_t *lp)
48{ 53{
49 int old; 54 int old;
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8ad8af915032..819b94d22720 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
71 71
72typedef unsigned long long cycles_t; 72typedef unsigned long long cycles_t;
73 73
74static inline unsigned long long get_tod_clock(void)
75{
76 unsigned long long clk;
77
78#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
79 asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
80#else
81 asm volatile("stck %0" : "=Q" (clk) : : "cc");
82#endif
83 return clk;
84}
85
86static inline void get_tod_clock_ext(char *clk) 74static inline void get_tod_clock_ext(char *clk)
87{ 75{
88 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 76 asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
89} 77}
90 78
91static inline unsigned long long get_tod_clock_xt(void) 79static inline unsigned long long get_tod_clock(void)
92{ 80{
93 unsigned char clk[16]; 81 unsigned char clk[16];
94 get_tod_clock_ext(clk); 82 get_tod_clock_ext(clk);
95 return *((unsigned long long *)&clk[1]); 83 return *((unsigned long long *)&clk[1]);
96} 84}
97 85
86static inline unsigned long long get_tod_clock_fast(void)
87{
88#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
89 unsigned long long clk;
90
91 asm volatile("stckf %0" : "=Q" (clk) : : "cc");
92 return clk;
93#else
94 return get_tod_clock();
95#endif
96}
97
98static inline cycles_t get_cycles(void) 98static inline cycles_t get_cycles(void)
99{ 99{
100 return (cycles_t) get_tod_clock() >> 2; 100 return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
125 */ 125 */
126static inline unsigned long long get_tod_clock_monotonic(void) 126static inline unsigned long long get_tod_clock_monotonic(void)
127{ 127{
128 return get_tod_clock_xt() - sched_clock_base_cc; 128 return get_tod_clock() - sched_clock_base_cc;
129} 129}
130 130
131/** 131/**
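
The timex.h rework above makes get_tod_clock() read via STCKE and adds get_tod_clock_fast(), which uses the cheaper STCKF instruction when the build targets a machine level that has it and otherwise falls back to the extended read; later hunks switch hot paths (delay loops, debug entries, KVM wakeups) to the fast variant. A hedged, compile-anywhere sketch of that shape; the stand-in functions and the HAVE_FAST_CLOCK macro are invented and no real STCK* sequences are shown:

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t read_clock_extended(void)       /* stands in for the stcke path */
        {
                static uint64_t fake;
                return ++fake;
        }

        static inline uint64_t read_clock_fast(void)
        {
        #ifdef HAVE_FAST_CLOCK  /* cf. CONFIG_HAVE_MARCH_Z9_109_FEATURES */
                return 42;      /* would be the single "stckf" instruction */
        #else
                return read_clock_extended();   /* fallback: always available */
        #endif
        }

        int main(void)
        {
                printf("%llu\n", (unsigned long long)read_clock_fast());
                return 0;
        }
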
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 1389b637dae5..adaa9e9478d8 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
99 break; 99 break;
100 } 100 }
101 } 101 }
102 return err; 102 return err ? -EFAULT : 0;
103} 103}
104 104
105int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 105int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,7 +148,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
148 break; 148 break;
149 } 149 }
150 } 150 }
151 return err; 151 return err ? -EFAULT : 0;
152} 152}
153 153
154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) 154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7b..7dd21720e5b0 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
40} 40}
41 41
42/* 42/*
43 * Copy up to one page to vmalloc or real memory 43 * Copy real to virtual or real memory
44 */ 44 */
45static ssize_t copy_page_real(void *buf, void *src, size_t csize) 45static int copy_from_realmem(void *dest, void *src, size_t count)
46{ 46{
47 size_t size; 47 unsigned long size;
48 int rc;
48 49
49 if (is_vmalloc_addr(buf)) { 50 if (!count)
50 BUG_ON(csize >= PAGE_SIZE); 51 return 0;
51 /* If buf is not page aligned, copy first part */ 52 if (!is_vmalloc_or_module_addr(dest))
52 size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize); 53 return memcpy_real(dest, src, count);
53 if (size) { 54 do {
54 if (memcpy_real(load_real_addr(buf), src, size)) 55 size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
55 return -EFAULT; 56 if (memcpy_real(load_real_addr(dest), src, size))
56 buf += size; 57 return -EFAULT;
57 src += size; 58 count -= size;
58 } 59 dest += size;
59 /* Copy second part */ 60 src += size;
60 size = csize - size; 61 } while (count);
61 return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0; 62 return 0;
62 } else {
63 return memcpy_real(buf, src, csize);
64 }
65} 63}
66 64
67/* 65/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
114 rc = copy_to_user_real((void __force __user *) buf, 112 rc = copy_to_user_real((void __force __user *) buf,
115 (void *) src, csize); 113 (void *) src, csize);
116 else 114 else
117 rc = copy_page_real(buf, (void *) src, csize); 115 rc = copy_from_realmem(buf, (void *) src, csize);
118 return (rc == 0) ? rc : csize; 116 return (rc == 0) ? rc : csize;
119} 117}
120 118
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
210 if (OLDMEM_BASE) { 208 if (OLDMEM_BASE) {
211 if ((unsigned long) src < OLDMEM_SIZE) { 209 if ((unsigned long) src < OLDMEM_SIZE) {
212 copied = min(count, OLDMEM_SIZE - (unsigned long) src); 210 copied = min(count, OLDMEM_SIZE - (unsigned long) src);
213 rc = memcpy_real(dest, src + OLDMEM_BASE, copied); 211 rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
214 if (rc) 212 if (rc)
215 return rc; 213 return rc;
216 } 214 }
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
223 return rc; 221 return rc;
224 } 222 }
225 } 223 }
226 return memcpy_real(dest + copied, src + copied, count - copied); 224 return copy_from_realmem(dest + copied, src + copied, count - copied);
227} 225}
228 226
229/* 227/*
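
copy_from_realmem() above replaces the old one-page-at-most helper with a loop that copies up to the next destination page boundary on each iteration, so vmalloc/module destinations of any length are handled. A hedged userspace model of that chunking loop, with memcpy standing in for memcpy_real() and a fixed 4 KiB PAGE_SIZE:

        #include <stddef.h>
        #include <stdio.h>
        #include <string.h>

        #define PAGE_SIZE 4096UL
        #define PAGE_MASK (~(PAGE_SIZE - 1))

        static int copy_chunked(void *dest, const void *src, size_t count)
        {
                size_t size;

                if (!count)
                        return 0;
                do {
                        /* bytes left in the current destination page */
                        size = PAGE_SIZE - ((unsigned long)dest & ~PAGE_MASK);
                        if (size > count)
                                size = count;
                        memcpy(dest, src, size);        /* cf. memcpy_real(load_real_addr(dest), ...) */
                        count -= size;
                        dest = (char *)dest + size;
                        src = (const char *)src + size;
                } while (count);
                return 0;
        }

        int main(void)
        {
                char src[10000], dst[10000];

                memset(src, 0x5a, sizeof(src));
                copy_chunked(dst, src, sizeof(src));
                printf("match: %d\n", memcmp(dst, src, sizeof(dst)) == 0);
                return 0;
        }
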
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index f1279dc2e1bc..17d62fe5d7b7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
867debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, 867debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
868 int exception) 868 int exception)
869{ 869{
870 active->id.stck = get_tod_clock(); 870 active->id.stck = get_tod_clock_fast();
871 active->id.fields.cpuid = smp_processor_id(); 871 active->id.fields.cpuid = smp_processor_id();
872 active->caller = __builtin_return_address(0); 872 active->caller = __builtin_return_address(0);
873 active->id.fields.exception = exception; 873 active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000c..0dc2b6d0a1ec 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
266 tm __TI_flags+3(%r12),_TIF_SYSCALL 266 tm __TI_flags+3(%r12),_TIF_SYSCALL
267 jno sysc_return 267 jno sysc_return
268 lm %r2,%r7,__PT_R2(%r11) # load svc arguments 268 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
269 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
269 xr %r8,%r8 # svc 0 returns -ENOSYS 270 xr %r8,%r8 # svc 0 returns -ENOSYS
270 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) 271 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
271 jnl sysc_nr_ok # invalid svc number -> do svc 0 272 jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6a..e5b43c97a834 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
297 tm __TI_flags+7(%r12),_TIF_SYSCALL 297 tm __TI_flags+7(%r12),_TIF_SYSCALL
298 jno sysc_return 298 jno sysc_return
299 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 299 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
300 lg %r10,__TI_sysc_table(%r12) # address of system call table
300 lghi %r8,0 # svc 0 returns -ENOSYS 301 lghi %r8,0 # svc 0 returns -ENOSYS
301 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number 302 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
302 cghi %r1,NR_syscalls 303 cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb245034..d86e64eddb42 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
67 case 0xac: /* stnsm */ 67 case 0xac: /* stnsm */
68 case 0xad: /* stosm */ 68 case 0xad: /* stosm */
69 return -EINVAL; 69 return -EINVAL;
70 case 0xc6:
71 switch (insn[0] & 0x0f) {
72 case 0x00: /* exrl */
73 return -EINVAL;
74 }
70 } 75 }
71 switch (insn[0]) { 76 switch (insn[0]) {
72 case 0x0101: /* pr */ 77 case 0x0101: /* pr */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
180 break; 185 break;
181 case 0xc6: 186 case 0xc6:
182 switch (insn[0] & 0x0f) { 187 switch (insn[0] & 0x0f) {
183 case 0x00: /* exrl */
184 case 0x02: /* pfdrl */ 188 case 0x02: /* pfdrl */
185 case 0x04: /* cghrl */ 189 case 0x04: /* cghrl */
186 case 0x05: /* chrl */ 190 case 0x05: /* chrl */
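
The kprobes.c change moves exrl (opcode 0xc6, subcode 0x00) from the "relative-long instruction, fix up the displacement" table into the "refuse to probe" check, since an execute-type instruction cannot be safely single-stepped out of line. A toy sketch of that classification; the decode below is a stand-in, not the real s390 instruction tables:

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>

        static int opcode_is_probeable(const uint16_t *insn)
        {
                switch (insn[0] >> 8) {
                case 0xc6:
                        if ((insn[0] & 0x0f) == 0x00)   /* exrl: executes another insn */
                                return -EINVAL;         /* refuse to place a probe here */
                        break;
                }
                return 0;
        }

        int main(void)
        {
                uint16_t exrl = 0xc600, pfdrl = 0xc602;

                printf("exrl: %d, pfdrl: %d\n",
                       opcode_is_probeable(&exrl), opcode_is_probeable(&pfdrl));
                return 0;
        }
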
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f35cb33e510..7f1f7ac5cf7f 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
385 } 385 }
386 386
387 if ((!rc) && (vcpu->arch.sie_block->ckc < 387 if ((!rc) && (vcpu->arch.sie_block->ckc <
388 get_tod_clock() + vcpu->arch.sie_block->epoch)) { 388 get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
389 if ((!psw_extint_disabled(vcpu)) && 389 if ((!psw_extint_disabled(vcpu)) &&
390 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 390 (vcpu->arch.sie_block->gcr[0] & 0x800ul))
391 rc = 1; 391 rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
425 goto no_timer; 425 goto no_timer;
426 } 426 }
427 427
428 now = get_tod_clock() + vcpu->arch.sie_block->epoch; 428 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
429 if (vcpu->arch.sie_block->ckc < now) { 429 if (vcpu->arch.sie_block->ckc < now) {
430 __unset_cpu_idle(vcpu); 430 __unset_cpu_idle(vcpu);
431 return 0; 431 return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
515 } 515 }
516 516
517 if ((vcpu->arch.sie_block->ckc < 517 if ((vcpu->arch.sie_block->ckc <
518 get_tod_clock() + vcpu->arch.sie_block->epoch)) 518 get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
519 __try_deliver_ckc_interrupt(vcpu); 519 __try_deliver_ckc_interrupt(vcpu);
520 520
521 if (atomic_read(&fi->active)) { 521 if (atomic_read(&fi->active)) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 57c87d7d7ede..a9f3d0042d58 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
44 do { 44 do {
45 set_clock_comparator(end); 45 set_clock_comparator(end);
46 vtime_stop_cpu(); 46 vtime_stop_cpu();
47 } while (get_tod_clock() < end); 47 } while (get_tod_clock_fast() < end);
48 lockdep_on(); 48 lockdep_on();
49 __ctl_load(cr0, 0, 0); 49 __ctl_load(cr0, 0, 0);
50 __ctl_load(cr6, 6, 6); 50 __ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
55{ 55{
56 u64 clock_saved, end; 56 u64 clock_saved, end;
57 57
58 end = get_tod_clock() + (usecs << 12); 58 end = get_tod_clock_fast() + (usecs << 12);
59 do { 59 do {
60 clock_saved = 0; 60 clock_saved = 0;
61 if (end < S390_lowcore.clock_comparator) { 61 if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
65 vtime_stop_cpu(); 65 vtime_stop_cpu();
66 if (clock_saved) 66 if (clock_saved)
67 local_tick_enable(clock_saved); 67 local_tick_enable(clock_saved);
68 } while (get_tod_clock() < end); 68 } while (get_tod_clock_fast() < end);
69} 69}
70 70
71/* 71/*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
109{ 109{
110 u64 end; 110 u64 end;
111 111
112 end = get_tod_clock() + (usecs << 12); 112 end = get_tod_clock_fast() + (usecs << 12);
113 while (get_tod_clock() < end) 113 while (get_tod_clock_fast() < end)
114 cpu_relax(); 114 cpu_relax();
115} 115}
116 116
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
120 120
121 nsecs <<= 9; 121 nsecs <<= 9;
122 do_div(nsecs, 125); 122 do_div(nsecs, 125);
123 end = get_tod_clock() + nsecs; 123 end = get_tod_clock_fast() + nsecs;
124 if (nsecs & ~0xfffUL) 124 if (nsecs & ~0xfffUL)
125 __udelay(nsecs >> 12); 125 __udelay(nsecs >> 12);
126 while (get_tod_clock() < end) 126 while (get_tod_clock_fast() < end)
127 barrier(); 127 barrier();
128} 128}
129EXPORT_SYMBOL(__ndelay); 129EXPORT_SYMBOL(__ndelay);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 709239285869..a5df511e27a2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
881 struct bpf_binary_header *header = (void *)addr; 881 struct bpf_binary_header *header = (void *)addr;
882 882
883 if (fp->bpf_func == sk_run_filter) 883 if (fp->bpf_func == sk_run_filter)
884 return; 884 goto free_filter;
885 set_memory_rw(addr, header->pages); 885 set_memory_rw(addr, header->pages);
886 module_free(NULL, header); 886 module_free(NULL, header);
887free_filter:
888 kfree(fp);
887} 889}
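
The bpf_jit_free() hunks here and in the powerpc and sparc JITs make the function free the sk_filter itself on every path, with a goto so the interpreter-only case skips just the image teardown. A hedged sketch of that shape with invented types; free() stands in for both set_memory_rw()+module_free() and kfree():

        #include <stdbool.h>
        #include <stdlib.h>

        struct filter { bool jited; void *image; };

        static void jit_free(struct filter *fp)
        {
                if (!fp->jited)
                        goto free_filter;       /* interpreter-only: no image to tear down */
                free(fp->image);                /* stands in for set_memory_rw() + module_free() */
        free_filter:
                free(fp);                       /* the hunk's addition: always free the filter */
        }

        int main(void)
        {
                struct filter *fp = calloc(1, sizeof(*fp));

                jit_free(fp);
                return 0;
        }
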
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index a1be70db75fe..305f7ee1f382 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -2,6 +2,7 @@ menu "Machine selection"
2 2
3config SCORE 3config SCORE
4 def_bool y 4 def_bool y
5 select HAVE_GENERIC_HARDIRQS
5 select GENERIC_IRQ_SHOW 6 select GENERIC_IRQ_SHOW
6 select GENERIC_IOMAP 7 select GENERIC_IOMAP
7 select GENERIC_ATOMIC64 8 select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
110source "crypto/Kconfig" 111source "crypto/Kconfig"
111 112
112source "lib/Kconfig" 113source "lib/Kconfig"
114
115config NO_IOMEM
116 def_bool y
diff --git a/arch/score/Makefile b/arch/score/Makefile
index 974aefe86123..9e3e060290e0 100644
--- a/arch/score/Makefile
+++ b/arch/score/Makefile
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
20# 20#
21KBUILD_AFLAGS += $(cflags-y) 21KBUILD_AFLAGS += $(cflags-y)
22KBUILD_CFLAGS += $(cflags-y) 22KBUILD_CFLAGS += $(cflags-y)
23KBUILD_AFLAGS_MODULE += -mlong-calls 23KBUILD_AFLAGS_MODULE +=
24KBUILD_CFLAGS_MODULE += -mlong-calls 24KBUILD_CFLAGS_MODULE +=
25LDFLAGS += --oformat elf32-littlescore 25LDFLAGS += --oformat elf32-littlescore
26LDFLAGS_vmlinux += -G0 -static -nostdlib 26LDFLAGS_vmlinux += -G0 -static -nostdlib
27 27
diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h
index f909ac3144a4..961bd64015a8 100644
--- a/arch/score/include/asm/checksum.h
+++ b/arch/score/include/asm/checksum.h
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
184 __wsum sum) 184 __wsum sum)
185{ 185{
186 __asm__ __volatile__( 186 __asm__ __volatile__(
187 ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" 187 ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
188 ".set\tnoat\n\t" 188 "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
189 "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" 189 "cmp.c\t%5, %0\n\t"
190 "sltu\t$1, %0, %5\n\t" 190 "bleu 1f\n\t"
191 "addu\t%0, $1\n\t" 191 "addi\t%0, 0x1\n\t"
192 "addu\t%0, %6\t\t\t# csum\n\t" 192 "1:add\t%0, %0, %6\t\t\t# csum\n\t"
193 "sltu\t$1, %0, %6\n\t" 193 "cmp.c\t%6, %0\n\t"
194 "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" 194 "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
195 "addu\t%0, $1\n\t" 195 "bleu 1f\n\t"
196 "addu\t%0, %1\n\t" 196 "addi\t%0, 0x1\n\t"
197 "sltu\t$1, %0, %1\n\t" 197 "1:add\t%0, %0, %1\n\t"
198 "lw\t%1, 4(%2)\n\t" 198 "cmp.c\t%1, %0\n\t"
199 "addu\t%0, $1\n\t" 199 "1:lw\t%1, [%2, 4]\n\t"
200 "addu\t%0, %1\n\t" 200 "bleu 1f\n\t"
201 "sltu\t$1, %0, %1\n\t" 201 "addi\t%0, 0x1\n\t"
202 "lw\t%1, 8(%2)\n\t" 202 "1:add\t%0, %0, %1\n\t"
203 "addu\t%0, $1\n\t" 203 "cmp.c\t%1, %0\n\t"
204 "addu\t%0, %1\n\t" 204 "lw\t%1, [%2,8]\n\t"
205 "sltu\t$1, %0, %1\n\t" 205 "bleu 1f\n\t"
206 "lw\t%1, 12(%2)\n\t" 206 "addi\t%0, 0x1\n\t"
207 "addu\t%0, $1\n\t" 207 "1:add\t%0, %0, %1\n\t"
208 "addu\t%0, %1\n\t" 208 "cmp.c\t%1, %0\n\t"
209 "sltu\t$1, %0, %1\n\t" 209 "lw\t%1, [%2, 12]\n\t"
210 "lw\t%1, 0(%3)\n\t" 210 "bleu 1f\n\t"
211 "addu\t%0, $1\n\t" 211 "addi\t%0, 0x1\n\t"
212 "addu\t%0, %1\n\t" 212 "1:add\t%0, %0,%1\n\t"
213 "sltu\t$1, %0, %1\n\t" 213 "cmp.c\t%1, %0\n\t"
214 "lw\t%1, 4(%3)\n\t" 214 "lw\t%1, [%3, 0]\n\t"
215 "addu\t%0, $1\n\t" 215 "bleu 1f\n\t"
216 "addu\t%0, %1\n\t" 216 "addi\t%0, 0x1\n\t"
217 "sltu\t$1, %0, %1\n\t" 217 "1:add\t%0, %0, %1\n\t"
218 "lw\t%1, 8(%3)\n\t" 218 "cmp.c\t%1, %0\n\t"
219 "addu\t%0, $1\n\t" 219 "lw\t%1, [%3, 4]\n\t"
220 "addu\t%0, %1\n\t" 220 "bleu 1f\n\t"
221 "sltu\t$1, %0, %1\n\t" 221 "addi\t%0, 0x1\n\t"
222 "lw\t%1, 12(%3)\n\t" 222 "1:add\t%0, %0, %1\n\t"
223 "addu\t%0, $1\n\t" 223 "cmp.c\t%1, %0\n\t"
224 "addu\t%0, %1\n\t" 224 "lw\t%1, [%3, 8]\n\t"
225 "sltu\t$1, %0, %1\n\t" 225 "bleu 1f\n\t"
226 "addu\t%0, $1\t\t\t# Add final carry\n\t" 226 "addi\t%0, 0x1\n\t"
227 ".set\tnoat\n\t" 227 "1:add\t%0, %0, %1\n\t"
228 ".set\tnoreorder" 228 "cmp.c\t%1, %0\n\t"
229 "lw\t%1, [%3, 12]\n\t"
230 "bleu 1f\n\t"
231 "addi\t%0, 0x1\n\t"
232 "1:add\t%0, %0, %1\n\t"
233 "cmp.c\t%1, %0\n\t"
234 "bleu 1f\n\t"
235 "addi\t%0, 0x1\n\t"
236 "1:\n\t"
237 ".set\toptimize"
229 : "=r" (sum), "=r" (proto) 238 : "=r" (sum), "=r" (proto)
230 : "r" (saddr), "r" (daddr), 239 : "r" (saddr), "r" (daddr),
231 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); 240 "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h
index fbbfd7132e3b..574c8827abe2 100644
--- a/arch/score/include/asm/io.h
+++ b/arch/score/include/asm/io.h
@@ -5,5 +5,4 @@
5 5
6#define virt_to_bus virt_to_phys 6#define virt_to_bus virt_to_phys
7#define bus_to_virt phys_to_virt 7#define bus_to_virt phys_to_virt
8
9#endif /* _ASM_SCORE_IO_H */ 8#endif /* _ASM_SCORE_IO_H */
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 059a61b7071b..716b3fd1d863 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -2,7 +2,7 @@
2#define _ASM_SCORE_PGALLOC_H 2#define _ASM_SCORE_PGALLOC_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5#include <linux/highmem.h>
6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 6static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7 pte_t *pte) 7 pte_t *pte)
8{ 8{
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 7234ed09b7b7..befb87d30a89 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -264,7 +264,7 @@ resume_kernel:
264 disable_irq 264 disable_irq
265 lw r8, [r28, TI_PRE_COUNT] 265 lw r8, [r28, TI_PRE_COUNT]
266 cmpz.c r8 266 cmpz.c r8
267 bne r8, restore_all 267 bne restore_all
268need_resched: 268need_resched:
269 lw r8, [r28, TI_FLAGS] 269 lw r8, [r28, TI_FLAGS]
270 andri.c r9, r8, _TIF_NEED_RESCHED 270 andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
415 sw r9, [r0, PT_EPC] 415 sw r9, [r0, PT_EPC]
416 416
417 cmpi.c r27, __NR_syscalls # check syscall number 417 cmpi.c r27, __NR_syscalls # check syscall number
418 bgeu illegal_syscall 418 bcs illegal_syscall
419 419
420 slli r8, r27, 2 # get syscall routine 420 slli r8, r27, 2 # get syscall routine
421 la r11, sys_call_table 421 la r11, sys_call_table
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index f4c6d02421d3..a1519ad3d49d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
78 p->thread.reg0 = (unsigned long) childregs; 78 p->thread.reg0 = (unsigned long) childregs;
79 if (unlikely(p->flags & PF_KTHREAD)) { 79 if (unlikely(p->flags & PF_KTHREAD)) {
80 memset(childregs, 0, sizeof(struct pt_regs)); 80 memset(childregs, 0, sizeof(struct pt_regs));
81 p->thread->reg12 = usp; 81 p->thread.reg12 = usp;
82 p->thread->reg13 = arg; 82 p->thread.reg13 = arg;
83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread; 83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
84 } else { 84 } else {
85 *childregs = *current_pt_regs(); 85 *childregs = *current_pt_regs();
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2137ad667438..78c4fdb91bc5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -506,12 +506,17 @@ config SUN_OPENPROMFS
506 Only choose N if you know in advance that you will not need to modify 506 Only choose N if you know in advance that you will not need to modify
507 OpenPROM settings on the running system. 507 OpenPROM settings on the running system.
508 508
509# Makefile helper 509# Makefile helpers
510config SPARC64_PCI 510config SPARC64_PCI
511 bool 511 bool
512 default y 512 default y
513 depends on SPARC64 && PCI 513 depends on SPARC64 && PCI
514 514
515config SPARC64_PCI_MSI
516 bool
517 default y
518 depends on SPARC64_PCI && PCI_MSI
519
515endmenu 520endmenu
516 521
517menu "Executable file formats" 522menu "Executable file formats"
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index e204f902e6c9..7c90c50c200d 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -254,7 +254,7 @@ static int sun_fd_request_irq(void)
254 once = 1; 254 once = 1;
255 255
256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq, 256 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
257 IRQF_DISABLED, "floppy", NULL); 257 0, "floppy", NULL);
258 258
259 return ((error == 0) ? 0 : -1); 259 return ((error == 0) ? 0 : -1);
260 } 260 }
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832f..ec2e2e2aba7d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
9 9
10static __always_inline bool arch_static_branch(struct static_key *key) 10static __always_inline bool arch_static_branch(struct static_key *key)
11{ 11{
12 asm goto("1:\n\t" 12 asm_volatile_goto("1:\n\t"
13 "nop\n\t" 13 "nop\n\t"
14 "nop\n\t" 14 "nop\n\t"
15 ".pushsection __jump_table, \"aw\"\n\t" 15 ".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index d432fb20358e..d15cc1794b0e 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,3 +1,4 @@
1
1# 2#
2# Makefile for the linux kernel. 3# Makefile for the linux kernel.
3# 4#
@@ -99,7 +100,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
99obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o 100obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
100obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o 101obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
101obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o 102obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
102obj-$(CONFIG_PCI_MSI) += pci_msi.o 103obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o
103 104
104obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o 105obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
105 106
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 62d6b153ffa2..dff60abbea01 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -849,9 +849,8 @@ void ldom_reboot(const char *boot_command)
849 if (boot_command && strlen(boot_command)) { 849 if (boot_command && strlen(boot_command)) {
850 unsigned long len; 850 unsigned long len;
851 851
852 strcpy(full_boot_str, "boot "); 852 snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
853 strlcpy(full_boot_str + strlen("boot "), boot_command, 853 boot_command);
854 sizeof(full_boot_str + strlen("boot ")));
855 len = strlen(full_boot_str); 854 len = strlen(full_boot_str);
856 855
857 if (reboot_data_supported) { 856 if (reboot_data_supported) {
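
The ds.c fix above replaces a strcpy/strlcpy pair whose length argument was sizeof(full_boot_str + strlen("boot ")) — the size of a pointer, not the space left in the buffer — with a single bounded snprintf. A short illustration of the difference; the buffer size and boot command are made up:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char full_boot_str[256];
                const char *boot_command = "disk0:a vmlinux";

                /* buggy bound: prints 8 (or 4), not 256 - strlen("boot ") */
                printf("bogus bound: %zu\n", sizeof(full_boot_str + strlen("boot ")));

                /* fixed pattern: one bounded format of the whole string */
                snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", boot_command);
                printf("%s\n", full_boot_str);
                return 0;
        }
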
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 54df554b82d9..e01d75d40329 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1249,12 +1249,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); 1249 snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1250 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
1251 1251
1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED, 1252 err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
1253 lp->rx_irq_name, lp); 1253 lp->rx_irq_name, lp);
1254 if (err) 1254 if (err)
1255 return err; 1255 return err;
1256 1256
1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED, 1257 err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
1258 lp->tx_irq_name, lp); 1258 lp->tx_irq_name, lp);
1259 if (err) { 1259 if (err) {
1260 free_irq(lp->cfg.rx_irq, lp); 1260 free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 9c7be59e6f5a..218b6b23c378 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
808{ 808{
809 if (fp->bpf_func != sk_run_filter) 809 if (fp->bpf_func != sk_run_filter)
810 module_free(NULL, fp->bpf_func); 810 module_free(NULL, fp->bpf_func);
811 kfree(fp);
811} 812}
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
166 * 166 *
167 * Atomically sets @v to @i and returns old @v 167 * Atomically sets @v to @i and returns old @v
168 */ 168 */
169static inline u64 atomic64_xchg(atomic64_t *v, u64 n) 169static inline long long atomic64_xchg(atomic64_t *v, long long n)
170{ 170{
171 return xchg64(&v->counter, n); 171 return xchg64(&v->counter, n);
172} 172}
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
180 * Atomically checks if @v holds @o and replaces it with @n if so. 180 * Atomically checks if @v holds @o and replaces it with @n if so.
181 * Returns the old value at @v. 181 * Returns the old value at @v.
182 */ 182 */
183static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 183static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
184 long long n)
184{ 185{
185 return cmpxchg64(&v->counter, o, n); 186 return cmpxchg64(&v->counter, o, n);
186} 187}
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
80/* A 64bit atomic type */ 80/* A 64bit atomic type */
81 81
82typedef struct { 82typedef struct {
83 u64 __aligned(8) counter; 83 long long counter;
84} atomic64_t; 84} atomic64_t;
85 85
86#define ATOMIC64_INIT(val) { (val) } 86#define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
91 * 91 *
92 * Atomically reads the value of @v. 92 * Atomically reads the value of @v.
93 */ 93 */
94static inline u64 atomic64_read(const atomic64_t *v) 94static inline long long atomic64_read(const atomic64_t *v)
95{ 95{
96 /* 96 /*
97 * Requires an atomic op to read both 32-bit parts consistently. 97 * Requires an atomic op to read both 32-bit parts consistently.
98 * Casting away const is safe since the atomic support routines 98 * Casting away const is safe since the atomic support routines
99 * do not write to memory if the value has not been modified. 99 * do not write to memory if the value has not been modified.
100 */ 100 */
101 return _atomic64_xchg_add((u64 *)&v->counter, 0); 101 return _atomic64_xchg_add((long long *)&v->counter, 0);
102} 102}
103 103
104/** 104/**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
108 * 108 *
109 * Atomically adds @i to @v. 109 * Atomically adds @i to @v.
110 */ 110 */
111static inline void atomic64_add(u64 i, atomic64_t *v) 111static inline void atomic64_add(long long i, atomic64_t *v)
112{ 112{
113 _atomic64_xchg_add(&v->counter, i); 113 _atomic64_xchg_add(&v->counter, i);
114} 114}
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
120 * 120 *
121 * Atomically adds @i to @v and returns @i + @v 121 * Atomically adds @i to @v and returns @i + @v
122 */ 122 */
123static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 123static inline long long atomic64_add_return(long long i, atomic64_t *v)
124{ 124{
125 smp_mb(); /* barrier for proper semantics */ 125 smp_mb(); /* barrier for proper semantics */
126 return _atomic64_xchg_add(&v->counter, i) + i; 126 return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
135 * Atomically adds @a to @v, so long as @v was not already @u. 135 * Atomically adds @a to @v, so long as @v was not already @u.
136 * Returns non-zero if @v was not @u, and zero otherwise. 136 * Returns non-zero if @v was not @u, and zero otherwise.
137 */ 137 */
138static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 138static inline long long atomic64_add_unless(atomic64_t *v, long long a,
139 long long u)
139{ 140{
140 smp_mb(); /* barrier for proper semantics */ 141 smp_mb(); /* barrier for proper semantics */
141 return _atomic64_xchg_add_unless(&v->counter, a, u) != u; 142 return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
151 * atomic64_set() can't be just a raw store, since it would be lost if it 152 * atomic64_set() can't be just a raw store, since it would be lost if it
152 * fell between the load and store of one of the other atomic ops. 153 * fell between the load and store of one of the other atomic ops.
153 */ 154 */
154static inline void atomic64_set(atomic64_t *v, u64 n) 155static inline void atomic64_set(atomic64_t *v, long long n)
155{ 156{
156 _atomic64_xchg(&v->counter, n); 157 _atomic64_xchg(&v->counter, n);
157} 158}
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
236extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); 237extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
237extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); 238extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
238extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); 239extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
239extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); 240extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
240extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); 241 long long o, long long n);
241extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); 242extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
242extern u64 __atomic64_xchg_add_unless(volatile u64 *p, 243extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
243 int *lock, u64 o, u64 n); 244 long long n);
245extern long long __atomic64_xchg_add_unless(volatile long long *p,
246 int *lock, long long o, long long n);
244 247
245/* Return failure from the atomic wrappers. */ 248/* Return failure from the atomic wrappers. */
246struct __get_user __atomic_bad_address(int __user *addr); 249struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
35int _atomic_xchg_add(int *v, int i); 35int _atomic_xchg_add(int *v, int i);
36int _atomic_xchg_add_unless(int *v, int a, int u); 36int _atomic_xchg_add_unless(int *v, int a, int u);
37int _atomic_cmpxchg(int *ptr, int o, int n); 37int _atomic_cmpxchg(int *ptr, int o, int n);
38u64 _atomic64_xchg(u64 *v, u64 n); 38long long _atomic64_xchg(long long *v, long long n);
39u64 _atomic64_xchg_add(u64 *v, u64 i); 39long long _atomic64_xchg_add(long long *v, long long i);
40u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); 40long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
41u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); 41long long _atomic64_cmpxchg(long long *v, long long o, long long n);
42 42
43#define xchg(ptr, n) \ 43#define xchg(ptr, n) \
44 ({ \ 44 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
53 if (sizeof(*(ptr)) != 4) \ 53 if (sizeof(*(ptr)) != 4) \
54 __cmpxchg_called_with_bad_pointer(); \ 54 __cmpxchg_called_with_bad_pointer(); \
55 smp_mb(); \ 55 smp_mb(); \
56 (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ 56 (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
57 (int)n); \
57 }) 58 })
58 59
59#define xchg64(ptr, n) \ 60#define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
61 if (sizeof(*(ptr)) != 8) \ 62 if (sizeof(*(ptr)) != 8) \
62 __xchg_called_with_bad_pointer(); \ 63 __xchg_called_with_bad_pointer(); \
63 smp_mb(); \ 64 smp_mb(); \
64 (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \ 65 (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
66 (long long)(n)); \
65 }) 67 })
66 68
67#define cmpxchg64(ptr, o, n) \ 69#define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
69 if (sizeof(*(ptr)) != 8) \ 71 if (sizeof(*(ptr)) != 8) \
70 __cmpxchg_called_with_bad_pointer(); \ 72 __cmpxchg_called_with_bad_pointer(); \
71 smp_mb(); \ 73 smp_mb(); \
72 (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ 74 (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
75 (long long)o, (long long)n); \
73 }) 76 })
74 77
75#else 78#else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
81 switch (sizeof(*(ptr))) { \ 84 switch (sizeof(*(ptr))) { \
82 case 4: \ 85 case 4: \
83 __x = (typeof(__x))(unsigned long) \ 86 __x = (typeof(__x))(unsigned long) \
84 __insn_exch4((ptr), (u32)(unsigned long)(n)); \ 87 __insn_exch4((ptr), \
88 (u32)(unsigned long)(n)); \
85 break; \ 89 break; \
86 case 8: \ 90 case 8: \
87 __x = (typeof(__x)) \ 91 __x = (typeof(__x)) \
88 __insn_exch((ptr), (unsigned long)(n)); \ 92 __insn_exch((ptr), (unsigned long)(n)); \
89 break; \ 93 break; \
90 default: \ 94 default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
103 switch (sizeof(*(ptr))) { \ 107 switch (sizeof(*(ptr))) { \
104 case 4: \ 108 case 4: \
105 __x = (typeof(__x))(unsigned long) \ 109 __x = (typeof(__x))(unsigned long) \
106 __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ 110 __insn_cmpexch4((ptr), \
111 (u32)(unsigned long)(n)); \
107 break; \ 112 break; \
108 case 8: \ 113 case 8: \
109 __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ 114 __x = (typeof(__x))__insn_cmpexch((ptr), \
115 (long long)(n)); \
110 break; \ 116 break; \
111 default: \ 117 default: \
112 __cmpxchg_called_with_bad_pointer(); \ 118 __cmpxchg_called_with_bad_pointer(); \
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
15#ifndef _ASM_TILE_PERCPU_H 15#ifndef _ASM_TILE_PERCPU_H
16#define _ASM_TILE_PERCPU_H 16#define _ASM_TILE_PERCPU_H
17 17
18register unsigned long __my_cpu_offset __asm__("tp"); 18register unsigned long my_cpu_offset_reg asm("tp");
19#define __my_cpu_offset __my_cpu_offset 19
20#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) 20#ifdef CONFIG_PREEMPT
21/*
22 * For full preemption, we can't just use the register variable
23 * directly, since we need barrier() to hazard against it, causing the
24 * compiler to reload anything computed from a previous "tp" value.
25 * But we also don't want to use volatile asm, since we'd like the
26 * compiler to be able to cache the value across multiple percpu reads.
27 * So we use a fake stack read as a hazard against barrier().
28 * The 'U' constraint is like 'm' but disallows postincrement.
29 */
30static inline unsigned long __my_cpu_offset(void)
31{
32 unsigned long tp;
33 register unsigned long *sp asm("sp");
34 asm("move %0, tp" : "=r" (tp) : "U" (*sp));
35 return tp;
36}
37#define __my_cpu_offset __my_cpu_offset()
38#else
39/*
40 * We don't need to hazard against barrier() since "tp" doesn't ever
41 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
42 * changes at function call points, at which we are already re-reading
43 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
44 */
45#define __my_cpu_offset my_cpu_offset_reg
46#endif
47
48#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
21 49
22#include <asm-generic/percpu.h> 50#include <asm-generic/percpu.h>
23 51
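
The tile percpu.h comment above explains the trick: under full preemption the per-cpu offset must be re-read after every barrier(), but a volatile asm would also defeat caching between barriers, so the read carries a fake memory operand that acts only as a scheduling hazard. A portable GCC sketch of that idea; here a plain global stands in for the "tp" register variable, and the hazard operand is the variable itself rather than the stack slot used in the patch:

        #include <stdio.h>

        #define barrier() __asm__ __volatile__("" ::: "memory")

        static unsigned long cpu_offset;        /* stands in for the "tp" register */

        static inline unsigned long my_cpu_offset(void)
        {
                unsigned long tp;

                /*
                 * No-op asm whose "m" input acts only as a scheduling hazard:
                 * without volatile the compiler may cache the result, but any
                 * barrier() (memory clobber) forces it to redo the read.
                 */
                __asm__("" : "=r" (tp) : "0" (cpu_offset), "m" (cpu_offset));
                return tp;
        }

        int main(void)
        {
                cpu_offset = 0x1000;
                printf("%#lx\n", my_cpu_offset());
                barrier();                      /* e.g. a preemption point */
                cpu_offset = 0x2000;
                printf("%#lx\n", my_cpu_offset());
                return 0;
        }
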
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a3..531f4c365351 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
66 0, 66 0,
67 "udn", 67 "udn",
68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), 68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
69 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), 69 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
70 NULL 70 NULL
71 }, 71 },
72#ifndef __tilepro__ 72#ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
77 1, /* disabled pending hypervisor support */ 77 1, /* disabled pending hypervisor support */
78 "idn", 78 "idn",
79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), 79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
80 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), 80 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
81 NULL 81 NULL
82 }, 82 },
83 { /* access to user-space IPI */ 83 { /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
87 0, 87 0,
88 "ipi", 88 "ipi",
89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), 89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
90 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), 90 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
91 NULL 91 NULL
92 }, 92 },
93#endif 93#endif
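The hardwall.c hunks above switch the static lock fields from the internal __SPIN_LOCK_INITIALIZER() to __SPIN_LOCK_UNLOCKED(), the supported macro for statically initializing a spinlock_t by name. A minimal kernel-style sketch of that initializer pattern (illustrative only; the struct and field names are invented):

#include <linux/spinlock.h>

struct demo {
        int id;
        spinlock_t lock;
};

static struct demo demo_table[] = {
        { .id = 0, .lock = __SPIN_LOCK_UNLOCKED(demo_table[0].lock) },
        { .id = 1, .lock = __SPIN_LOCK_UNLOCKED(demo_table[1].lock) },
};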
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e68..2cbe6d5dd6b0 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
815 } 815 }
816 bzt r28, 1f 816 bzt r28, 1f
817 bnz r29, 1f 817 bnz r29, 1f
818 /* Disable interrupts explicitly for preemption. */
819 IRQ_DISABLE(r20,r21)
820 TRACE_IRQS_OFF
818 jal preempt_schedule_irq 821 jal preempt_schedule_irq
819 FEEDBACK_REENTER(interrupt_return) 822 FEEDBACK_REENTER(interrupt_return)
8201: 8231:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f3734..b8fc497f2437 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
841 } 841 }
842 beqzt r28, 1f 842 beqzt r28, 1f
843 bnez r29, 1f 843 bnez r29, 1f
844 /* Disable interrupts explicitly for preemption. */
845 IRQ_DISABLE(r20,r21)
846 TRACE_IRQS_OFF
844 jal preempt_schedule_irq 847 jal preempt_schedule_irq
845 FEEDBACK_REENTER(interrupt_return) 848 FEEDBACK_REENTER(interrupt_return)
8461: 8491:
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3afd..c93977a62116 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
23#include <linux/mmzone.h> 23#include <linux/mmzone.h>
24#include <linux/dcache.h> 24#include <linux/dcache.h>
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/string.h>
26#include <asm/backtrace.h> 27#include <asm/backtrace.h>
27#include <asm/page.h> 28#include <asm/page.h>
28#include <asm/ucontext.h> 29#include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
332 } 333 }
333 334
334 if (vma->vm_file) { 335 if (vma->vm_file) {
335 char *s;
336 p = d_path(&vma->vm_file->f_path, buf, bufsize); 336 p = d_path(&vma->vm_file->f_path, buf, bufsize);
337 if (IS_ERR(p)) 337 if (IS_ERR(p))
338 p = "?"; 338 p = "?";
339 s = strrchr(p, '/'); 339 name = kbasename(p);
340 if (s)
341 p = s+1;
342 } else { 340 } else {
343 p = "anon"; 341 name = "anon";
344 } 342 }
345 343
346 /* Generate a string description of the vma info. */ 344 /* Generate a string description of the vma info. */
347 namelen = strlen(p); 345 namelen = strlen(name);
348 remaining = (bufsize - 1) - namelen; 346 remaining = (bufsize - 1) - namelen;
349 memmove(buf, p, namelen); 347 memmove(buf, name, namelen);
350 snprintf(buf + namelen, remaining, "[%lx+%lx] ", 348 snprintf(buf + namelen, remaining, "[%lx+%lx] ",
351 vma->vm_start, vma->vm_end - vma->vm_start); 349 vma->vm_start, vma->vm_end - vma->vm_start);
352} 350}
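The stack.c hunk above replaces the open-coded strrchr()+1 with kbasename(), which is why <linux/string.h> is now included. kbasename() returns the component after the last '/', or the whole string if there is none; a stand-alone userspace equivalent (reimplemented here only so the example compiles outside the kernel):

#include <stdio.h>
#include <string.h>

static const char *kbasename(const char *path)
{
        const char *tail = strrchr(path, '/');

        return tail ? tail + 1 : path;
}

int main(void)
{
        printf("%s\n", kbasename("/lib/modules/vmlinux"));  /* vmlinux */
        printf("%s\n", kbasename("anon"));                   /* anon */
        return 0;
}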
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
107EXPORT_SYMBOL(_atomic_xor); 107EXPORT_SYMBOL(_atomic_xor);
108 108
109 109
110u64 _atomic64_xchg(u64 *v, u64 n) 110long long _atomic64_xchg(long long *v, long long n)
111{ 111{
112 return __atomic64_xchg(v, __atomic_setup(v), n); 112 return __atomic64_xchg(v, __atomic_setup(v), n);
113} 113}
114EXPORT_SYMBOL(_atomic64_xchg); 114EXPORT_SYMBOL(_atomic64_xchg);
115 115
116u64 _atomic64_xchg_add(u64 *v, u64 i) 116long long _atomic64_xchg_add(long long *v, long long i)
117{ 117{
118 return __atomic64_xchg_add(v, __atomic_setup(v), i); 118 return __atomic64_xchg_add(v, __atomic_setup(v), i);
119} 119}
120EXPORT_SYMBOL(_atomic64_xchg_add); 120EXPORT_SYMBOL(_atomic64_xchg_add);
121 121
122u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) 122long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
123{ 123{
124 /* 124 /*
125 * Note: argument order is switched here since it is easier 125 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
130} 130}
131EXPORT_SYMBOL(_atomic64_xchg_add_unless); 131EXPORT_SYMBOL(_atomic64_xchg_add_unless);
132 132
133u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) 133long long _atomic64_cmpxchg(long long *v, long long o, long long n)
134{ 134{
135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); 135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
136} 136}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d37745..f67e839f06c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
860 860
861config X86_UP_APIC 861config X86_UP_APIC
862 bool "Local APIC support on uniprocessors" 862 bool "Local APIC support on uniprocessors"
863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD 863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
864 ---help--- 864 ---help---
865 A local APIC (Advanced Programmable Interrupt Controller) is an 865 A local APIC (Advanced Programmable Interrupt Controller) is an
866 integrated interrupt controller in the CPU. If you have a single-CPU 866 integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
885 885
886config X86_LOCAL_APIC 886config X86_LOCAL_APIC
887 def_bool y 887 def_bool y
888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC 888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
889 889
890config X86_IO_APIC 890config X86_IO_APIC
891 def_bool y 891 def_bool y
892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC 892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
893 893
894config X86_VISWS_APIC 894config X86_VISWS_APIC
895 def_bool y 895 def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
1033 1033
1034config MICROCODE 1034config MICROCODE
1035 tristate "CPU microcode loading support" 1035 tristate "CPU microcode loading support"
1036 depends on CPU_SUP_AMD || CPU_SUP_INTEL
1036 select FW_LOADER 1037 select FW_LOADER
1037 ---help--- 1038 ---help---
1038 1039
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
374 * Catch too early usage of this before alternatives 374 * Catch too early usage of this before alternatives
375 * have run. 375 * have run.
376 */ 376 */
377 asm goto("1: jmp %l[t_warn]\n" 377 asm_volatile_goto("1: jmp %l[t_warn]\n"
378 "2:\n" 378 "2:\n"
379 ".section .altinstructions,\"a\"\n" 379 ".section .altinstructions,\"a\"\n"
380 " .long 1b - .\n" 380 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
388 388
389#endif 389#endif
390 390
391 asm goto("1: jmp %l[t_no]\n" 391 asm_volatile_goto("1: jmp %l[t_no]\n"
392 "2:\n" 392 "2:\n"
393 ".section .altinstructions,\"a\"\n" 393 ".section .altinstructions,\"a\"\n"
394 " .long 1b - .\n" 394 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
453 * have. Thus, we force the jump to the widest, 4-byte, signed relative 453 * have. Thus, we force the jump to the widest, 4-byte, signed relative
454 * offset even though the last would often fit in less bytes. 454 * offset even though the last would often fit in less bytes.
455 */ 455 */
456 asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" 456 asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
457 "2:\n" 457 "2:\n"
458 ".section .altinstructions,\"a\"\n" 458 ".section .altinstructions,\"a\"\n"
459 " .long 1b - .\n" /* src offset */ 459 " .long 1b - .\n" /* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
18 18
19static __always_inline bool arch_static_branch(struct static_key *key) 19static __always_inline bool arch_static_branch(struct static_key *key)
20{ 20{
21 asm goto("1:" 21 asm_volatile_goto("1:"
22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" 22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
23 ".pushsection __jump_table, \"aw\" \n\t" 23 ".pushsection __jump_table, \"aw\" \n\t"
24 _ASM_ALIGN "\n\t" 24 _ASM_ALIGN "\n\t"
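This hunk, together with the cpufeature.h hunks above and the mutex_64.h ones below, renames every open-coded "asm goto"/"asm volatile goto" to an asm_volatile_goto() wrapper. As far as I recall, the wrapper added elsewhere in this series is essentially 'do { asm goto(x); asm (""); } while (0)', working around a GCC asm-goto miscompilation fixed in later 4.8.x releases; treat that definition and the bug details as an assumption rather than something shown in this diff. A stand-alone x86 sketch of the pattern:

#include <stdio.h>

/* Assumed to mirror the kernel workaround macro; not copied from this patch. */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)

static int demo_branch(void)
{
        /*
         * Emits a 5-byte x86 NOP that runtime patching would replace with a
         * jump to t_yes; as written it falls through and returns 0.
         */
        asm_volatile_goto("1: .byte 0x0f,0x1f,0x44,0x00,0x00\n\t"
                          : : : : t_yes);
        return 0;
t_yes:
        return 1;
}

int main(void)
{
        printf("branch taken: %d\n", demo_branch());
        return 0;
}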
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
20static inline void __mutex_fastpath_lock(atomic_t *v, 20static inline void __mutex_fastpath_lock(atomic_t *v,
21 void (*fail_fn)(atomic_t *)) 21 void (*fail_fn)(atomic_t *))
22{ 22{
23 asm volatile goto(LOCK_PREFIX " decl %0\n" 23 asm_volatile_goto(LOCK_PREFIX " decl %0\n"
24 " jns %l[exit]\n" 24 " jns %l[exit]\n"
25 : : "m" (v->counter) 25 : : "m" (v->counter)
26 : "memory", "cc" 26 : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
75static inline void __mutex_fastpath_unlock(atomic_t *v, 75static inline void __mutex_fastpath_unlock(atomic_t *v,
76 void (*fail_fn)(atomic_t *)) 76 void (*fail_fn)(atomic_t *))
77{ 77{
78 asm volatile goto(LOCK_PREFIX " incl %0\n" 78 asm_volatile_goto(LOCK_PREFIX " incl %0\n"
79 " jg %l[exit]\n" 79 " jg %l[exit]\n"
80 : : "m" (v->counter) 80 : : "m" (v->counter)
81 : "memory", "cc" 81 : "memory", "cc"
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fbc09b7..b913915e8e63 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY; 79 return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
80} 80}
81 81
82static inline unsigned long mfn_to_pfn(unsigned long mfn) 82static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
83{ 83{
84 unsigned long pfn; 84 unsigned long pfn;
85 int ret = 0; 85 int ret;
86 86
87 if (xen_feature(XENFEAT_auto_translated_physmap)) 87 if (xen_feature(XENFEAT_auto_translated_physmap))
88 return mfn; 88 return mfn;
89 89
90 if (unlikely(mfn >= machine_to_phys_nr)) { 90 if (unlikely(mfn >= machine_to_phys_nr))
91 pfn = ~0; 91 return ~0;
92 goto try_override; 92
93 }
94 pfn = 0;
95 /* 93 /*
96 * The array access can fail (e.g., device space beyond end of RAM). 94 * The array access can fail (e.g., device space beyond end of RAM).
97 * In such cases it doesn't matter what we return (we return garbage), 95 * In such cases it doesn't matter what we return (we return garbage),
98 * but we must handle the fault without crashing! 96 * but we must handle the fault without crashing!
99 */ 97 */
100 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 98 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
101try_override:
102 /* ret might be < 0 if there are no entries in the m2p for mfn */
103 if (ret < 0) 99 if (ret < 0)
104 pfn = ~0; 100 return ~0;
105 else if (get_phys_to_machine(pfn) != mfn) 101
102 return pfn;
103}
104
105static inline unsigned long mfn_to_pfn(unsigned long mfn)
106{
107 unsigned long pfn;
108
109 if (xen_feature(XENFEAT_auto_translated_physmap))
110 return mfn;
111
112 pfn = mfn_to_pfn_no_overrides(mfn);
113 if (get_phys_to_machine(pfn) != mfn) {
106 /* 114 /*
107 * If this appears to be a foreign mfn (because the pfn 115 * If this appears to be a foreign mfn (because the pfn
108 * doesn't map back to the mfn), then check the local override 116 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
111 * m2p_find_override_pfn returns ~0 if it doesn't find anything. 119 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
112 */ 120 */
113 pfn = m2p_find_override_pfn(mfn, ~0); 121 pfn = m2p_find_override_pfn(mfn, ~0);
122 }
114 123
115 /* 124 /*
116 * pfn is ~0 if there are no entries in the m2p for mfn or if the 125 * pfn is ~0 if there are no entries in the m2p for mfn or if the
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d25..a419814cea57 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
113 break; 113 break;
114 case UV3_HUB_PART_NUMBER: 114 case UV3_HUB_PART_NUMBER:
115 case UV3_HUB_PART_NUMBER_X: 115 case UV3_HUB_PART_NUMBER_X:
116 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1; 116 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
117 break; 117 break;
118 } 118 }
119 119
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8355c84b9729..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
1506 err = amd_pmu_init(); 1506 err = amd_pmu_init();
1507 break; 1507 break;
1508 default: 1508 default:
1509 return 0; 1509 err = -ENOTSUPP;
1510 } 1510 }
1511 if (err != 0) { 1511 if (err != 0) {
1512 pr_cont("no PMU driver, software events only.\n"); 1512 pr_cont("no PMU driver, software events only.\n");
@@ -1883,26 +1883,21 @@ static struct pmu pmu = {
1883 1883
1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1885{ 1885{
1886 userpg->cap_usr_time = 0; 1886 userpg->cap_user_time = 0;
1887 userpg->cap_usr_time_zero = 0; 1887 userpg->cap_user_time_zero = 0;
1888 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1889 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1890 1890
1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1891 if (!sched_clock_stable)
1892 return;
1893
1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1895 return; 1892 return;
1896 1893
1897 userpg->cap_usr_time = 1; 1894 userpg->cap_user_time = 1;
1898 userpg->time_mult = this_cpu_read(cyc2ns); 1895 userpg->time_mult = this_cpu_read(cyc2ns);
1899 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1896 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1897 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901 1898
1902 if (sched_clock_stable && !check_tsc_disabled()) { 1899 userpg->cap_user_time_zero = 1;
1903 userpg->cap_usr_time_zero = 1; 1900 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 }
1906} 1901}
1907 1902
1908/* 1903/*
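The perf_event.c hunk above renames the cap_usr_* fields to cap_user_* and keys the TSC time export on sched_clock_stable. When cap_user_time is set, a perf mmap() reader converts raw cycle deltas using time_mult, time_shift and time_offset; a stand-alone sketch of that conversion (field names follow the perf mmap-page convention, the constants are invented for a ~3 GHz TSC):

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t time_mult,
                          uint16_t time_shift, uint64_t time_offset)
{
        /* Split the multiply so a large delta cannot overflow 64 bits. */
        uint64_t quot = cyc >> time_shift;
        uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

        return time_offset + quot * time_mult +
               ((rem * time_mult) >> time_shift);
}

int main(void)
{
        /* 3e9 cycles at ~3 GHz (mult = 2^30 / 3) should be about 1e9 ns. */
        uint64_t ns = cyc_to_ns(3000000000ULL, 357913941, 30, 0);

        printf("%llu ns\n", (unsigned long long)ns);
        return 0;
}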
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9db76c31b3c3..f31a1655d1ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
2325 break; 2325 break;
2326 2326
2327 case 55: /* Atom 22nm "Silvermont" */ 2327 case 55: /* Atom 22nm "Silvermont" */
2328 case 77: /* Avoton "Silvermont" */
2328 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, 2329 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2329 sizeof(hw_cache_event_ids)); 2330 sizeof(hw_cache_event_ids));
2330 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, 2331 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 8ed44589b0e4..4118f9f68315 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
2706 box->hrtimer.function = uncore_pmu_hrtimer; 2706 box->hrtimer.function = uncore_pmu_hrtimer;
2707} 2707}
2708 2708
2709struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu) 2709static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
2710{ 2710{
2711 struct intel_uncore_box *box; 2711 struct intel_uncore_box *box;
2712 int i, size; 2712 int i, size;
2713 2713
2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); 2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2715 2715
2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); 2716 box = kzalloc_node(size, GFP_KERNEL, node);
2717 if (!box) 2717 if (!box)
2718 return NULL; 2718 return NULL;
2719 2719
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
3031 struct intel_uncore_box *fake_box; 3031 struct intel_uncore_box *fake_box;
3032 int ret = -EINVAL, n; 3032 int ret = -EINVAL, n;
3033 3033
3034 fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); 3034 fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
3035 if (!fake_box) 3035 if (!fake_box)
3036 return -ENOMEM; 3036 return -ENOMEM;
3037 3037
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
3294 } 3294 }
3295 3295
3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; 3296 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3297 box = uncore_alloc_box(type, 0); 3297 box = uncore_alloc_box(type, NUMA_NO_NODE);
3298 if (!box) 3298 if (!box)
3299 return -ENOMEM; 3299 return -ENOMEM;
3300 3300
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
3499 if (pmu->func_id < 0) 3499 if (pmu->func_id < 0)
3500 pmu->func_id = j; 3500 pmu->func_id = j;
3501 3501
3502 box = uncore_alloc_box(type, cpu); 3502 box = uncore_alloc_box(type, cpu_to_node(cpu));
3503 if (!box) 3503 if (!box)
3504 return -ENOMEM; 3504 return -ENOMEM;
3505 3505
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02dd..a0e2a8a80c94 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
776 return; 776 return;
777 777
778 printk(KERN_INFO "KVM setup paravirtual spinlock\n"); 778 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
779 pv_lock_ops.unlock_kick = kvm_unlock_kick;
780}
781
782static __init int kvm_spinlock_init_jump(void)
783{
784 if (!kvm_para_available())
785 return 0;
786 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
787 return 0;
779 788
780 static_key_slow_inc(&paravirt_ticketlocks_enabled); 789 static_key_slow_inc(&paravirt_ticketlocks_enabled);
790 printk(KERN_INFO "KVM setup paravirtual spinlock\n");
781 791
782 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning); 792 return 0;
783 pv_lock_ops.unlock_kick = kvm_unlock_kick;
784} 793}
794early_initcall(kvm_spinlock_init_jump);
795
785#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 796#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 7123b5df479d..af99f71aeb7f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
216 /* need to apply patch? */ 216 /* need to apply patch? */
217 if (rev >= mc_amd->hdr.patch_id) { 217 if (rev >= mc_amd->hdr.patch_id) {
218 c->microcode = rev; 218 c->microcode = rev;
219 uci->cpu_sig.rev = rev;
219 return 0; 220 return 0;
220 } 221 }
221 222
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 563ed91e6faa..7e920bff99a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), 326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
327 }, 327 },
328 }, 328 },
329 { /* Handle problems with rebooting on the Latitude E5410. */
330 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5410",
332 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
335 },
336 },
329 { /* Handle problems with rebooting on the Latitude E5420. */ 337 { /* Handle problems with rebooting on the Latitude E5420. */
330 .callback = set_pci_reboot, 338 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5420", 339 .ident = "Dell Latitude E5420",
@@ -352,12 +360,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
352 }, 360 },
353 { /* Handle problems with rebooting on the Precision M6600. */ 361 { /* Handle problems with rebooting on the Precision M6600. */
354 .callback = set_pci_reboot, 362 .callback = set_pci_reboot,
355 .ident = "Dell OptiPlex 990", 363 .ident = "Dell Precision M6600",
356 .matches = { 364 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 365 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
358 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), 366 DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
359 }, 367 },
360 }, 368 },
369 { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
370 .callback = set_pci_reboot,
371 .ident = "Dell PowerEdge C6100",
372 .matches = {
373 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
374 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
375 },
376 },
377 { /* Some C6100 machines were shipped with vendor being 'Dell'. */
378 .callback = set_pci_reboot,
379 .ident = "Dell PowerEdge C6100",
380 .matches = {
381 DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
382 DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
383 },
384 },
361 { } 385 { }
362}; 386};
363 387
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 22513e96b012..86179d409893 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -72,14 +72,14 @@ __init int create_simplefb(const struct screen_info *si,
72 * the part that is occupied by the framebuffer */ 72 * the part that is occupied by the framebuffer */
73 len = mode->height * mode->stride; 73 len = mode->height * mode->stride;
74 len = PAGE_ALIGN(len); 74 len = PAGE_ALIGN(len);
75 if (len > si->lfb_size << 16) { 75 if (len > (u64)si->lfb_size << 16) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 /* setup IORESOURCE_MEM as framebuffer memory */ 80 /* setup IORESOURCE_MEM as framebuffer memory */
81 memset(&res, 0, sizeof(res)); 81 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM; 82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 83 res.name = simplefb_resname;
84 res.start = si->lfb_base; 84 res.start = si->lfb_base;
85 res.end = si->lfb_base + len - 1; 85 res.end = si->lfb_base + len - 1;
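The first sysfb_simplefb.c change widens si->lfb_size, a 32-bit count of 64 KiB units, before shifting, so the length comparison no longer truncates on large VRAM sizes. A small stand-alone demonstration of the difference (the values are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lfb_size = 0x00012345;     /* 64 KiB units, ~4.6 GiB of VRAM */
        uint64_t len = 1ULL << 30;          /* 1 GiB framebuffer mapping */

        uint64_t wrong = lfb_size << 16;             /* 32-bit shift: high bits lost */
        uint64_t right = (uint64_t)lfb_size << 16;   /* widen first, as the fix does */

        printf("truncated=%#llx widened=%#llx bogus-reject? %d vs %d\n",
               (unsigned long long)wrong, (unsigned long long)right,
               len > wrong, len > right);
        return 0;
}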
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a1216de9ffda..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3255 3255
3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3257{ 3257{
3258 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3259
3258 if (!test_bit(VCPU_EXREG_PDPTR, 3260 if (!test_bit(VCPU_EXREG_PDPTR,
3259 (unsigned long *)&vcpu->arch.regs_dirty)) 3261 (unsigned long *)&vcpu->arch.regs_dirty))
3260 return; 3262 return;
3261 3263
3262 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3264 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3263 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]); 3265 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3264 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]); 3266 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3265 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]); 3267 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3266 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]); 3268 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3267 } 3269 }
3268} 3270}
3269 3271
3270static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3272static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3271{ 3273{
3274 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3275
3272 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3276 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3273 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3277 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3274 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3278 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3275 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3279 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3276 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3280 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3277 } 3281 }
3278 3282
3279 __set_bit(VCPU_EXREG_PDPTR, 3283 __set_bit(VCPU_EXREG_PDPTR,
@@ -5345,7 +5349,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
5345 * There are errata that may cause this bit to not be set: 5349 * There are errata that may cause this bit to not be set:
5346 * AAK134, BY25. 5350 * AAK134, BY25.
5347 */ 5351 */
5348 if (exit_qualification & INTR_INFO_UNBLOCK_NMI) 5352 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5353 cpu_has_virtual_nmis() &&
5354 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5349 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5355 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5350 5356
5351 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5357 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -7775,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7775 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 7781 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7776 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 7782 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7777 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 7783 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7778 __clear_bit(VCPU_EXREG_PDPTR,
7779 (unsigned long *)&vcpu->arch.regs_avail);
7780 __clear_bit(VCPU_EXREG_PDPTR,
7781 (unsigned long *)&vcpu->arch.regs_dirty);
7782 } 7784 }
7783 7785
7784 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 79c216aa0e2b..516593e1ce33 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -772,13 +772,21 @@ out:
772 return; 772 return;
773} 773}
774 774
775static void bpf_jit_free_deferred(struct work_struct *work)
776{
777 struct sk_filter *fp = container_of(work, struct sk_filter, work);
778 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
779 struct bpf_binary_header *header = (void *)addr;
780
781 set_memory_rw(addr, header->pages);
782 module_free(NULL, header);
783 kfree(fp);
784}
785
775void bpf_jit_free(struct sk_filter *fp) 786void bpf_jit_free(struct sk_filter *fp)
776{ 787{
777 if (fp->bpf_func != sk_run_filter) { 788 if (fp->bpf_func != sk_run_filter) {
778 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; 789 INIT_WORK(&fp->work, bpf_jit_free_deferred);
779 struct bpf_binary_header *header = (void *)addr; 790 schedule_work(&fp->work);
780
781 set_memory_rw(addr, header->pages);
782 module_free(NULL, header);
783 } 791 }
784} 792}
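The bpf_jit_comp.c hunk moves the set_memory_rw()/module_free() teardown of the JIT image into a work item, so bpf_jit_free() only queues work (presumably because its caller can run in a context that must not perform those operations directly). A minimal kernel-module sketch of the same deferred-free-via-workqueue pattern (illustrative only; names invented):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_obj {
        int payload;
        struct work_struct work;
};

static struct deferred_obj *obj;

static void deferred_free_fn(struct work_struct *work)
{
        struct deferred_obj *o = container_of(work, struct deferred_obj, work);

        /* Runs in process context, where sleeping operations are allowed. */
        kfree(o);
}

static int __init demo_init(void)
{
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        /* The caller only queues the work; the heavy lifting happens later. */
        INIT_WORK(&obj->work, deferred_free_fn);
        schedule_work(&obj->work);
        return 0;
}

static void __exit demo_exit(void)
{
        flush_scheduled_work();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");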
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 5596c7bdd327..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) 700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
701 return -ENODEV; 701 return -ENODEV;
702 702
703 if (start > end || !addr) 703 if (start > end)
704 return -EINVAL; 704 return -EINVAL;
705 705
706 mutex_lock(&pci_mmcfg_lock); 706 mutex_lock(&pci_mmcfg_lock);
@@ -716,6 +716,11 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
716 return -EEXIST; 716 return -EEXIST;
717 } 717 }
718 718
719 if (!addr) {
720 mutex_unlock(&pci_mmcfg_lock);
721 return -EINVAL;
722 }
723
719 rc = -EBUSY; 724 rc = -EBUSY;
720 cfg = pci_mmconfig_alloc(seg, start, end, addr); 725 cfg = pci_mmconfig_alloc(seg, start, end, addr);
721 if (cfg == NULL) { 726 if (cfg == NULL) {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 90f6ed127096..c7e22ab29a5a 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
912 912
913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 913 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
914 md = p; 914 md = p;
915 if (!(md->attribute & EFI_MEMORY_RUNTIME) && 915 if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
916 md->type != EFI_BOOT_SERVICES_CODE && 916#ifdef CONFIG_X86_64
917 md->type != EFI_BOOT_SERVICES_DATA) 917 if (md->type != EFI_BOOT_SERVICES_CODE &&
918 continue; 918 md->type != EFI_BOOT_SERVICES_DATA)
919#endif
920 continue;
921 }
919 922
920 size = md->num_pages << EFI_PAGE_SHIFT; 923 size = md->num_pages << EFI_PAGE_SHIFT;
921 end = md->phys_addr + size; 924 end = md->phys_addr + size;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8b901e8d782d..a61c7d5811be 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
879 unsigned long uninitialized_var(address); 879 unsigned long uninitialized_var(address);
880 unsigned level; 880 unsigned level;
881 pte_t *ptep = NULL; 881 pte_t *ptep = NULL;
882 int ret = 0;
883 882
884 pfn = page_to_pfn(page); 883 pfn = page_to_pfn(page);
885 if (!PageHighMem(page)) { 884 if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
926 * frontend pages while they are being shared with the backend, 925 * frontend pages while they are being shared with the backend,
927 * because mfn_to_pfn (that ends up being called by GUPF) will 926 * because mfn_to_pfn (that ends up being called by GUPF) will
928 * return the backend pfn rather than the frontend pfn. */ 927 * return the backend pfn rather than the frontend pfn. */
929 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 928 pfn = mfn_to_pfn_no_overrides(mfn);
930 if (ret == 0 && get_phys_to_machine(pfn) == mfn) 929 if (get_phys_to_machine(pfn) == mfn)
931 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 930 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
932 931
933 return 0; 932 return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
942 unsigned long uninitialized_var(address); 941 unsigned long uninitialized_var(address);
943 unsigned level; 942 unsigned level;
944 pte_t *ptep = NULL; 943 pte_t *ptep = NULL;
945 int ret = 0;
946 944
947 pfn = page_to_pfn(page); 945 pfn = page_to_pfn(page);
948 mfn = get_phys_to_machine(pfn); 946 mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
1029 * the original pfn causes mfn_to_pfn(mfn) to return the frontend 1027 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
1030 * pfn again. */ 1028 * pfn again. */
1031 mfn &= ~FOREIGN_FRAME_BIT; 1029 mfn &= ~FOREIGN_FRAME_BIT;
1032 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 1030 pfn = mfn_to_pfn_no_overrides(mfn);
1033 if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 1031 if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
1034 m2p_find_override(mfn) == NULL) 1032 m2p_find_override(mfn) == NULL)
1035 set_phys_to_machine(pfn, mfn); 1033 set_phys_to_machine(pfn, mfn);
1036 1034
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d1e4777b4e75..31d04758b76f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
278 old memory can be recycled */ 278 old memory can be recycled */
279 make_lowmem_page_readwrite(xen_initial_gdt); 279 make_lowmem_page_readwrite(xen_initial_gdt);
280 280
281#ifdef CONFIG_X86_32
282 /*
283 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
284 * expects __USER_DS
285 */
286 loadsegment(ds, __USER_DS);
287 loadsegment(es, __USER_DS);
288#endif
289
281 xen_filter_cpu_maps(); 290 xen_filter_cpu_maps();
282 xen_setup_vcpu_info_placement(); 291 xen_setup_vcpu_info_placement();
283 } 292 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 253f63fceea1..be6b86078957 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
259} 259}
260 260
261 261
262/*
 263 * Our init of PV spinlocks is split across two init functions because we
 264 * rely on both paravirt patching and jump label patching, and all of it
 265 * must be done before SMP code is invoked.
266 *
267 * The paravirt patching needs to be done _before_ the alternative asm code
268 * is started, otherwise we would not patch the core kernel code.
269 */
262void __init xen_init_spinlocks(void) 270void __init xen_init_spinlocks(void)
263{ 271{
264 272
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
267 return; 275 return;
268 } 276 }
269 277
270 static_key_slow_inc(&paravirt_ticketlocks_enabled);
271
272 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); 278 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
273 pv_lock_ops.unlock_kick = xen_unlock_kick; 279 pv_lock_ops.unlock_kick = xen_unlock_kick;
274} 280}
275 281
282/*
 283 * The jump_label init, however, needs to happen _after_ the jump labels are
 284 * enabled and before SMP is started; hence we use a pre-SMP initcall level
285 * init. We cannot do it in xen_init_spinlocks as that is done before
286 * jump labels are activated.
287 */
288static __init int xen_init_spinlocks_jump(void)
289{
290 if (!xen_pvspin)
291 return 0;
292
293 static_key_slow_inc(&paravirt_ticketlocks_enabled);
294 return 0;
295}
296early_initcall(xen_init_spinlocks_jump);
297
276static __init int xen_parse_nopvspin(char *arg) 298static __init int xen_parse_nopvspin(char *arg)
277{ 299{
278 xen_pvspin = false; 300 xen_pvspin = false;
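The xen/spinlock.c change above (like the kvm.c change earlier) defers static_key_slow_inc() to an early_initcall, which runs after jump_label_init() but before SMP is brought up. A kernel-style sketch of flipping a static key from an early_initcall, using the static-key API of this kernel generation (built-in code only; names invented):

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

static struct static_key demo_key = STATIC_KEY_INIT_FALSE;

/* Compiles to a patched nop/jump once jump labels are active. */
static bool demo_fast_path(void)
{
        return static_key_false(&demo_key);
}

static __init int demo_enable_key(void)
{
        /* Safe here: jump_label_init() has already run, SMP has not started. */
        static_key_slow_inc(&demo_key);
        pr_info("demo key enabled: %d\n", demo_fast_path());
        return 0;
}
early_initcall(demo_enable_key);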
diff --git a/block/Kconfig b/block/Kconfig
index 7f38e40fee08..2429515c05c2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING
99 99
100 See Documentation/cgroups/blkio-controller.txt for more information. 100 See Documentation/cgroups/blkio-controller.txt for more information.
101 101
102config CMDLINE_PARSER 102config BLK_CMDLINE_PARSER
103 bool "Block device command line partition parser" 103 bool "Block device command line partition parser"
104 default n 104 default n
105 ---help--- 105 ---help---
106 Parsing command line, get the partitions information. 106 Enabling this option allows you to specify the partition layout from
107 the kernel boot args. This is typically of use for embedded devices
108 which don't otherwise have any standardized method for listing the
109 partitions on a block device.
110
111 See Documentation/block/cmdline-partition.txt for more information.
107 112
108menu "Partition Types" 113menu "Partition Types"
109 114
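The renamed BLK_CMDLINE_PARSER option enables the mtdparts-like boot argument documented in Documentation/block/cmdline-partition.txt. From memory (treat the exact grammar as an assumption and check that file), a command line looks roughly like:

    blkdevparts=mmcblk0:512k(env),4m(kernel),-(rootfs);mmcblk0boot0:-(boot)

with each <size>[@<offset>](name) entry defining one partition and '-' taking the remaining space on the device.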
diff --git a/block/Makefile b/block/Makefile
index 4fa4be544ece..671a83d063a5 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
18 18
19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o 19obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o 20obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
21obj-$(CONFIG_CMDLINE_PARSER) += cmdline-parser.o 21obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig
index 87a32086535d..9b29a996c311 100644
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -263,7 +263,7 @@ config SYSV68_PARTITION
263 263
264config CMDLINE_PARTITION 264config CMDLINE_PARTITION
265 bool "Command line partition support" if PARTITION_ADVANCED 265 bool "Command line partition support" if PARTITION_ADVANCED
266 select CMDLINE_PARSER 266 select BLK_CMDLINE_PARSER
267 help 267 help
268 Say Y here if you would read the partitions table from bootargs. 268 Say Y here if you want to read the partition table from bootargs.
269 The format for the command line is just like mtdparts. 269 The format for the command line is just like mtdparts.
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index 56cf4ffad51e..5141b563adf1 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -2,15 +2,15 @@
2 * Copyright (C) 2013 HUAWEI 2 * Copyright (C) 2013 HUAWEI
3 * Author: Cai Zhiyong <caizhiyong@huawei.com> 3 * Author: Cai Zhiyong <caizhiyong@huawei.com>
4 * 4 *
5 * Read block device partition table from command line. 5 * Read block device partition table from the command line.
6 * The partition used for fixed block device (eMMC) embedded device. 6 * Typically used for fixed block (eMMC) embedded devices.
7 * It is no MBR, save storage space. Bootloader can be easily accessed 7 * It has no MBR, so saves storage space. Bootloader can be easily accessed
8 * by absolute address of data on the block device. 8 * by absolute address of data on the block device.
9 * Users can easily change the partition. 9 * Users can easily change the partition.
10 * 10 *
11 * The format for the command line is just like mtdparts. 11 * The format for the command line is just like mtdparts.
12 * 12 *
13 * Verbose config please reference "Documentation/block/cmdline-partition.txt" 13 * For further information, see "Documentation/block/cmdline-partition.txt"
14 * 14 *
15 */ 15 */
16 16
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 1eb09ee5311b..a8287b49d062 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -222,11 +222,16 @@ check_hybrid:
222 * the disk size. 222 * the disk size.
223 * 223 *
224 * Hybrid MBRs do not necessarily comply with this. 224 * Hybrid MBRs do not necessarily comply with this.
225 *
 226 * Treat a bad value here as a warning rather than an error, so that
 227 * an image dd'd from a smaller disk onto a larger disk still works.
225 */ 228 */
226 if (ret == GPT_MBR_PROTECTIVE) { 229 if (ret == GPT_MBR_PROTECTIVE) {
227 sz = le32_to_cpu(mbr->partition_record[part].size_in_lba); 230 sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
228 if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF) 231 if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
229 ret = 0; 232 pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
233 sz, min_t(uint32_t,
234 total_sectors - 1, 0xFFFFFFFF));
230 } 235 }
231done: 236done:
232 return ret; 237 return ret;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 76f9ebda9081..10e399f97189 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -24,7 +24,7 @@ menuconfig ACPI
24 are configured, ACPI is used. 24 are configured, ACPI is used.
25 25
26 The project home page for the Linux ACPI subsystem is here: 26 The project home page for the Linux ACPI subsystem is here:
27 <http://www.lesswatts.org/projects/acpi/> 27 <https://01.org/linux-acpi>
28 28
29 Linux support for ACPI is based on Intel Corporation's ACPI 29 Linux support for ACPI is based on Intel Corporation's ACPI
30 Component Architecture (ACPI CA). For more information on the 30 Component Architecture (ACPI CA). For more information on the
@@ -106,9 +106,9 @@ config ACPI_BUTTON
106 default y 106 default y
107 help 107 help
108 This driver handles events on the power, sleep, and lid buttons. 108 This driver handles events on the power, sleep, and lid buttons.
109 A daemon reads /proc/acpi/event and perform user-defined actions 109 A daemon reads events from input devices or via netlink and
110 such as shutting down the system. This is necessary for 110 performs user-defined actions such as shutting down the system.
111 software-controlled poweroff. 111 This is necessary for software-controlled poweroff.
112 112
113 To compile this driver as a module, choose M here: 113 To compile this driver as a module, choose M here:
114 the module will be called button. 114 the module will be called button.
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
39#include <linux/ipmi.h> 39#include <linux/ipmi.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/pnp.h> 41#include <linux/pnp.h>
42#include <linux/spinlock.h>
42 43
43MODULE_AUTHOR("Zhao Yakui"); 44MODULE_AUTHOR("Zhao Yakui");
44MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); 45MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
57 struct list_head head; 58 struct list_head head;
58 /* the IPMI request message list */ 59 /* the IPMI request message list */
59 struct list_head tx_msg_list; 60 struct list_head tx_msg_list;
60 struct mutex tx_msg_lock; 61 spinlock_t tx_msg_lock;
61 acpi_handle handle; 62 acpi_handle handle;
62 struct pnp_dev *pnp_dev; 63 struct pnp_dev *pnp_dev;
63 ipmi_user_t user_interface; 64 ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
147 struct kernel_ipmi_msg *msg; 148 struct kernel_ipmi_msg *msg;
148 struct acpi_ipmi_buffer *buffer; 149 struct acpi_ipmi_buffer *buffer;
149 struct acpi_ipmi_device *device; 150 struct acpi_ipmi_device *device;
151 unsigned long flags;
150 152
151 msg = &tx_msg->tx_message; 153 msg = &tx_msg->tx_message;
152 /* 154 /*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
177 179
178 /* Get the msgid */ 180 /* Get the msgid */
179 device = tx_msg->device; 181 device = tx_msg->device;
180 mutex_lock(&device->tx_msg_lock); 182 spin_lock_irqsave(&device->tx_msg_lock, flags);
181 device->curr_msgid++; 183 device->curr_msgid++;
182 tx_msg->tx_msgid = device->curr_msgid; 184 tx_msg->tx_msgid = device->curr_msgid;
183 mutex_unlock(&device->tx_msg_lock); 185 spin_unlock_irqrestore(&device->tx_msg_lock, flags);
184} 186}
185 187
186static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, 188static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
242 int msg_found = 0; 244 int msg_found = 0;
243 struct acpi_ipmi_msg *tx_msg; 245 struct acpi_ipmi_msg *tx_msg;
244 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; 246 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
247 unsigned long flags;
245 248
246 if (msg->user != ipmi_device->user_interface) { 249 if (msg->user != ipmi_device->user_interface) {
247 dev_warn(&pnp_dev->dev, "Unexpected response is returned. " 250 dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
250 ipmi_free_recv_msg(msg); 253 ipmi_free_recv_msg(msg);
251 return; 254 return;
252 } 255 }
253 mutex_lock(&ipmi_device->tx_msg_lock); 256 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
254 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { 257 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
255 if (msg->msgid == tx_msg->tx_msgid) { 258 if (msg->msgid == tx_msg->tx_msgid) {
256 msg_found = 1; 259 msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
258 } 261 }
259 } 262 }
260 263
261 mutex_unlock(&ipmi_device->tx_msg_lock); 264 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
262 if (!msg_found) { 265 if (!msg_found) {
263 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " 266 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
264 "returned.\n", msg->msgid); 267 "returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
378 struct acpi_ipmi_device *ipmi_device = handler_context; 381 struct acpi_ipmi_device *ipmi_device = handler_context;
379 int err, rem_time; 382 int err, rem_time;
380 acpi_status status; 383 acpi_status status;
384 unsigned long flags;
381 /* 385 /*
382 * IPMI opregion message. 386 * IPMI opregion message.
383 * IPMI message is firstly written to the BMC and system software 387 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
395 return AE_NO_MEMORY; 399 return AE_NO_MEMORY;
396 400
397 acpi_format_ipmi_msg(tx_msg, address, value); 401 acpi_format_ipmi_msg(tx_msg, address, value);
398 mutex_lock(&ipmi_device->tx_msg_lock); 402 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
399 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); 403 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
400 mutex_unlock(&ipmi_device->tx_msg_lock); 404 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
401 err = ipmi_request_settime(ipmi_device->user_interface, 405 err = ipmi_request_settime(ipmi_device->user_interface,
402 &tx_msg->addr, 406 &tx_msg->addr,
403 tx_msg->tx_msgid, 407 tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
413 status = AE_OK; 417 status = AE_OK;
414 418
415end_label: 419end_label:
416 mutex_lock(&ipmi_device->tx_msg_lock); 420 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
417 list_del(&tx_msg->head); 421 list_del(&tx_msg->head);
418 mutex_unlock(&ipmi_device->tx_msg_lock); 422 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
419 kfree(tx_msg); 423 kfree(tx_msg);
420 return status; 424 return status;
421} 425}
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
457 461
458 INIT_LIST_HEAD(&ipmi_device->head); 462 INIT_LIST_HEAD(&ipmi_device->head);
459 463
460 mutex_init(&ipmi_device->tx_msg_lock); 464 spin_lock_init(&ipmi_device->tx_msg_lock);
461 INIT_LIST_HEAD(&ipmi_device->tx_msg_list); 465 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
462 ipmi_install_space_handler(ipmi_device); 466 ipmi_install_space_handler(ipmi_device);
463 467
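The acpi_ipmi.c hunks convert tx_msg_lock from a mutex to a spinlock taken with spin_lock_irqsave(), apparently because the IPMI completion path can run in atomic context where a mutex must not be acquired. A kernel-style sketch of the resulting locking pattern around a shared message list (illustrative only; struct and function names invented):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_msg {
        struct list_head head;
        long id;
};

static LIST_HEAD(demo_msg_list);
static DEFINE_SPINLOCK(demo_msg_lock);

static void demo_queue_msg(struct demo_msg *m)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_msg_lock, flags);
        list_add_tail(&m->head, &demo_msg_list);
        spin_unlock_irqrestore(&demo_msg_lock, flags);
}

/* Safe even when called from a handler running in interrupt context. */
static struct demo_msg *demo_find_msg(long id)
{
        struct demo_msg *m, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&demo_msg_lock, flags);
        list_for_each_entry(m, &demo_msg_list, head) {
                if (m->id == id) {
                        found = m;
                        break;
                }
        }
        spin_unlock_irqrestore(&demo_msg_lock, flags);
        return found;
}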
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 59d3202f6b36..a94383d1f350 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
1025 } 1025 }
1026} 1026}
1027EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); 1027EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
1028
1029/**
1030 * acpi_dev_pm_add_dependent - Add physical device depending for PM.
1031 * @handle: Handle of ACPI device node.
1032 * @depdev: Device depending on that node for PM.
1033 */
1034void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
1035{
1036 struct acpi_device_physical_node *dep;
1037 struct acpi_device *adev;
1038
1039 if (!depdev || acpi_bus_get_device(handle, &adev))
1040 return;
1041
1042 mutex_lock(&adev->physical_node_lock);
1043
1044 list_for_each_entry(dep, &adev->power_dependent, node)
1045 if (dep->dev == depdev)
1046 goto out;
1047
1048 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1049 if (dep) {
1050 dep->dev = depdev;
1051 list_add_tail(&dep->node, &adev->power_dependent);
1052 }
1053
1054 out:
1055 mutex_unlock(&adev->physical_node_lock);
1056}
1057EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
1058
1059/**
1060 * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
1061 * @handle: Handle of ACPI device node.
1062 * @depdev: Device depending on that node for PM.
1063 */
1064void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
1065{
1066 struct acpi_device_physical_node *dep;
1067 struct acpi_device *adev;
1068
1069 if (!depdev || acpi_bus_get_device(handle, &adev))
1070 return;
1071
1072 mutex_lock(&adev->physical_node_lock);
1073
1074 list_for_each_entry(dep, &adev->power_dependent, node)
1075 if (dep->dev == depdev) {
1076 list_del(&dep->node);
1077 kfree(dep);
1078 break;
1079 }
1080
1081 mutex_unlock(&adev->physical_node_lock);
1082}
1083EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
1084#endif /* CONFIG_PM */ 1028#endif /* CONFIG_PM */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0dbe5cdf3396..c2ad391d8041 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
59#define ACPI_POWER_RESOURCE_STATE_ON 0x01 59#define ACPI_POWER_RESOURCE_STATE_ON 0x01
60#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 60#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
61 61
62struct acpi_power_dependent_device {
63 struct list_head node;
64 struct acpi_device *adev;
65 struct work_struct work;
66};
67
68struct acpi_power_resource { 62struct acpi_power_resource {
69 struct acpi_device device; 63 struct acpi_device device;
70 struct list_head list_node; 64 struct list_head list_node;
71 struct list_head dependent;
72 char *name; 65 char *name;
73 u32 system_level; 66 u32 system_level;
74 u32 order; 67 u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
233 return 0; 226 return 0;
234} 227}
235 228
236static void acpi_power_resume_dependent(struct work_struct *work)
237{
238 struct acpi_power_dependent_device *dep;
239 struct acpi_device_physical_node *pn;
240 struct acpi_device *adev;
241 int state;
242
243 dep = container_of(work, struct acpi_power_dependent_device, work);
244 adev = dep->adev;
245 if (acpi_power_get_inferred_state(adev, &state))
246 return;
247
248 if (state > ACPI_STATE_D0)
249 return;
250
251 mutex_lock(&adev->physical_node_lock);
252
253 list_for_each_entry(pn, &adev->physical_node_list, node)
254 pm_request_resume(pn->dev);
255
256 list_for_each_entry(pn, &adev->power_dependent, node)
257 pm_request_resume(pn->dev);
258
259 mutex_unlock(&adev->physical_node_lock);
260}
261
262static int __acpi_power_on(struct acpi_power_resource *resource) 229static int __acpi_power_on(struct acpi_power_resource *resource)
263{ 230{
264 acpi_status status = AE_OK; 231 acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
283 resource->name)); 250 resource->name));
284 } else { 251 } else {
285 result = __acpi_power_on(resource); 252 result = __acpi_power_on(resource);
286 if (result) { 253 if (result)
287 resource->ref_count--; 254 resource->ref_count--;
288 } else {
289 struct acpi_power_dependent_device *dep;
290
291 list_for_each_entry(dep, &resource->dependent, node)
292 schedule_work(&dep->work);
293 }
294 } 255 }
295 return result; 256 return result;
296} 257}
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
390 return result; 351 return result;
391} 352}
392 353
393static void acpi_power_add_dependent(struct acpi_power_resource *resource,
394 struct acpi_device *adev)
395{
396 struct acpi_power_dependent_device *dep;
397
398 mutex_lock(&resource->resource_lock);
399
400 list_for_each_entry(dep, &resource->dependent, node)
401 if (dep->adev == adev)
402 goto out;
403
404 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
405 if (!dep)
406 goto out;
407
408 dep->adev = adev;
409 INIT_WORK(&dep->work, acpi_power_resume_dependent);
410 list_add_tail(&dep->node, &resource->dependent);
411
412 out:
413 mutex_unlock(&resource->resource_lock);
414}
415
416static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
417 struct acpi_device *adev)
418{
419 struct acpi_power_dependent_device *dep;
420 struct work_struct *work = NULL;
421
422 mutex_lock(&resource->resource_lock);
423
424 list_for_each_entry(dep, &resource->dependent, node)
425 if (dep->adev == adev) {
426 list_del(&dep->node);
427 work = &dep->work;
428 break;
429 }
430
431 mutex_unlock(&resource->resource_lock);
432
433 if (work) {
434 cancel_work_sync(work);
435 kfree(dep);
436 }
437}
438
439static struct attribute *attrs[] = { 354static struct attribute *attrs[] = {
440 NULL, 355 NULL,
441}; 356};
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
524 439
525void acpi_power_add_remove_device(struct acpi_device *adev, bool add) 440void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
526{ 441{
527 struct acpi_device_power_state *ps;
528 struct acpi_power_resource_entry *entry;
529 int state; 442 int state;
530 443
531 if (adev->wakeup.flags.valid) 444 if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
535 if (!adev->power.flags.power_resources) 448 if (!adev->power.flags.power_resources)
536 return; 449 return;
537 450
538 ps = &adev->power.states[ACPI_STATE_D0];
539 list_for_each_entry(entry, &ps->resources, node) {
540 struct acpi_power_resource *resource = entry->resource;
541
542 if (add)
543 acpi_power_add_dependent(resource, adev);
544 else
545 acpi_power_remove_dependent(resource, adev);
546 }
547
548 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) 451 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
549 acpi_power_expose_hide(adev, 452 acpi_power_expose_hide(adev,
550 &adev->power.states[state].resources, 453 &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
882 acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER, 785 acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
883 ACPI_STA_DEFAULT); 786 ACPI_STA_DEFAULT);
884 mutex_init(&resource->resource_lock); 787 mutex_init(&resource->resource_lock);
885 INIT_LIST_HEAD(&resource->dependent);
886 INIT_LIST_HEAD(&resource->list_node); 788 INIT_LIST_HEAD(&resource->list_node);
887 resource->name = device->pnp.bus_id; 789 resource->name = device->pnp.bus_id;
888 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 790 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
936 mutex_lock(&resource->resource_lock); 838 mutex_lock(&resource->resource_lock);
937 839
938 result = acpi_power_get_state(resource->device.handle, &state); 840 result = acpi_power_get_state(resource->device.handle, &state);
939 if (result) 841 if (result) {
842 mutex_unlock(&resource->resource_lock);
940 continue; 843 continue;
844 }
941 845
942 if (state == ACPI_POWER_RESOURCE_STATE_OFF 846 if (state == ACPI_POWER_RESOURCE_STATE_OFF
943 && resource->ref_count) { 847 && resource->ref_count) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..fee8a297c7d9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
968 } 968 }
969 return 0; 969 return 0;
970} 970}
971EXPORT_SYMBOL_GPL(acpi_bus_get_device); 971EXPORT_SYMBOL(acpi_bus_get_device);
972 972
973int acpi_device_add(struct acpi_device *device, 973int acpi_device_add(struct acpi_device *device,
974 void (*release)(struct device *)) 974 void (*release)(struct device *))
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
999 INIT_LIST_HEAD(&device->wakeup_list); 999 INIT_LIST_HEAD(&device->wakeup_list);
1000 INIT_LIST_HEAD(&device->physical_node_list); 1000 INIT_LIST_HEAD(&device->physical_node_list);
1001 mutex_init(&device->physical_node_lock); 1001 mutex_init(&device->physical_node_lock);
1002 INIT_LIST_HEAD(&device->power_dependent);
1003 1002
1004 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); 1003 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
1005 if (!new_bus_id) { 1004 if (!new_bus_id) {
@@ -1121,7 +1120,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
1121EXPORT_SYMBOL(acpi_bus_register_driver); 1120EXPORT_SYMBOL(acpi_bus_register_driver);
1122 1121
1123/** 1122/**
1124 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus 1123 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
1125 * @driver: driver to unregister 1124 * @driver: driver to unregister
1126 * 1125 *
1127 * Unregisters a driver with the ACPI bus. Searches the namespace for all 1126 * Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9d715ae5ff6b..8e28f923cf7f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1343 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1343 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
1344 host->flags |= ATA_HOST_PARALLEL_SCAN; 1344 host->flags |= ATA_HOST_PARALLEL_SCAN;
1345 else 1345 else
1346 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 1346 dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
1347 1347
1348 if (pi.flags & ATA_FLAG_EM) 1348 if (pi.flags & ATA_FLAG_EM)
1349 ahci_reset_em(host); 1349 ahci_reset_em(host);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 2daaee05cab1..7d3b85385bfc 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
184 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 184 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
185 host->flags |= ATA_HOST_PARALLEL_SCAN; 185 host->flags |= ATA_HOST_PARALLEL_SCAN;
186 else 186 else
187 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 187 dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
188 188
189 if (pi.flags & ATA_FLAG_EM) 189 if (pi.flags & ATA_FLAG_EM)
190 ahci_reset_em(host); 190 ahci_reset_em(host);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index acfd0f711069..aaac4fb0d564 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
778 rc = ap->ops->transmit_led_message(ap, 778 rc = ap->ops->transmit_led_message(ap,
779 emp->led_state, 779 emp->led_state,
780 4); 780 4);
781 /*
782 * If busy, give a breather but do not
783 * release EH ownership by using msleep()
784 * instead of ata_msleep(). EM Transmit
785 * bit is busy for the whole host and
786 * releasing ownership will cause other
787 * ports to fail the same way.
788 */
781 if (rc == -EBUSY) 789 if (rc == -EBUSY)
782 ata_msleep(ap, 1); 790 msleep(1);
783 else 791 else
784 break; 792 break;
785 } 793 }
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 4ba8b0405572..ab714d2ad978 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
1035{ 1035{
1036 ata_acpi_clear_gtf(dev); 1036 ata_acpi_clear_gtf(dev);
1037} 1037}
1038
1039void ata_scsi_acpi_bind(struct ata_device *dev)
1040{
1041 acpi_handle handle = ata_dev_acpi_handle(dev);
1042 if (handle)
1043 acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
1044}
1045
1046void ata_scsi_acpi_unbind(struct ata_device *dev)
1047{
1048 acpi_handle handle = ata_dev_acpi_handle(dev);
1049 if (handle)
1050 acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
1051}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c69fcce505c0..370462fa8e01 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1322 * should be retried. To be used from EH. 1322 * should be retried. To be used from EH.
1323 * 1323 *
1324 * SCSI midlayer limits the number of retries to scmd->allowed. 1324 * SCSI midlayer limits the number of retries to scmd->allowed.
1325 * scmd->retries is decremented for commands which get retried 1325 * scmd->allowed is incremented for commands which get retried
1326 * due to unrelated failures (qc->err_mask is zero). 1326 * due to unrelated failures (qc->err_mask is zero).
1327 */ 1327 */
1328void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1328void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1329{ 1329{
1330 struct scsi_cmnd *scmd = qc->scsicmd; 1330 struct scsi_cmnd *scmd = qc->scsicmd;
1331 if (!qc->err_mask && scmd->retries) 1331 if (!qc->err_mask)
1332 scmd->retries--; 1332 scmd->allowed++;
1333 __ata_eh_qc_complete(qc); 1333 __ata_eh_qc_complete(qc);
1334} 1334}
1335 1335
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 97a0cef12959..db6dfcfa3e2e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3679 if (!IS_ERR(sdev)) { 3679 if (!IS_ERR(sdev)) {
3680 dev->sdev = sdev; 3680 dev->sdev = sdev;
3681 scsi_device_put(sdev); 3681 scsi_device_put(sdev);
3682 ata_scsi_acpi_bind(dev);
3683 } else { 3682 } else {
3684 dev->sdev = NULL; 3683 dev->sdev = NULL;
3685 } 3684 }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
3767 struct scsi_device *sdev; 3766 struct scsi_device *sdev;
3768 unsigned long flags; 3767 unsigned long flags;
3769 3768
3770 ata_scsi_acpi_unbind(dev);
3771
3772 /* Alas, we need to grab scan_mutex to ensure SCSI device 3769 /* Alas, we need to grab scan_mutex to ensure SCSI device
3773 * state doesn't change underneath us and thus 3770 * state doesn't change underneath us and thus
3774 * scsi_device_get() always succeeds. The mutex locking can 3771 * scsi_device_get() always succeeds. The mutex locking can
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index eeeb77845d48..45b5ab3a95d5 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
121extern void ata_acpi_bind_port(struct ata_port *ap); 121extern void ata_acpi_bind_port(struct ata_port *ap);
122extern void ata_acpi_bind_dev(struct ata_device *dev); 122extern void ata_acpi_bind_dev(struct ata_device *dev);
123extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev); 123extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
124extern void ata_scsi_acpi_bind(struct ata_device *dev);
125extern void ata_scsi_acpi_unbind(struct ata_device *dev);
126#else 124#else
127static inline void ata_acpi_dissociate(struct ata_host *host) { } 125static inline void ata_acpi_dissociate(struct ata_host *host) { }
128static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 126static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
133 pm_message_t state) { } 131 pm_message_t state) { }
134static inline void ata_acpi_bind_port(struct ata_port *ap) {} 132static inline void ata_acpi_bind_port(struct ata_port *ap) {}
135static inline void ata_acpi_bind_dev(struct ata_device *dev) {} 133static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
136static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
137static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
138#endif 134#endif
139 135
140/* libata-scsi.c */ 136/* libata-scsi.c */
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 4bceb8803a10..b33d1f99b3a4 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
78 78
79 ap->ioaddr.cmd_addr = cmd_addr; 79 ap->ioaddr.cmd_addr = cmd_addr;
80 80
81 if (pnp_port_valid(idev, 1) == 0) { 81 if (pnp_port_valid(idev, 1)) {
82 ctl_addr = devm_ioport_map(&idev->dev, 82 ctl_addr = devm_ioport_map(&idev->dev,
83 pnp_port_start(idev, 1), 1); 83 pnp_port_start(idev, 1), 1);
84 ap->ioaddr.altstatus_addr = ctl_addr; 84 ap->ioaddr.altstatus_addr = ctl_addr;
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
2 * sata_promise.c - Promise SATA 2 * sata_promise.c - Promise SATA
3 * 3 *
4 * Maintained by: Tejun Heo <tj@kernel.org> 4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Mikael Pettersson <mikpe@it.uu.se> 5 * Mikael Pettersson
6 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails. 7 * on emails.
8 * 8 *
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c7cfadcf6752..34abf4d8a45f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
2017 */ 2017 */
2018void device_shutdown(void) 2018void device_shutdown(void)
2019{ 2019{
2020 struct device *dev; 2020 struct device *dev, *parent;
2021 2021
2022 spin_lock(&devices_kset->list_lock); 2022 spin_lock(&devices_kset->list_lock);
2023 /* 2023 /*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
2034 * prevent it from being freed because parent's 2034 * prevent it from being freed because parent's
2035 * lock is to be held 2035 * lock is to be held
2036 */ 2036 */
2037 get_device(dev->parent); 2037 parent = get_device(dev->parent);
2038 get_device(dev); 2038 get_device(dev);
2039 /* 2039 /*
2040 * Make sure the device is off the kset list, in the 2040 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
2044 spin_unlock(&devices_kset->list_lock); 2044 spin_unlock(&devices_kset->list_lock);
2045 2045
2046 /* hold lock to avoid race with probe/release */ 2046 /* hold lock to avoid race with probe/release */
2047 if (dev->parent) 2047 if (parent)
2048 device_lock(dev->parent); 2048 device_lock(parent);
2049 device_lock(dev); 2049 device_lock(dev);
2050 2050
2051 /* Don't allow any more runtime suspends */ 2051 /* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
2063 } 2063 }
2064 2064
2065 device_unlock(dev); 2065 device_unlock(dev);
2066 if (dev->parent) 2066 if (parent)
2067 device_unlock(dev->parent); 2067 device_unlock(parent);
2068 2068
2069 put_device(dev); 2069 put_device(dev);
2070 put_device(dev->parent); 2070 put_device(parent);
2071 2071
2072 spin_lock(&devices_kset->list_lock); 2072 spin_lock(&devices_kset->list_lock);
2073 } 2073 }
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e59f6535c44..bece691cb5d9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
333 online_type = ONLINE_KEEP; 333 online_type = ONLINE_KEEP;
334 else if (!strncmp(buf, "offline", min_t(int, count, 7))) 334 else if (!strncmp(buf, "offline", min_t(int, count, 7)))
335 online_type = -1; 335 online_type = -1;
336 else 336 else {
337 return -EINVAL; 337 ret = -EINVAL;
338 goto err;
339 }
338 340
339 switch (online_type) { 341 switch (online_type) {
340 case ONLINE_KERNEL: 342 case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
357 ret = -EINVAL; /* should never happen */ 359 ret = -EINVAL; /* should never happen */
358 } 360 }
359 361
362err:
360 unlock_device_hotplug(); 363 unlock_device_hotplug();
361 364
362 if (ret) 365 if (ret)
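
The store_mem_state() hunk above exists because the early "return -EINVAL" sat between lock_device_hotplug() and unlock_device_hotplug(), so the invalid-input path left the lock held. A minimal standalone sketch of the pattern the fix restores, with a pthread mutex standing in for the device-hotplug lock (names and values are illustrative, not from the driver):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static int store_state(const char *buf)
{
	int ret = 0;

	pthread_mutex_lock(&hotplug_lock);

	if (strcmp(buf, "online") != 0 && strcmp(buf, "offline") != 0) {
		ret = -EINVAL;	/* previously an early return that leaked the lock */
		goto err;
	}

	/* ... perform the state change under the lock ... */

err:
	pthread_mutex_unlock(&hotplug_lock);
	return ret;
}

int main(void)
{
	printf("%d\n", store_state("bogus"));	/* -22, and the lock is released */
	printf("%d\n", store_state("online"));	/* 0 */
	return 0;
}
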
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
210 } 210 }
211} 211}
212 212
213static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
214{
215 u16 data;
216
217 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
218 data = up ? 0x74 : 0x7C;
219 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
220 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
221 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
222 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
223 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
224 data = up ? 0x75 : 0x7D;
225 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
226 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
227 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
228 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
229 }
230}
231
232/************************************************** 213/**************************************************
233 * Init. 214 * Init.
234 **************************************************/ 215 **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
255 bcma_core_pci_clientmode_init(pc); 236 bcma_core_pci_clientmode_init(pc);
256} 237}
257 238
239void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
240{
241 struct bcma_drv_pci *pc;
242 u16 data;
243
244 if (bus->hosttype != BCMA_HOSTTYPE_PCI)
245 return;
246
247 pc = &bus->drv_pci[0];
248
249 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
250 data = up ? 0x74 : 0x7C;
251 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
252 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
253 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
254 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
255 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
256 data = up ? 0x75 : 0x7D;
257 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
258 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
259 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
260 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
261 }
262}
263EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
264
258int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 265int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
259 bool enable) 266 bool enable)
260{ 267{
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
310 317
311 pc = &bus->drv_pci[0]; 318 pc = &bus->drv_pci[0];
312 319
313 bcma_core_pci_power_save(pc, true);
314
315 bcma_core_pci_extend_L1timer(pc, true); 320 bcma_core_pci_extend_L1timer(pc, true);
316} 321}
317EXPORT_SYMBOL_GPL(bcma_core_pci_up); 322EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
326 pc = &bus->drv_pci[0]; 331 pc = &bus->drv_pci[0];
327 332
328 bcma_core_pci_extend_L1timer(pc, false); 333 bcma_core_pci_extend_L1timer(pc, false);
329
330 bcma_core_pci_power_save(pc, false);
331} 334}
332EXPORT_SYMBOL_GPL(bcma_core_pci_down); 335EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1189 int err; 1189 int err;
1190 u32 cp; 1190 u32 cp;
1191 1191
1192 memset(&arg64, 0, sizeof(arg64));
1192 err = 0; 1193 err = 0;
1193 err |= 1194 err |=
1194 copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 1195 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
1193 ida_pci_info_struct pciinfo; 1193 ida_pci_info_struct pciinfo;
1194 1194
1195 if (!arg) return -EINVAL; 1195 if (!arg) return -EINVAL;
1196 memset(&pciinfo, 0, sizeof(pciinfo));
1196 pciinfo.bus = host->pci_dev->bus->number; 1197 pciinfo.bus = host->pci_dev->bus->number;
1197 pciinfo.dev_fn = host->pci_dev->devfn; 1198 pciinfo.dev_fn = host->pci_dev->devfn;
1198 pciinfo.board_id = host->board_id; 1199 pciinfo.board_id = host->board_id;
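
Both block-driver hunks above add the same hardening: zero the whole on-stack structure before filling individual fields, so structure padding and any field the handler never writes cannot carry stale kernel stack bytes onward (to user space, or deeper into the driver). A standalone userspace sketch of why assigning the fields alone is typically not enough; the layout and values are illustrative, not the drivers' own:

#include <stdio.h>
#include <string.h>

struct example_info {		/* illustrative layout */
	unsigned char bus;
	unsigned char dev_fn;	/* two padding bytes usually follow on common ABIs */
	unsigned int  board_id;
};

static void dump(const char *tag, const struct example_info *p)
{
	const unsigned char *b = (const unsigned char *)p;
	size_t i;

	printf("%-14s", tag);
	for (i = 0; i < sizeof(*p); i++)
		printf(" %02x", b[i]);
	printf("\n");
}

int main(void)
{
	union {
		struct example_info info;
		unsigned char raw[sizeof(struct example_info)];
	} u;

	memset(u.raw, 0xaa, sizeof(u.raw));	/* simulate stale stack contents */

	/* fields only: the padding bytes typically still hold 0xaa */
	u.info.bus = 1; u.info.dev_fn = 0x20; u.info.board_id = 0x3225;
	dump("fields only:", &u.info);

	/* memset first, as in the hunks above: nothing stale survives */
	memset(&u.info, 0, sizeof(u.info));
	u.info.bus = 1; u.info.dev_fn = 0x20; u.info.board_id = 0x3225;
	dump("memset first:", &u.info);

	return 0;
}
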
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
85 { USB_DEVICE(0x04CA, 0x3008) }, 85 { USB_DEVICE(0x04CA, 0x3008) },
86 { USB_DEVICE(0x13d3, 0x3362) }, 86 { USB_DEVICE(0x13d3, 0x3362) },
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) },
88 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
89 { USB_DEVICE(0x0489, 0xe057) }, 90 { USB_DEVICE(0x0489, 0xe057) },
90 { USB_DEVICE(0x13d3, 0x3393) }, 91 { USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
126 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 127 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
127 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 128 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
128 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0b05, 0x17b5) }, 104 { USB_DEVICE(0x0b05, 0x17b5) },
105 { USB_DEVICE(0x0b05, 0x17cb) },
105 { USB_DEVICE(0x04ca, 0x2003) }, 106 { USB_DEVICE(0x04ca, 0x2003) },
106 { USB_DEVICE(0x0489, 0xe042) }, 107 { USB_DEVICE(0x0489, 0xe042) },
107 { USB_DEVICE(0x413c, 0x8197) }, 108 { USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
112 /*Broadcom devices with vendor specific id */ 113 /*Broadcom devices with vendor specific id */
113 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 114 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
114 115
116 /* Belkin F8065bf - Broadcom based */
117 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
118
115 { } /* Terminating entry */ 119 { } /* Terminating entry */
116}; 120};
117 121
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
148 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
149 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 153 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
150 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
151 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
152 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 19ab6ff53d59..2394e9753ef5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
700 phys_addr_t sdramwins_phys_base, 700 phys_addr_t sdramwins_phys_base,
701 size_t sdramwins_size) 701 size_t sdramwins_size)
702{ 702{
703 struct device_node *np;
703 int win; 704 int win;
704 705
705 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); 706 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
712 return -ENOMEM; 713 return -ENOMEM;
713 } 714 }
714 715
715 if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric")) 716 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
717 if (np) {
716 mbus->hw_io_coherency = 1; 718 mbus->hw_io_coherency = 1;
719 of_node_put(np);
720 }
717 721
718 for (win = 0; win < mbus->soc->num_wins; win++) 722 for (win = 0; win < mbus->soc->num_wins; win++)
719 mvebu_mbus_disable_window(mbus, win); 723 mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
861 int ret; 865 int ret;
862 866
863 /* 867 /*
864 * These are optional, so we clear them and they'll 868 * These are optional, so we make sure that resource_size(x) will
865 * be zero if they are missing from the DT. 869 * return 0.
866 */ 870 */
867 memset(mem, 0, sizeof(struct resource)); 871 memset(mem, 0, sizeof(struct resource));
872 mem->end = -1;
868 memset(io, 0, sizeof(struct resource)); 873 memset(io, 0, sizeof(struct resource));
874 io->end = -1;
869 875
870 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); 876 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
871 if (!ret) { 877 if (!ret) {
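
The reworded comment above ("make sure that resource_size(x) will return 0") relies on how the kernel computes a resource's size: end - start + 1. A small standalone sketch, with a simplified structure standing in for struct resource, of why the extra mem->end = -1 and io->end = -1 assignments matter after the memset():

#include <stdio.h>

struct fake_resource {			/* simplified stand-in for struct resource */
	unsigned long long start;
	unsigned long long end;
};

/* same formula as the kernel's resource_size() helper */
static unsigned long long fake_resource_size(const struct fake_resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct fake_resource zeroed = { 0, 0 };				/* memset() alone */
	struct fake_resource empty  = { 0, (unsigned long long)-1 };	/* plus end = -1 */

	printf("zeroed: %llu\n", fake_resource_size(&zeroed));	/* 1 -- looks populated */
	printf("empty:  %llu\n", fake_resource_size(&empty));	/* 0 -- clearly absent */
	return 0;
}
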
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7737b5bd26af..7a744d391756 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -640,7 +640,7 @@ struct timer_rand_state {
640 */ 640 */
641void add_device_randomness(const void *buf, unsigned int size) 641void add_device_randomness(const void *buf, unsigned int size)
642{ 642{
643 unsigned long time = get_cycles() ^ jiffies; 643 unsigned long time = random_get_entropy() ^ jiffies;
644 644
645 mix_pool_bytes(&input_pool, buf, size, NULL); 645 mix_pool_bytes(&input_pool, buf, size, NULL);
646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); 646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
677 goto out; 677 goto out;
678 678
679 sample.jiffies = jiffies; 679 sample.jiffies = jiffies;
680 sample.cycles = get_cycles(); 680 sample.cycles = random_get_entropy();
681 sample.num = num; 681 sample.num = num;
682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); 682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
683 683
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); 744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
745 struct pt_regs *regs = get_irq_regs(); 745 struct pt_regs *regs = get_irq_regs();
746 unsigned long now = jiffies; 746 unsigned long now = jiffies;
747 __u32 input[4], cycles = get_cycles(); 747 __u32 input[4], cycles = random_get_entropy();
748 748
749 input[0] = cycles ^ jiffies; 749 input[0] = cycles ^ jiffies;
750 input[1] = irq; 750 input[1] = irq;
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
1459 1459
1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1461 1461
1462static int __init random_int_secret_init(void) 1462int random_int_secret_init(void)
1463{ 1463{
1464 get_random_bytes(random_int_secret, sizeof(random_int_secret)); 1464 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1465 return 0; 1465 return 0;
1466} 1466}
1467late_initcall(random_int_secret_init);
1468 1467
1469/* 1468/*
1470 * Get a random word for internal kernel use only. Similar to urandom but 1469 * Get a random word for internal kernel use only. Similar to urandom but
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
1483 1482
1484 hash = get_cpu_var(get_random_int_hash); 1483 hash = get_cpu_var(get_random_int_hash);
1485 1484
1486 hash[0] += current->pid + jiffies + get_cycles(); 1485 hash[0] += current->pid + jiffies + random_get_entropy();
1487 md5_transform(hash, random_int_secret); 1486 md5_transform(hash, random_int_secret);
1488 ret = hash[0]; 1487 ret = hash[0];
1489 put_cpu_var(get_random_int_hash); 1488 put_cpu_var(get_random_int_hash);
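
These random.c hunks replace get_cycles() with random_get_entropy(). The point of the indirection is that get_cycles() may simply return 0 on architectures without a usable cycle counter, while random_get_entropy() defaults to get_cycles() but can be overridden per architecture with something better. A hedged sketch of the generic fallback (in include/linux/timex.h at the time):

/*
 * Architectures with a better clock than their (possibly non-existent)
 * cycle counter override this macro; everyone else keeps the old behaviour.
 */
#ifndef random_get_entropy
#define random_get_entropy()	get_cycles()
#endif
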
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..94c280d36e8b 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <xen/xen.h>
13#include <xen/events.h> 14#include <xen/events.h>
14#include <xen/interface/io/tpmif.h> 15#include <xen/interface/io/tpmif.h>
15#include <xen/grant_table.h> 16#include <xen/grant_table.h>
@@ -142,32 +143,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
142 return length; 143 return length;
143} 144}
144 145
145ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
146 char *buf)
147{
148 struct tpm_chip *chip = dev_get_drvdata(dev);
149 struct tpm_private *priv = TPM_VPRIV(chip);
150 u8 locality = priv->shr->locality;
151
152 return sprintf(buf, "%d\n", locality);
153}
154
155ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
156 const char *buf, size_t len)
157{
158 struct tpm_chip *chip = dev_get_drvdata(dev);
159 struct tpm_private *priv = TPM_VPRIV(chip);
160 u8 val;
161
162 int rv = kstrtou8(buf, 0, &val);
163 if (rv)
164 return rv;
165
166 priv->shr->locality = val;
167
168 return len;
169}
170
171static const struct file_operations vtpm_ops = { 146static const struct file_operations vtpm_ops = {
172 .owner = THIS_MODULE, 147 .owner = THIS_MODULE,
173 .llseek = no_llseek, 148 .llseek = no_llseek,
@@ -188,8 +163,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
188static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 163static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
189static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); 164static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
190static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); 165static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
191static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
192 tpm_store_locality);
193 166
194static struct attribute *vtpm_attrs[] = { 167static struct attribute *vtpm_attrs[] = {
195 &dev_attr_pubek.attr, 168 &dev_attr_pubek.attr,
@@ -202,7 +175,6 @@ static struct attribute *vtpm_attrs[] = {
202 &dev_attr_cancel.attr, 175 &dev_attr_cancel.attr,
203 &dev_attr_durations.attr, 176 &dev_attr_durations.attr,
204 &dev_attr_timeouts.attr, 177 &dev_attr_timeouts.attr,
205 &dev_attr_locality.attr,
206 NULL, 178 NULL,
207}; 179};
208 180
@@ -210,8 +182,6 @@ static struct attribute_group vtpm_attr_grp = {
210 .attrs = vtpm_attrs, 182 .attrs = vtpm_attrs,
211}; 183};
212 184
213#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
214
215static const struct tpm_vendor_specific tpm_vtpm = { 185static const struct tpm_vendor_specific tpm_vtpm = {
216 .status = vtpm_status, 186 .status = vtpm_status,
217 .recv = vtpm_recv, 187 .recv = vtpm_recv,
@@ -224,11 +194,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
224 .miscdev = { 194 .miscdev = {
225 .fops = &vtpm_ops, 195 .fops = &vtpm_ops,
226 }, 196 },
227 .duration = {
228 TPM_LONG_TIMEOUT,
229 TPM_LONG_TIMEOUT,
230 TPM_LONG_TIMEOUT,
231 },
232}; 197};
233 198
234static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) 199static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
26 26
27config ARMADA_370_XP_TIMER 27config ARMADA_370_XP_TIMER
28 bool 28 bool
29 select CLKSRC_OF
29 30
30config ORION_TIMER 31config ORION_TIMER
31 select CLKSRC_OF 32 select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
30 clocksource_of_init_fn init_func; 30 clocksource_of_init_fn init_func;
31 31
32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
33 if (!of_device_is_available(np))
34 continue;
35
33 init_func = match->data; 36 init_func = match->data;
34 init_func(np); 37 init_func(np);
35 } 38 }
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
301 ced->name = dev_name(&p->pdev->dev); 301 ced->name = dev_name(&p->pdev->dev);
302 ced->features = CLOCK_EVT_FEAT_ONESHOT; 302 ced->features = CLOCK_EVT_FEAT_ONESHOT;
303 ced->rating = 200; 303 ced->rating = 200;
304 ced->cpumask = cpumask_of(0); 304 ced->cpumask = cpu_possible_mask;
305 ced->set_next_event = em_sti_clock_event_next; 305 ced->set_next_event = em_sti_clock_event_next;
306 ced->set_mode = em_sti_clock_event_mode; 306 ced->set_mode = em_sti_clock_event_mode;
307 307
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
428 evt->irq); 428 evt->irq);
429 return -EIO; 429 return -EIO;
430 } 430 }
431 irq_set_affinity(evt->irq, cpumask_of(cpu));
432 } else { 431 } else {
433 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); 432 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
434 } 433 }
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
449 unsigned long action, void *hcpu) 448 unsigned long action, void *hcpu)
450{ 449{
451 struct mct_clock_event_device *mevt; 450 struct mct_clock_event_device *mevt;
451 unsigned int cpu;
452 452
453 /* 453 /*
454 * Grab cpu pointer in each case to avoid spurious 454 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
459 mevt = this_cpu_ptr(&percpu_mct_tick); 459 mevt = this_cpu_ptr(&percpu_mct_tick);
460 exynos4_local_timer_setup(&mevt->evt); 460 exynos4_local_timer_setup(&mevt->evt);
461 break; 461 break;
462 case CPU_ONLINE:
463 cpu = (unsigned long)hcpu;
464 if (mct_int_type == MCT_INT_SPI)
465 irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
466 cpumask_of(cpu));
467 break;
462 case CPU_DYING: 468 case CPU_DYING:
463 mevt = this_cpu_ptr(&percpu_mct_tick); 469 mevt = this_cpu_ptr(&percpu_mct_tick);
464 exynos4_local_timer_stop(&mevt->evt); 470 exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
500 &percpu_mct_tick); 506 &percpu_mct_tick);
501 WARN(err, "MCT: can't request IRQ %d (%d)\n", 507 WARN(err, "MCT: can't request IRQ %d (%d)\n",
502 mct_irqs[MCT_L0_IRQ], err); 508 mct_irqs[MCT_L0_IRQ], err);
509 } else {
510 irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
503 } 511 }
504 512
505 err = register_cpu_notifier(&exynos4_mct_cpu_nb); 513 err = register_cpu_notifier(&exynos4_mct_cpu_nb);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128cce9b..c73fc2b74de2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
65 65
66 msg = (struct cn_msg *)buffer; 66 msg = (struct cn_msg *)buffer;
67 ev = (struct proc_event *)msg->data; 67 ev = (struct proc_event *)msg->data;
68 memset(&ev->event_data, 0, sizeof(ev->event_data));
68 get_seq(&msg->seq, &ev->cpu); 69 get_seq(&msg->seq, &ev->cpu);
69 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 70 ktime_get_ts(&ts); /* get high res monotonic timestamp */
70 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 71 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
80 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 81 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
81 msg->ack = 0; /* not used */ 82 msg->ack = 0; /* not used */
82 msg->len = sizeof(*ev); 83 msg->len = sizeof(*ev);
84 msg->flags = 0; /* not used */
83 /* If cn_netlink_send() failed, the data is not sent */ 85 /* If cn_netlink_send() failed, the data is not sent */
84 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 86 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
85} 87}
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
96 98
97 msg = (struct cn_msg *)buffer; 99 msg = (struct cn_msg *)buffer;
98 ev = (struct proc_event *)msg->data; 100 ev = (struct proc_event *)msg->data;
101 memset(&ev->event_data, 0, sizeof(ev->event_data));
99 get_seq(&msg->seq, &ev->cpu); 102 get_seq(&msg->seq, &ev->cpu);
100 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 103 ktime_get_ts(&ts); /* get high res monotonic timestamp */
101 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 104 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
106 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 109 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
107 msg->ack = 0; /* not used */ 110 msg->ack = 0; /* not used */
108 msg->len = sizeof(*ev); 111 msg->len = sizeof(*ev);
112 msg->flags = 0; /* not used */
109 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 113 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
110} 114}
111 115
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
122 126
123 msg = (struct cn_msg *)buffer; 127 msg = (struct cn_msg *)buffer;
124 ev = (struct proc_event *)msg->data; 128 ev = (struct proc_event *)msg->data;
129 memset(&ev->event_data, 0, sizeof(ev->event_data));
125 ev->what = which_id; 130 ev->what = which_id;
126 ev->event_data.id.process_pid = task->pid; 131 ev->event_data.id.process_pid = task->pid;
127 ev->event_data.id.process_tgid = task->tgid; 132 ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
145 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 150 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
146 msg->ack = 0; /* not used */ 151 msg->ack = 0; /* not used */
147 msg->len = sizeof(*ev); 152 msg->len = sizeof(*ev);
153 msg->flags = 0; /* not used */
148 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 154 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
149} 155}
150 156
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
160 166
161 msg = (struct cn_msg *)buffer; 167 msg = (struct cn_msg *)buffer;
162 ev = (struct proc_event *)msg->data; 168 ev = (struct proc_event *)msg->data;
169 memset(&ev->event_data, 0, sizeof(ev->event_data));
163 get_seq(&msg->seq, &ev->cpu); 170 get_seq(&msg->seq, &ev->cpu);
164 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 171 ktime_get_ts(&ts); /* get high res monotonic timestamp */
165 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 172 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
170 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 177 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
171 msg->ack = 0; /* not used */ 178 msg->ack = 0; /* not used */
172 msg->len = sizeof(*ev); 179 msg->len = sizeof(*ev);
180 msg->flags = 0; /* not used */
173 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 181 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
174} 182}
175 183
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
185 193
186 msg = (struct cn_msg *)buffer; 194 msg = (struct cn_msg *)buffer;
187 ev = (struct proc_event *)msg->data; 195 ev = (struct proc_event *)msg->data;
196 memset(&ev->event_data, 0, sizeof(ev->event_data));
188 get_seq(&msg->seq, &ev->cpu); 197 get_seq(&msg->seq, &ev->cpu);
189 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 198 ktime_get_ts(&ts); /* get high res monotonic timestamp */
190 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 199 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
203 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 212 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
204 msg->ack = 0; /* not used */ 213 msg->ack = 0; /* not used */
205 msg->len = sizeof(*ev); 214 msg->len = sizeof(*ev);
215 msg->flags = 0; /* not used */
206 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 216 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
207} 217}
208 218
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
218 228
219 msg = (struct cn_msg *)buffer; 229 msg = (struct cn_msg *)buffer;
220 ev = (struct proc_event *)msg->data; 230 ev = (struct proc_event *)msg->data;
231 memset(&ev->event_data, 0, sizeof(ev->event_data));
221 get_seq(&msg->seq, &ev->cpu); 232 get_seq(&msg->seq, &ev->cpu);
222 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 233 ktime_get_ts(&ts); /* get high res monotonic timestamp */
223 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 234 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
229 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 240 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
230 msg->ack = 0; /* not used */ 241 msg->ack = 0; /* not used */
231 msg->len = sizeof(*ev); 242 msg->len = sizeof(*ev);
243 msg->flags = 0; /* not used */
232 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 244 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
233} 245}
234 246
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
244 256
245 msg = (struct cn_msg *)buffer; 257 msg = (struct cn_msg *)buffer;
246 ev = (struct proc_event *)msg->data; 258 ev = (struct proc_event *)msg->data;
259 memset(&ev->event_data, 0, sizeof(ev->event_data));
247 get_seq(&msg->seq, &ev->cpu); 260 get_seq(&msg->seq, &ev->cpu);
248 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 261 ktime_get_ts(&ts); /* get high res monotonic timestamp */
249 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 262 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
254 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 267 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
255 msg->ack = 0; /* not used */ 268 msg->ack = 0; /* not used */
256 msg->len = sizeof(*ev); 269 msg->len = sizeof(*ev);
270 msg->flags = 0; /* not used */
257 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 271 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
258} 272}
259 273
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
269 283
270 msg = (struct cn_msg *)buffer; 284 msg = (struct cn_msg *)buffer;
271 ev = (struct proc_event *)msg->data; 285 ev = (struct proc_event *)msg->data;
286 memset(&ev->event_data, 0, sizeof(ev->event_data));
272 get_seq(&msg->seq, &ev->cpu); 287 get_seq(&msg->seq, &ev->cpu);
273 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 288 ktime_get_ts(&ts); /* get high res monotonic timestamp */
274 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 289 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
281 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 296 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
282 msg->ack = 0; /* not used */ 297 msg->ack = 0; /* not used */
283 msg->len = sizeof(*ev); 298 msg->len = sizeof(*ev);
299 msg->flags = 0; /* not used */
284 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 300 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
285} 301}
286 302
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
304 320
305 msg = (struct cn_msg *)buffer; 321 msg = (struct cn_msg *)buffer;
306 ev = (struct proc_event *)msg->data; 322 ev = (struct proc_event *)msg->data;
323 memset(&ev->event_data, 0, sizeof(ev->event_data));
307 msg->seq = rcvd_seq; 324 msg->seq = rcvd_seq;
308 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 325 ktime_get_ts(&ts); /* get high res monotonic timestamp */
309 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 326 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
313 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 330 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
314 msg->ack = rcvd_ack + 1; 331 msg->ack = rcvd_ack + 1;
315 msg->len = sizeof(*ev); 332 msg->len = sizeof(*ev);
333 msg->flags = 0; /* not used */
316 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 334 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
317} 335}
318 336
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa758942c..a36749f1e44a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
109 109
110 data = nlmsg_data(nlh); 110 data = nlmsg_data(nlh);
111 111
112 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, size);
113 113
114 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
115 115
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
157static void cn_rx_skb(struct sk_buff *__skb) 157static void cn_rx_skb(struct sk_buff *__skb)
158{ 158{
159 struct nlmsghdr *nlh; 159 struct nlmsghdr *nlh;
160 int err;
161 struct sk_buff *skb; 160 struct sk_buff *skb;
161 int len, err;
162 162
163 skb = skb_get(__skb); 163 skb = skb_get(__skb);
164 164
165 if (skb->len >= NLMSG_HDRLEN) { 165 if (skb->len >= NLMSG_HDRLEN) {
166 nlh = nlmsg_hdr(skb); 166 nlh = nlmsg_hdr(skb);
167 len = nlmsg_len(nlh);
167 168
168 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 169 if (len < (int)sizeof(struct cn_msg) ||
169 skb->len < nlh->nlmsg_len || 170 skb->len < nlh->nlmsg_len ||
170 nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { 171 len > CONNECTOR_MAX_MSG_SIZE) {
171 kfree_skb(skb); 172 kfree_skb(skb);
172 return; 173 return;
173 } 174 }
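
The cn_rx_skb() hunk above validates the netlink payload length (nlmsg_len(nlh), header already subtracted) rather than the raw nlh->nlmsg_len total, so a message whose payload is smaller than struct cn_msg can no longer pass just because the netlink header pushes its total length over the threshold. A small standalone sketch of the corrected arithmetic using the userspace netlink macros; the two size constants are illustrative stand-ins for the connector's own:

#include <linux/netlink.h>
#include <stdio.h>

#define CN_MSG_SIZE		20	/* illustrative: sizeof(struct cn_msg) */
#define CONNECTOR_MAX_MSG_SIZE	16384	/* illustrative: connector.h value */

/* Mirrors the fixed check: 'len' is the payload only, header stripped. */
static int payload_ok(const struct nlmsghdr *nlh, unsigned int skb_len)
{
	int len = nlh->nlmsg_len - NLMSG_HDRLEN;	/* what nlmsg_len(nlh) yields */

	return !(len < CN_MSG_SIZE ||
		 skb_len < nlh->nlmsg_len ||
		 len > CONNECTOR_MAX_MSG_SIZE);
}

int main(void)
{
	struct nlmsghdr nlh = { .nlmsg_len = NLMSG_LENGTH(4) };	/* 4-byte payload */

	/* passes the old total-length check, rejected by the payload check */
	printf("%s\n", payload_ok(&nlh, nlh.nlmsg_len) ? "accepted" : "rejected");
	return 0;
}
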
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..506fd23c7550 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -987,7 +987,11 @@ static int __init acpi_cpufreq_init(void)
987 int ret; 987 int ret;
988 988
989 if (acpi_disabled) 989 if (acpi_disabled)
990 return 0; 990 return -ENODEV;
991
992 /* don't keep reloading if cpufreq_driver exists */
993 if (cpufreq_get_current_driver())
994 return -EEXIST;
991 995
992 pr_debug("acpi_cpufreq_init\n"); 996 pr_debug("acpi_cpufreq_init\n");
993 997
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 78c49d8e0f4a..c522a95c0e16 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -229,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
229 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 229 if (of_property_read_u32(np, "clock-latency", &transition_latency))
230 transition_latency = CPUFREQ_ETERNAL; 230 transition_latency = CPUFREQ_ETERNAL;
231 231
232 if (cpu_reg) { 232 if (!IS_ERR(cpu_reg)) {
233 struct opp *opp; 233 struct opp *opp;
234 unsigned long min_uV, max_uV; 234 unsigned long min_uV, max_uV;
235 int i; 235 int i;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 89b3c52cd5c3..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
1460{ 1460{
1461 unsigned int ret_freq = 0; 1461 unsigned int ret_freq = 0;
1462 1462
1463 if (cpufreq_disabled() || !cpufreq_driver)
1464 return -ENOENT;
1465
1463 if (!down_read_trylock(&cpufreq_rwsem)) 1466 if (!down_read_trylock(&cpufreq_rwsem))
1464 return 0; 1467 return 0;
1465 1468
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
458err_put_node: 458err_put_node:
459 of_node_put(np); 459 of_node_put(np);
460 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); 460 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
461 return ret; 461 return ret;
462} 462}
463 463
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9733f29ed148..eb3fdc755000 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 int core_pct_busy; 51 int32_t core_pct_busy;
52 u64 aperf; 52 u64 aperf;
53 u64 mperf; 53 u64 mperf;
54 int freq; 54 int freq;
@@ -68,7 +68,7 @@ struct _pid {
68 int32_t i_gain; 68 int32_t i_gain;
69 int32_t d_gain; 69 int32_t d_gain;
70 int deadband; 70 int deadband;
71 int last_err; 71 int32_t last_err;
72}; 72};
73 73
74struct cpudata { 74struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
154} 154}
155 155
156static signed int pid_calc(struct _pid *pid, int busy) 156static signed int pid_calc(struct _pid *pid, int32_t busy)
157{ 157{
158 signed int err, result; 158 signed int result;
159 int32_t pterm, dterm, fp_error; 159 int32_t pterm, dterm, fp_error;
160 int32_t integral_limit; 160 int32_t integral_limit;
161 161
162 err = pid->setpoint - busy; 162 fp_error = int_tofp(pid->setpoint) - busy;
163 fp_error = int_tofp(err);
164 163
165 if (abs(err) <= pid->deadband) 164 if (abs(fp_error) <= int_tofp(pid->deadband))
166 return 0; 165 return 0;
167 166
168 pterm = mul_fp(pid->p_gain, fp_error); 167 pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
176 if (pid->integral < -integral_limit) 175 if (pid->integral < -integral_limit)
177 pid->integral = -integral_limit; 176 pid->integral = -integral_limit;
178 177
179 dterm = mul_fp(pid->d_gain, (err - pid->last_err)); 178 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
180 pid->last_err = err; 179 pid->last_err = fp_error;
181 180
182 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 181 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
183 182
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
367static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 366static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
368{ 367{
369 int max_perf = cpu->pstate.turbo_pstate; 368 int max_perf = cpu->pstate.turbo_pstate;
369 int max_perf_adj;
370 int min_perf; 370 int min_perf;
371 if (limits.no_turbo) 371 if (limits.no_turbo)
372 max_perf = cpu->pstate.max_pstate; 372 max_perf = cpu->pstate.max_pstate;
373 373
374 max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 374 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
375 *max = clamp_t(int, max_perf, 375 *max = clamp_t(int, max_perf_adj,
376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
377 377
378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); 378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
384{ 384{
385 int max_perf, min_perf; 385 int max_perf, min_perf;
386 u64 val;
386 387
387 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 388 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
388 389
@@ -394,8 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 395 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 396
396 cpu->pstate.current_pstate = pstate; 397 cpu->pstate.current_pstate = pstate;
397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 398 val = pstate << 8;
399 if (limits.no_turbo)
400 val |= (u64)1 << 32;
398 401
402 wrmsrl(MSR_IA32_PERF_CTL, val);
399} 403}
400 404
401static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) 405static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -432,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
432 struct sample *sample) 436 struct sample *sample)
433{ 437{
434 u64 core_pct; 438 u64 core_pct;
435 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 439 core_pct = div64_u64(int_tofp(sample->aperf * 100),
436 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 440 sample->mperf);
441 sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
437 442
438 sample->core_pct_busy = core_pct; 443 sample->core_pct_busy = core_pct;
439} 444}
@@ -465,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
465 mod_timer_pinned(&cpu->timer, jiffies + delay); 470 mod_timer_pinned(&cpu->timer, jiffies + delay);
466} 471}
467 472
468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 473static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
469{ 474{
470 int32_t busy_scaled;
471 int32_t core_busy, max_pstate, current_pstate; 475 int32_t core_busy, max_pstate, current_pstate;
472 476
473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 477 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
474 max_pstate = int_tofp(cpu->pstate.max_pstate); 478 max_pstate = int_tofp(cpu->pstate.max_pstate);
475 current_pstate = int_tofp(cpu->pstate.current_pstate); 479 current_pstate = int_tofp(cpu->pstate.current_pstate);
476 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 480 return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
477
478 return fp_toint(busy_scaled);
479} 481}
480 482
481static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 483static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
482{ 484{
483 int busy_scaled; 485 int32_t busy_scaled;
484 struct _pid *pid; 486 struct _pid *pid;
485 signed int ctl = 0; 487 signed int ctl = 0;
486 int steps; 488 int steps;
@@ -634,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
634 636
635static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 637static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
636{ 638{
637 int rc, min_pstate, max_pstate;
638 struct cpudata *cpu; 639 struct cpudata *cpu;
640 int rc;
639 641
640 rc = intel_pstate_init_cpu(policy->cpu); 642 rc = intel_pstate_init_cpu(policy->cpu);
641 if (rc) 643 if (rc)
@@ -649,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
649 else 651 else
650 policy->policy = CPUFREQ_POLICY_POWERSAVE; 652 policy->policy = CPUFREQ_POLICY_POWERSAVE;
651 653
652 intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); 654 policy->min = cpu->pstate.min_pstate * 100000;
653 policy->min = min_pstate * 100000; 655 policy->max = cpu->pstate.turbo_pstate * 100000;
654 policy->max = max_pstate * 100000;
655 656
656 /* cpuinfo and default policy values */ 657 /* cpuinfo and default policy values */
657 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; 658 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
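
Several of the intel_pstate hunks above move the PID error, core_pct_busy and the scaled-busy value from plain int into the driver's fixed-point representation, so the fractional part is no longer truncated before the controller sees it. A standalone sketch of the helpers involved, assuming the 8-bit fractional width the driver used in this era (the exact width is incidental to the point):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* assumed; only illustrative */

static int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static int32_t fp_toint(int32_t x) { return x >> FRAC_BITS; }

static int32_t mul_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x * y) >> FRAC_BITS);
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
	/* busy_scaled = core_busy * max_pstate / current_pstate, kept in
	 * fixed point instead of being truncated to an int, as in
	 * intel_pstate_get_scaled_busy() after the change above. */
	int32_t core_busy = int_tofp(97);		/* 97% busy */
	int32_t busy_scaled = mul_fp(core_busy,
				     div_fp(int_tofp(28), int_tofp(24)));

	printf("scaled busy ~= %.2f%% (truncated: %d%%)\n",
	       busy_scaled / (double)(1 << FRAC_BITS), fp_toint(busy_scaled));
	return 0;
}
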
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 8a72b0c555f8..15631f92ab7d 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
166 if (freq->frequency == CPUFREQ_ENTRY_INVALID) 166 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
167 continue; 167 continue;
168 168
169 dvfs = &s3c64xx_dvfs_table[freq->index]; 169 dvfs = &s3c64xx_dvfs_table[freq->driver_data];
170 found = 0; 170 found = 0;
171 171
172 for (i = 0; i < count; i++) { 172 for (i = 0; i < count; i++) {
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 19e364fa5955..3f418166ce02 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
113 unsigned int target_freq, unsigned int relation) 113 unsigned int target_freq, unsigned int relation)
114{ 114{
115 struct cpufreq_freqs freqs; 115 struct cpufreq_freqs freqs;
116 unsigned long newfreq; 116 long newfreq;
117 struct clk *srcclk; 117 struct clk *srcclk;
118 int index, ret, mult = 1; 118 int index, ret, mult = 1;
119 119
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
201 select TI_PRIV_EDMA
201 default n 202 default n
202 help 203 help
203 Enable support for the TI EDMA controller. This DMA 204 Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..10b577fcf48d 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -305,7 +305,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
305 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 305 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
306 EDMA_SLOT_ANY); 306 EDMA_SLOT_ANY);
307 if (echan->slot[i] < 0) { 307 if (echan->slot[i] < 0) {
308 kfree(edesc);
308 dev_err(dev, "Failed to allocate slot\n"); 309 dev_err(dev, "Failed to allocate slot\n");
310 kfree(edesc);
309 return NULL; 311 return NULL;
310 } 312 }
311 } 313 }
@@ -345,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
345 ccnt = sg_dma_len(sg) / (acnt * bcnt); 347 ccnt = sg_dma_len(sg) / (acnt * bcnt);
346 if (ccnt > (SZ_64K - 1)) { 348 if (ccnt > (SZ_64K - 1)) {
347 dev_err(dev, "Exceeded max SG segment size\n"); 349 dev_err(dev, "Exceeded max SG segment size\n");
350 kfree(edesc);
348 return NULL; 351 return NULL;
349 } 352 }
350 cidx = acnt * bcnt; 353 cidx = acnt * bcnt;
@@ -749,6 +752,6 @@ static void __exit edma_exit(void)
749} 752}
750module_exit(edma_exit); 753module_exit(edma_exit);
751 754
752MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 755MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
753MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 756MODULE_DESCRIPTION("TI EDMA DMA engine driver");
754MODULE_LICENSE("GPL v2"); 757MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags;
440 441
441 spin_lock(&imxdma->lock); 442 spin_lock_irqsave(&imxdma->lock, flags);
442 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
443 spin_unlock(&imxdma->lock); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
444 goto out; 445 goto out;
445 } 446 }
446 447
447 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
448 struct imxdma_desc, 449 struct imxdma_desc,
449 node); 450 node);
450 spin_unlock(&imxdma->lock); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
451 452
452 if (desc->sg) { 453 if (desc->sg) {
453 u32 tmp; 454 u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
519{ 520{
520 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
521 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 unsigned long flags;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 spin_lock_irqsave(&imxdma->lock, flags);
531 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
532 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
533 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
537 slot = i; 536 slot = i;
538 break; 537 break;
539 } 538 }
540 if (slot < 0) { 539 if (slot < 0)
541 spin_unlock_irqrestore(&imxdma->lock, flags);
542 return -EBUSY; 540 return -EBUSY;
543 }
544 541
545 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
546 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
549 546
550 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
551 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
552 spin_unlock_irqrestore(&imxdma->lock, flags);
553 549
554 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
555 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
625 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
626 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
627 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags;
628 625
629 spin_lock(&imxdma->lock); 626 spin_lock_irqsave(&imxdma->lock, flags);
630 627
631 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
632 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
633 goto out; 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return;
634 } 632 }
635 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
636 634
637 if (desc->desc.callback)
638 desc->desc.callback(desc->desc.callback_param);
639
640 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
641 * and dont mark the descriptor as complete. 636 * and dont mark the descriptor as complete.
642 * Only in non-cyclic cases it would be marked as complete 637 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
663 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
664 } 659 }
665out: 660out:
666 spin_unlock(&imxdma->lock); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662
663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param);
665
667} 666}
668 667
669static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
883 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
884 883
885 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
886 sizeof(struct scatterlist), GFP_KERNEL); 885 sizeof(struct scatterlist), GFP_ATOMIC);
887 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
888 return NULL; 887 return NULL;
889 888
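
The imx-dma hunks switch the engine lock to the irqsave variants, move the client completion callback out from under that lock, and allocate the cyclic sg_list with GFP_ATOMIC since dmaengine prep callbacks may run in atomic context. A rough sketch of the tasklet side of that pattern, with made-up names (my_chan, my_desc):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/dmaengine.h>

struct my_desc {
        struct list_head node;
        struct dma_async_tx_descriptor txd;
};

struct my_chan {
        spinlock_t lock;
        struct list_head active;
};

static void my_complete_first(struct my_chan *c)
{
        struct my_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);     /* lock is also taken in IRQ context */
        if (list_empty(&c->active)) {
                spin_unlock_irqrestore(&c->lock, flags);
                return;                         /* terminate_all may have emptied it */
        }
        desc = list_first_entry(&c->active, struct my_desc, node);
        list_del(&desc->node);
        spin_unlock_irqrestore(&c->lock, flags);

        /* run the client callback only after the lock is dropped */
        if (desc->txd.callback)
                desc->txd.callback(desc->txd.callback_param);
}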
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 45a520281ce1..ebad84591a6e 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
93 void __iomem *base; 93 void __iomem *base;
94 const struct hpb_dmae_slave_config *cfg; 94 const struct hpb_dmae_slave_config *cfg;
95 char dev_id[16]; /* unique name per DMAC of channel */ 95 char dev_id[16]; /* unique name per DMAC of channel */
96 dma_addr_t slave_addr;
96}; 97};
97 98
98struct hpb_dmae_device { 99struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
432 hpb_chan->xfer_mode = XFER_DOUBLE; 433 hpb_chan->xfer_mode = XFER_DOUBLE;
433 } else { 434 } else {
434 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); 435 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
435 shdma_free_irq(&hpb_chan->shdma_chan);
436 return -EINVAL; 436 return -EINVAL;
437 } 437 }
438 438
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
446 return 0; 446 return 0;
447} 447}
448 448
449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) 449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
450 dma_addr_t slave_addr, bool try)
450{ 451{
451 struct hpb_dmae_chan *chan = to_chan(schan); 452 struct hpb_dmae_chan *chan = to_chan(schan);
452 const struct hpb_dmae_slave_config *sc = 453 const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
457 if (try) 458 if (try)
458 return 0; 459 return 0;
459 chan->cfg = sc; 460 chan->cfg = sc;
461 chan->slave_addr = slave_addr ? : sc->addr;
460 return hpb_dmae_alloc_chan_resources(chan, sc); 462 return hpb_dmae_alloc_chan_resources(chan, sc);
461} 463}
462 464
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
468{ 470{
469 struct hpb_dmae_chan *chan = to_chan(schan); 471 struct hpb_dmae_chan *chan = to_chan(schan);
470 472
471 return chan->cfg->addr; 473 return chan->slave_addr;
472} 474}
473 475
474static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) 476static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
614 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { 616 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
615 BUG_ON(!schan); 617 BUG_ON(!schan);
616 618
617 shdma_free_irq(schan);
618 shdma_chan_remove(schan); 619 shdma_chan_remove(schan);
619 } 620 }
620 dma_dev->chancnt = 0; 621 dma_dev->chancnt = 0;
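
The set_slave hook above now receives an explicit slave address and caches it per channel, falling back to the static per-slave default through GCC's a ?: b shorthand. A small, self-contained illustration of that operator (the addresses are made up):

#include <stdio.h>

/* "a ?: b" is a GNU C extension: a is evaluated once and returned if
 * non-zero, otherwise b is returned. */
static unsigned long pick_addr(unsigned long requested, unsigned long dflt)
{
        return requested ?: dflt;  /* same as: requested ? requested : dflt */
}

int main(void)
{
        printf("%#lx\n", pick_addr(0, 0xffd60000UL));             /* default   */
        printf("%#lx\n", pick_addr(0xffd70000UL, 0xffd60000UL));  /* requested */
        return 0;
}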
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 2d9ca6055e5e..41b5913ddabe 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data); 248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
249 struct irq_chip *chip = irq_data_get_irq_chip(data); 249 struct irq_chip *chip = irq_data_get_irq_chip(data);
250 u32 base, pin, mask; 250 u32 base, pin, mask;
251 unsigned long reg, pending; 251 unsigned long reg, ena, pending;
252 unsigned virq; 252 unsigned virq;
253 253
254 /* check from GPIO controller which pin triggered the interrupt */ 254 /* check from GPIO controller which pin triggered the interrupt */
255 for (base = 0; base < lg->chip.ngpio; base += 32) { 255 for (base = 0; base < lg->chip.ngpio; base += 32) {
256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); 256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
257 ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
257 258
258 while ((pending = inl(reg))) { 259 while ((pending = (inl(reg) & inl(ena)))) {
259 pin = __ffs(pending); 260 pin = __ffs(pending);
260 mask = BIT(pin); 261 mask = BIT(pin);
261 /* Clear before handling so we don't lose an edge */ 262 /* Clear before handling so we don't lose an edge */
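
The lynxpoint handler above now ANDs the raw status with the interrupt-enable register, so a pending-but-masked pin can no longer keep the demux loop busy. A sketch of that shape with hypothetical accessors (read_stat, read_ena, handle_line):

#include <linux/bitops.h>

extern unsigned long read_stat(int bank);
extern unsigned long read_ena(int bank);
extern void handle_line(int bank, unsigned int bit);

static void demux_bank(int bank)
{
        unsigned long pending;
        unsigned int bit;

        /* service only bits that are both pending and enabled */
        while ((pending = (read_stat(bank) & read_ena(bank)))) {
                bit = __ffs(pending);   /* lowest set bit */
                handle_line(bank, bit); /* handler clears its status bit */
        }
}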
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ff43552d472..89675f862308 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@ struct gpio_bank {
63 struct gpio_chip chip; 63 struct gpio_chip chip;
64 struct clk *dbck; 64 struct clk *dbck;
65 u32 mod_usage; 65 u32 mod_usage;
66 u32 irq_usage;
66 u32 dbck_enable_mask; 67 u32 dbck_enable_mask;
67 bool dbck_enabled; 68 bool dbck_enabled;
68 struct device *dev; 69 struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
86#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
87#define GPIO_MOD_CTRL_BIT BIT(0) 88#define GPIO_MOD_CTRL_BIT BIT(0)
88 89
90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
91#define LINE_USED(line, offset) (line & (1 << offset))
92
89static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
90{ 94{
91 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
420 return 0; 424 return 0;
421} 425}
422 426
427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
428{
429 if (bank->regs->pinctrl) {
430 void __iomem *reg = bank->base + bank->regs->pinctrl;
431
432 /* Claim the pin for MPU */
433 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
434 }
435
436 if (bank->regs->ctrl && !BANK_USED(bank)) {
437 void __iomem *reg = bank->base + bank->regs->ctrl;
438 u32 ctrl;
439
440 ctrl = __raw_readl(reg);
441 /* Module is enabled, clocks are not gated */
442 ctrl &= ~GPIO_MOD_CTRL_BIT;
443 __raw_writel(ctrl, reg);
444 bank->context.ctrl = ctrl;
445 }
446}
447
448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
449{
450 void __iomem *base = bank->base;
451
452 if (bank->regs->wkup_en &&
453 !LINE_USED(bank->mod_usage, offset) &&
454 !LINE_USED(bank->irq_usage, offset)) {
455 /* Disable wake-up during idle for dynamic tick */
456 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
457 bank->context.wake_en =
458 __raw_readl(bank->base + bank->regs->wkup_en);
459 }
460
461 if (bank->regs->ctrl && !BANK_USED(bank)) {
462 void __iomem *reg = bank->base + bank->regs->ctrl;
463 u32 ctrl;
464
465 ctrl = __raw_readl(reg);
466 /* Module is disabled, clocks are gated */
467 ctrl |= GPIO_MOD_CTRL_BIT;
468 __raw_writel(ctrl, reg);
469 bank->context.ctrl = ctrl;
470 }
471}
472
473static int gpio_is_input(struct gpio_bank *bank, int mask)
474{
475 void __iomem *reg = bank->base + bank->regs->direction;
476
477 return __raw_readl(reg) & mask;
478}
479
423static int gpio_irq_type(struct irq_data *d, unsigned type) 480static int gpio_irq_type(struct irq_data *d, unsigned type)
424{ 481{
425 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 482 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
426 unsigned gpio = 0; 483 unsigned gpio = 0;
427 int retval; 484 int retval;
428 unsigned long flags; 485 unsigned long flags;
486 unsigned offset;
429 487
430 if (WARN_ON(!bank->mod_usage)) 488 if (!BANK_USED(bank))
431 return -EINVAL; 489 pm_runtime_get_sync(bank->dev);
432 490
433#ifdef CONFIG_ARCH_OMAP1 491#ifdef CONFIG_ARCH_OMAP1
434 if (d->irq > IH_MPUIO_BASE) 492 if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
446 return -EINVAL; 504 return -EINVAL;
447 505
448 spin_lock_irqsave(&bank->lock, flags); 506 spin_lock_irqsave(&bank->lock, flags);
449 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 507 offset = GPIO_INDEX(bank, gpio);
508 retval = _set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) {
510 _enable_gpio_module(bank, offset);
511 _set_gpio_direction(bank, offset, 1);
512 } else if (!gpio_is_input(bank, 1 << offset)) {
513 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL;
515 }
516
517 bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
450 spin_unlock_irqrestore(&bank->lock, flags); 518 spin_unlock_irqrestore(&bank->lock, flags);
451 519
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
603 * If this is the first gpio_request for the bank, 671 * If this is the first gpio_request for the bank,
604 * enable the bank module. 672 * enable the bank module.
605 */ 673 */
606 if (!bank->mod_usage) 674 if (!BANK_USED(bank))
607 pm_runtime_get_sync(bank->dev); 675 pm_runtime_get_sync(bank->dev);
608 676
609 spin_lock_irqsave(&bank->lock, flags); 677 spin_lock_irqsave(&bank->lock, flags);
610 /* Set trigger to none. You need to enable the desired trigger with 678 /* Set trigger to none. You need to enable the desired trigger with
611 * request_irq() or set_irq_type(). 679 * request_irq() or set_irq_type(). Only do this if the IRQ line has
680 * not already been requested.
612 */ 681 */
613 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 682 if (!LINE_USED(bank->irq_usage, offset)) {
614 683 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
615 if (bank->regs->pinctrl) { 684 _enable_gpio_module(bank, offset);
616 void __iomem *reg = bank->base + bank->regs->pinctrl;
617
618 /* Claim the pin for MPU */
619 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
620 }
621
622 if (bank->regs->ctrl && !bank->mod_usage) {
623 void __iomem *reg = bank->base + bank->regs->ctrl;
624 u32 ctrl;
625
626 ctrl = __raw_readl(reg);
627 /* Module is enabled, clocks are not gated */
628 ctrl &= ~GPIO_MOD_CTRL_BIT;
629 __raw_writel(ctrl, reg);
630 bank->context.ctrl = ctrl;
631 } 685 }
632
633 bank->mod_usage |= 1 << offset; 686 bank->mod_usage |= 1 << offset;
634
635 spin_unlock_irqrestore(&bank->lock, flags); 687 spin_unlock_irqrestore(&bank->lock, flags);
636 688
637 return 0; 689 return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
640static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
641{ 693{
642 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 694 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
643 void __iomem *base = bank->base;
644 unsigned long flags; 695 unsigned long flags;
645 696
646 spin_lock_irqsave(&bank->lock, flags); 697 spin_lock_irqsave(&bank->lock, flags);
647
648 if (bank->regs->wkup_en) {
649 /* Disable wake-up during idle for dynamic tick */
650 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
651 bank->context.wake_en =
652 __raw_readl(bank->base + bank->regs->wkup_en);
653 }
654
655 bank->mod_usage &= ~(1 << offset); 698 bank->mod_usage &= ~(1 << offset);
656 699 _disable_gpio_module(bank, offset);
657 if (bank->regs->ctrl && !bank->mod_usage) {
658 void __iomem *reg = bank->base + bank->regs->ctrl;
659 u32 ctrl;
660
661 ctrl = __raw_readl(reg);
662 /* Module is disabled, clocks are gated */
663 ctrl |= GPIO_MOD_CTRL_BIT;
664 __raw_writel(ctrl, reg);
665 bank->context.ctrl = ctrl;
666 }
667
668 _reset_gpio(bank, bank->chip.base + offset); 700 _reset_gpio(bank, bank->chip.base + offset);
669 spin_unlock_irqrestore(&bank->lock, flags); 701 spin_unlock_irqrestore(&bank->lock, flags);
670 702
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
672 * If this is the last gpio to be freed in the bank, 704 * If this is the last gpio to be freed in the bank,
673 * disable the bank module. 705 * disable the bank module.
674 */ 706 */
675 if (!bank->mod_usage) 707 if (!BANK_USED(bank))
676 pm_runtime_put(bank->dev); 708 pm_runtime_put(bank->dev);
677} 709}
678 710
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
762 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 794 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
763 unsigned int gpio = irq_to_gpio(bank, d->hwirq); 795 unsigned int gpio = irq_to_gpio(bank, d->hwirq);
764 unsigned long flags; 796 unsigned long flags;
797 unsigned offset = GPIO_INDEX(bank, gpio);
765 798
766 spin_lock_irqsave(&bank->lock, flags); 799 spin_lock_irqsave(&bank->lock, flags);
800 bank->irq_usage &= ~(1 << offset);
801 _disable_gpio_module(bank, offset);
767 _reset_gpio(bank, gpio); 802 _reset_gpio(bank, gpio);
768 spin_unlock_irqrestore(&bank->lock, flags); 803 spin_unlock_irqrestore(&bank->lock, flags);
804
805 /*
806 * If this is the last IRQ to be freed in the bank,
807 * disable the bank module.
808 */
809 if (!BANK_USED(bank))
810 pm_runtime_put(bank->dev);
769} 811}
770 812
771static void gpio_ack_irq(struct irq_data *d) 813static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
897 return 0; 939 return 0;
898} 940}
899 941
900static int gpio_is_input(struct gpio_bank *bank, int mask)
901{
902 void __iomem *reg = bank->base + bank->regs->direction;
903
904 return __raw_readl(reg) & mask;
905}
906
907static int gpio_get(struct gpio_chip *chip, unsigned offset) 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
908{ 943{
909 struct gpio_bank *bank; 944 struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
922{ 957{
923 struct gpio_bank *bank; 958 struct gpio_bank *bank;
924 unsigned long flags; 959 unsigned long flags;
960 int retval = 0;
925 961
926 bank = container_of(chip, struct gpio_bank, chip); 962 bank = container_of(chip, struct gpio_bank, chip);
927 spin_lock_irqsave(&bank->lock, flags); 963 spin_lock_irqsave(&bank->lock, flags);
964
965 if (LINE_USED(bank->irq_usage, offset)) {
966 retval = -EINVAL;
967 goto exit;
968 }
969
928 bank->set_dataout(bank, offset, value); 970 bank->set_dataout(bank, offset, value);
929 _set_gpio_direction(bank, offset, 0); 971 _set_gpio_direction(bank, offset, 0);
972
973exit:
930 spin_unlock_irqrestore(&bank->lock, flags); 974 spin_unlock_irqrestore(&bank->lock, flags);
931 return 0; 975 return retval;
932} 976}
933 977
934static int gpio_debounce(struct gpio_chip *chip, unsigned offset, 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
1400 struct gpio_bank *bank; 1444 struct gpio_bank *bank;
1401 1445
1402 list_for_each_entry(bank, &omap_gpio_list, node) { 1446 list_for_each_entry(bank, &omap_gpio_list, node) {
1403 if (!bank->mod_usage || !bank->loses_context) 1447 if (!BANK_USED(bank) || !bank->loses_context)
1404 continue; 1448 continue;
1405 1449
1406 bank->power_mode = pwr_mode; 1450 bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
1414 struct gpio_bank *bank; 1458 struct gpio_bank *bank;
1415 1459
1416 list_for_each_entry(bank, &omap_gpio_list, node) { 1460 list_for_each_entry(bank, &omap_gpio_list, node) {
1417 if (!bank->mod_usage || !bank->loses_context) 1461 if (!BANK_USED(bank) || !bank->loses_context)
1418 continue; 1462 continue;
1419 1463
1420 pm_runtime_get_sync(bank->dev); 1464 pm_runtime_get_sync(bank->dev);
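
The gpio-omap changes track GPIO and IRQ users in separate bitmasks and treat the bank as busy while either is non-zero, so runtime PM gets and puts stay balanced whether a line is claimed through gpio_request() or only as an interrupt. A stripped-down sketch of that bookkeeping (the struct and helpers are illustrative, not the driver's own):

struct bank_usage {
        unsigned int mod_usage;  /* lines requested as GPIOs */
        unsigned int irq_usage;  /* lines requested as IRQs  */
};

#define BANK_USED(b)          ((b)->mod_usage || (b)->irq_usage)
#define LINE_USED(mask, off)  ((mask) & (1u << (off)))

/* Claim a line for IRQ use; power the bank up only for its first user. */
static void claim_irq_line(struct bank_usage *b, unsigned int off,
                           void (*runtime_get)(void))
{
        if (!BANK_USED(b))
                runtime_get();
        b->irq_usage |= 1u << off;
}

/* Release it; power the bank down once nobody uses it at all. */
static void release_irq_line(struct bank_usage *b, unsigned int off,
                             void (*runtime_put)(void))
{
        b->irq_usage &= ~(1u << off);
        if (!BANK_USED(b))
                runtime_put();
}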
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e3745eb07570..6038966ab045 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
293 if (pdata) { 293 if (pdata) {
294 p->config = *pdata; 294 p->config = *pdata;
295 } else if (IS_ENABLED(CONFIG_OF) && np) { 295 } else if (IS_ENABLED(CONFIG_OF) && np) {
296 ret = of_parse_phandle_with_args(np, "gpio-ranges", 296 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
297 "#gpio-range-cells", 0, &args); 297 &args);
298 p->config.number_of_pins = ret == 0 && args.args_count == 3 298 p->config.number_of_pins = ret == 0 ? args.args[2]
299 ? args.args[2]
300 : RCAR_MAX_GPIO_PER_BANK; 299 : RCAR_MAX_GPIO_PER_BANK;
301 p->config.gpio_base = -1; 300 p->config.gpio_base = -1;
302 } 301 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86ef3461ec06..0dee0e0c247a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
136 */ 136 */
137static int desc_to_gpio(const struct gpio_desc *desc) 137static int desc_to_gpio(const struct gpio_desc *desc)
138{ 138{
139 return desc->chip->base + gpio_chip_hwgpio(desc); 139 return desc - &gpio_desc[0];
140} 140}
141 141
142 142
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1398 int status = -EPROBE_DEFER; 1398 int status = -EPROBE_DEFER;
1399 unsigned long flags; 1399 unsigned long flags;
1400 1400
1401 if (!desc || !desc->chip) { 1401 if (!desc) {
1402 pr_warn("%s: invalid GPIO\n", __func__); 1402 pr_warn("%s: invalid GPIO\n", __func__);
1403 return -EINVAL; 1403 return -EINVAL;
1404 } 1404 }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1406 spin_lock_irqsave(&gpio_lock, flags); 1406 spin_lock_irqsave(&gpio_lock, flags);
1407 1407
1408 chip = desc->chip; 1408 chip = desc->chip;
1409 if (chip == NULL)
1410 goto done;
1409 1411
1410 if (!try_module_get(chip->owner)) 1412 if (!try_module_get(chip->owner))
1411 goto done; 1413 goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e572dd20bdee..05ad9ba0a67e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
402 cmd = ioctl->cmd_drv; 402 cmd = ioctl->cmd_drv;
403 } 403 }
404 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { 404 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
405 u32 drv_size;
406
405 ioctl = &drm_ioctls[nr]; 407 ioctl = &drm_ioctls[nr];
406 cmd = ioctl->cmd; 408
409 drv_size = _IOC_SIZE(ioctl->cmd);
407 usize = asize = _IOC_SIZE(cmd); 410 usize = asize = _IOC_SIZE(cmd);
411 if (drv_size > asize)
412 asize = drv_size;
413
414 cmd = ioctl->cmd;
408 } else 415 } else
409 goto err_i1; 416 goto err_i1;
410 417
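
The drm_ioctl hunk sizes the kernel copy of a core ioctl argument as the larger of what userspace encoded in the ioctl number and what the driver table declares, so a caller built against an older, smaller struct still gets the newer fields zero-filled. A hedged, userspace-style sketch of that sizing (memcpy stands in for copy_from_user):

#include <stdlib.h>
#include <string.h>

static void *fetch_ioctl_arg(const void *user_buf, size_t usize, size_t drv_size)
{
        size_t asize = usize > drv_size ? usize : drv_size;
        void *kbuf = calloc(1, asize);          /* zero-filled allocation */

        if (kbuf)
                memcpy(kbuf, user_buf, usize);  /* copy only what the caller passed */
        return kbuf;
}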
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 1688ff500513..830f7501cb4d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
2925 /* Speaker Allocation Data Block */ 2925 /* Speaker Allocation Data Block */
2926 if (dbl == 3) { 2926 if (dbl == 3) {
2927 *sadb = kmalloc(dbl, GFP_KERNEL); 2927 *sadb = kmalloc(dbl, GFP_KERNEL);
2928 if (!*sadb)
2929 return -ENOMEM;
2928 memcpy(*sadb, &db[1], dbl); 2930 memcpy(*sadb, &db[1], dbl);
2929 count = dbl; 2931 count = dbl;
2930 break; 2932 break;
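
The one-line EDID fix is the usual allocation rule: test the kmalloc() result before memcpy()ing into it. In miniature, with an out-parameter shaped like the speaker-allocation path above:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int copy_block(u8 **out, const u8 *src, int len)
{
        *out = kmalloc(len, GFP_KERNEL);
        if (!*out)
                return -ENOMEM;         /* bail before touching the buffer */
        memcpy(*out, src, len);
        return len;                     /* number of bytes copied */
}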
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6f6cc7fc133..3d13ca6e257f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,14 +416,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
416 return; 416 return;
417 417
418 /* 418 /*
419 * fbdev->blank can be called from irq context in case of a panic.
420 * Since we already have our own special panic handler which will
421 * restore the fbdev console mode completely, just bail out early.
422 */
423 if (oops_in_progress)
424 return;
425
426 /*
427 * For each CRTC in this fb, turn the connectors on/off. 419 * For each CRTC in this fb, turn the connectors on/off.
428 */ 420 */
429 drm_modeset_lock_all(dev); 421 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 92babac362ec..2db731f00930 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
204 if (IS_ERR(pages)) 204 if (IS_ERR(pages))
205 return PTR_ERR(pages); 205 return PTR_ERR(pages);
206 206
207 gt->npage = gt->gem.size / PAGE_SIZE;
207 gt->pages = pages; 208 gt->pages = pages;
208 209
209 return 0; 210 return 0;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); 707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
708 break; 708 break;
709 case DRM_MODE_DPMS_OFF: 709 case DRM_MODE_DPMS_OFF:
710 /* disable audio and video ports */ 710 /* disable video ports */
711 reg_write(encoder, REG_ENA_AP, 0x00);
712 reg_write(encoder, REG_ENA_VP_0, 0x00); 711 reg_write(encoder, REG_ENA_VP_0, 0x00);
713 reg_write(encoder, REG_ENA_VP_1, 0x00); 712 reg_write(encoder, REG_ENA_VP_1, 0x00);
714 reg_write(encoder, REG_ENA_VP_2, 0x00); 713 reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c27a21034a5e..d5c784d48671 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
1290 * then we do not take part in VGA arbitration and the 1290 * then we do not take part in VGA arbitration and the
1291 * vga_client_register() fails with -ENODEV. 1291 * vga_client_register() fails with -ENODEV.
1292 */ 1292 */
1293 if (!HAS_PCH_SPLIT(dev)) { 1293 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1294 ret = vga_client_register(dev->pdev, dev, NULL, 1294 if (ret && ret != -ENODEV)
1295 i915_vga_set_decode); 1295 goto out;
1296 if (ret && ret != -ENODEV)
1297 goto out;
1298 }
1299 1296
1300 intel_register_dsm_handler(); 1297 intel_register_dsm_handler();
1301 1298
@@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1351 */ 1348 */
1352 intel_fbdev_initial_config(dev); 1349 intel_fbdev_initial_config(dev);
1353 1350
1354 /*
1355 * Must do this after fbcon init so that
1356 * vgacon_save_screen() works during the handover.
1357 */
1358 i915_disable_vga_mem(dev);
1359
1360 /* Only enable hotplug handling once the fbdev is fully set up. */ 1351 /* Only enable hotplug handling once the fbdev is fully set up. */
1361 dev_priv->enable_hotplug_processing = true; 1352 dev_priv->enable_hotplug_processing = true;
1362 1353
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 69d8ed5416c3..2ad27880cd04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
505 intel_modeset_suspend_hw(dev); 505 intel_modeset_suspend_hw(dev);
506 } 506 }
507 507
508 i915_gem_suspend_gtt_mappings(dev);
509
508 i915_save_state(dev); 510 i915_save_state(dev);
509 511
510 intel_opregion_fini(dev); 512 intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
648 mutex_lock(&dev->struct_mutex); 650 mutex_lock(&dev->struct_mutex);
649 i915_gem_restore_gtt_mappings(dev); 651 i915_gem_restore_gtt_mappings(dev);
650 mutex_unlock(&dev->struct_mutex); 652 mutex_unlock(&dev->struct_mutex);
651 } 653 } else if (drm_core_check_feature(dev, DRIVER_MODESET))
654 i915_check_and_clear_faults(dev);
652 655
653 __i915_drm_thaw(dev); 656 __i915_drm_thaw(dev);
654 657
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3a86dc..ab0f2c0a440c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -497,10 +497,12 @@ struct i915_address_space {
497 497
498 /* FIXME: Need a more generic return type */ 498 /* FIXME: Need a more generic return type */
499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
500 enum i915_cache_level level); 500 enum i915_cache_level level,
501 bool valid); /* Create a valid PTE */
501 void (*clear_range)(struct i915_address_space *vm, 502 void (*clear_range)(struct i915_address_space *vm,
502 unsigned int first_entry, 503 unsigned int first_entry,
503 unsigned int num_entries); 504 unsigned int num_entries,
505 bool use_scratch);
504 void (*insert_entries)(struct i915_address_space *vm, 506 void (*insert_entries)(struct i915_address_space *vm,
505 struct sg_table *st, 507 struct sg_table *st,
506 unsigned int first_entry, 508 unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
2065void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2067void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
2066 struct drm_i915_gem_object *obj); 2068 struct drm_i915_gem_object *obj);
2067 2069
2070void i915_check_and_clear_faults(struct drm_device *dev);
2071void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2068void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2072void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2069int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2073int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2070void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 2074void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df9253d890ee..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4800 4800
4801 if (!mutex_trylock(&dev->struct_mutex)) { 4801 if (!mutex_trylock(&dev->struct_mutex)) {
4802 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4802 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4803 return SHRINK_STOP; 4803 return 0;
4804 4804
4805 if (dev_priv->mm.shrinker_no_lock_stealing) 4805 if (dev_priv->mm.shrinker_no_lock_stealing)
4806 return SHRINK_STOP; 4806 return 0;
4807 4807
4808 unlock = false; 4808 unlock = false;
4809 } 4809 }
@@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4901 4901
4902 if (!mutex_trylock(&dev->struct_mutex)) { 4902 if (!mutex_trylock(&dev->struct_mutex)) {
4903 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4903 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4904 return 0; 4904 return SHRINK_STOP;
4905 4905
4906 if (dev_priv->mm.shrinker_no_lock_stealing) 4906 if (dev_priv->mm.shrinker_no_lock_stealing)
4907 return 0; 4907 return SHRINK_STOP;
4908 4908
4909 unlock = false; 4909 unlock = false;
4910 } 4910 }
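
The two i915_gem hunks swap the return values back to the current shrinker convention: the count callback reports 0 when it cannot take its lock (nothing countable right now), while the scan callback returns SHRINK_STOP to tell the core to stop pushing. A sketch with hypothetical locking helpers:

#include <linux/types.h>
#include <linux/shrinker.h>

extern bool my_trylock(void);
extern void my_unlock(void);
extern unsigned long my_count_locked(void);
extern unsigned long my_scan_locked(unsigned long nr);

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long n;

        if (!my_trylock())
                return 0;               /* "nothing countable right now" */
        n = my_count_locked();
        my_unlock();
        return n;
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long freed;

        if (!my_trylock())
                return SHRINK_STOP;     /* tell the core to back off */
        freed = my_scan_locked(sc->nr_to_scan);
        my_unlock();
        return freed;
}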
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..1f7b4caefb6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59 59
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level) 61 enum i915_cache_level level,
62 bool valid)
62{ 63{
63 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 64 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
64 pte |= GEN6_PTE_ADDR_ENCODE(addr); 65 pte |= GEN6_PTE_ADDR_ENCODE(addr);
65 66
66 switch (level) { 67 switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
79} 80}
80 81
81static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 82static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
82 enum i915_cache_level level) 83 enum i915_cache_level level,
84 bool valid)
83{ 85{
84 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 86 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
85 pte |= GEN6_PTE_ADDR_ENCODE(addr); 87 pte |= GEN6_PTE_ADDR_ENCODE(addr);
86 88
87 switch (level) { 89 switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
105#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 107#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
106 108
107static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 109static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
108 enum i915_cache_level level) 110 enum i915_cache_level level,
111 bool valid)
109{ 112{
110 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 113 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
111 pte |= GEN6_PTE_ADDR_ENCODE(addr); 114 pte |= GEN6_PTE_ADDR_ENCODE(addr);
112 115
113 /* Mark the page as writeable. Other platforms don't have a 116 /* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
122} 125}
123 126
124static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 127static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
125 enum i915_cache_level level) 128 enum i915_cache_level level,
129 bool valid)
126{ 130{
127 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 131 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
128 pte |= HSW_PTE_ADDR_ENCODE(addr); 132 pte |= HSW_PTE_ADDR_ENCODE(addr);
129 133
130 if (level != I915_CACHE_NONE) 134 if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
134} 138}
135 139
136static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 140static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
137 enum i915_cache_level level) 141 enum i915_cache_level level,
142 bool valid)
138{ 143{
139 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 144 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
140 pte |= HSW_PTE_ADDR_ENCODE(addr); 145 pte |= HSW_PTE_ADDR_ENCODE(addr);
141 146
142 switch (level) { 147 switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
236/* PPGTT support for Sandybdrige/Gen6 and later */ 241/* PPGTT support for Sandybdrige/Gen6 and later */
237static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 242static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
238 unsigned first_entry, 243 unsigned first_entry,
239 unsigned num_entries) 244 unsigned num_entries,
245 bool use_scratch)
240{ 246{
241 struct i915_hw_ppgtt *ppgtt = 247 struct i915_hw_ppgtt *ppgtt =
242 container_of(vm, struct i915_hw_ppgtt, base); 248 container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 251 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
246 unsigned last_pte, i; 252 unsigned last_pte, i;
247 253
248 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 254 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
249 255
250 while (num_entries) { 256 while (num_entries) {
251 last_pte = first_pte + num_entries; 257 last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
282 dma_addr_t page_addr; 288 dma_addr_t page_addr;
283 289
284 page_addr = sg_page_iter_dma_address(&sg_iter); 290 page_addr = sg_page_iter_dma_address(&sg_iter);
285 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); 291 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
286 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 292 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
287 kunmap_atomic(pt_vaddr); 293 kunmap_atomic(pt_vaddr);
288 act_pt++; 294 act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
367 } 373 }
368 374
369 ppgtt->base.clear_range(&ppgtt->base, 0, 375 ppgtt->base.clear_range(&ppgtt->base, 0,
370 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); 376 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
371 377
372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 378 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
373 379
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
444{ 450{
445 ppgtt->base.clear_range(&ppgtt->base, 451 ppgtt->base.clear_range(&ppgtt->base,
446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 452 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
447 obj->base.size >> PAGE_SHIFT); 453 obj->base.size >> PAGE_SHIFT,
454 true);
448} 455}
449 456
450extern int intel_iommu_gfx_mapped; 457extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
485 dev_priv->mm.interruptible = interruptible; 492 dev_priv->mm.interruptible = interruptible;
486} 493}
487 494
495void i915_check_and_clear_faults(struct drm_device *dev)
496{
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 struct intel_ring_buffer *ring;
499 int i;
500
501 if (INTEL_INFO(dev)->gen < 6)
502 return;
503
504 for_each_ring(ring, dev_priv, i) {
505 u32 fault_reg;
506 fault_reg = I915_READ(RING_FAULT_REG(ring));
507 if (fault_reg & RING_FAULT_VALID) {
508 DRM_DEBUG_DRIVER("Unexpected fault\n"
509 "\tAddr: 0x%08lx\\n"
510 "\tAddress space: %s\n"
511 "\tSource ID: %d\n"
512 "\tType: %d\n",
513 fault_reg & PAGE_MASK,
514 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
515 RING_FAULT_SRCID(fault_reg),
516 RING_FAULT_FAULT_TYPE(fault_reg));
517 I915_WRITE(RING_FAULT_REG(ring),
518 fault_reg & ~RING_FAULT_VALID);
519 }
520 }
521 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
522}
523
524void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
525{
526 struct drm_i915_private *dev_priv = dev->dev_private;
527
528 /* Don't bother messing with faults pre GEN6 as we have little
529 * documentation supporting that it's a good idea.
530 */
531 if (INTEL_INFO(dev)->gen < 6)
532 return;
533
534 i915_check_and_clear_faults(dev);
535
536 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
537 dev_priv->gtt.base.start / PAGE_SIZE,
538 dev_priv->gtt.base.total / PAGE_SIZE,
539 false);
540}
541
488void i915_gem_restore_gtt_mappings(struct drm_device *dev) 542void i915_gem_restore_gtt_mappings(struct drm_device *dev)
489{ 543{
490 struct drm_i915_private *dev_priv = dev->dev_private; 544 struct drm_i915_private *dev_priv = dev->dev_private;
491 struct drm_i915_gem_object *obj; 545 struct drm_i915_gem_object *obj;
492 546
547 i915_check_and_clear_faults(dev);
548
493 /* First fill our portion of the GTT with scratch pages */ 549 /* First fill our portion of the GTT with scratch pages */
494 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 550 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
495 dev_priv->gtt.base.start / PAGE_SIZE, 551 dev_priv->gtt.base.start / PAGE_SIZE,
496 dev_priv->gtt.base.total / PAGE_SIZE); 552 dev_priv->gtt.base.total / PAGE_SIZE,
553 true);
497 554
498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 555 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
499 i915_gem_clflush_object(obj, obj->pin_display); 556 i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
536 593
537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 594 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
538 addr = sg_page_iter_dma_address(&sg_iter); 595 addr = sg_page_iter_dma_address(&sg_iter);
539 iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); 596 iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
540 i++; 597 i++;
541 } 598 }
542 599
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
548 */ 605 */
549 if (i != 0) 606 if (i != 0)
550 WARN_ON(readl(&gtt_entries[i-1]) != 607 WARN_ON(readl(&gtt_entries[i-1]) !=
551 vm->pte_encode(addr, level)); 608 vm->pte_encode(addr, level, true));
552 609
553 /* This next bit makes the above posting read even more important. We 610 /* This next bit makes the above posting read even more important. We
554 * want to flush the TLBs only after we're certain all the PTE updates 611 * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
560 617
561static void gen6_ggtt_clear_range(struct i915_address_space *vm, 618static void gen6_ggtt_clear_range(struct i915_address_space *vm,
562 unsigned int first_entry, 619 unsigned int first_entry,
563 unsigned int num_entries) 620 unsigned int num_entries,
621 bool use_scratch)
564{ 622{
565 struct drm_i915_private *dev_priv = vm->dev->dev_private; 623 struct drm_i915_private *dev_priv = vm->dev->dev_private;
566 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 624 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
573 first_entry, num_entries, max_entries)) 631 first_entry, num_entries, max_entries))
574 num_entries = max_entries; 632 num_entries = max_entries;
575 633
576 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 634 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
635
577 for (i = 0; i < num_entries; i++) 636 for (i = 0; i < num_entries; i++)
578 iowrite32(scratch_pte, &gtt_base[i]); 637 iowrite32(scratch_pte, &gtt_base[i]);
579 readl(gtt_base); 638 readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
594 653
595static void i915_ggtt_clear_range(struct i915_address_space *vm, 654static void i915_ggtt_clear_range(struct i915_address_space *vm,
596 unsigned int first_entry, 655 unsigned int first_entry,
597 unsigned int num_entries) 656 unsigned int num_entries,
657 bool unused)
598{ 658{
599 intel_gtt_clear_range(first_entry, num_entries); 659 intel_gtt_clear_range(first_entry, num_entries);
600} 660}
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
622 682
623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 683 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
624 entry, 684 entry,
625 obj->base.size >> PAGE_SHIFT); 685 obj->base.size >> PAGE_SHIFT,
686 true);
626 687
627 obj->has_global_gtt_mapping = 0; 688 obj->has_global_gtt_mapping = 0;
628} 689}
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; 770 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 771 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
711 hole_start, hole_end); 772 hole_start, hole_end);
712 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); 773 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
713 } 774 }
714 775
715 /* And finally clear the reserved guard page */ 776 /* And finally clear the reserved guard page */
716 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); 777 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
717} 778}
718 779
719static bool 780static bool
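
The i915_gem_gtt changes thread a "valid" flag through every pte_encode() variant and a "use_scratch" flag through clear_range(), so suspend can scrub the whole GGTT with PTEs that fault loudly instead of quietly hitting the scratch page, and resume can check and clear any faults that fired. A toy encoder showing just the flag (the bit layout here is made up):

#include <linux/types.h>

#define MY_PTE_VALID        (1u << 0)
#define MY_PTE_ADDR(addr)   ((u32)((addr) & ~0xfffULL))  /* illustrative layout */

static u32 my_pte_encode(u64 addr, bool valid)
{
        u32 pte = valid ? MY_PTE_VALID : 0;

        pte |= MY_PTE_ADDR(addr);
        return pte;
}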
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
143 143
144 /* Seek the first printf which is hits start position */ 144 /* Seek the first printf which is hits start position */
145 if (e->pos < e->start) { 145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args); 146 va_list tmp;
147 if (!__i915_error_seek(e, len)) 147
148 va_copy(tmp, args);
149 if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
148 return; 150 return;
149 } 151 }
150 152
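
The gpu_error fix measures the formatted length on a va_copy() of the argument list, because a va_list consumed by one vsnprintf() call cannot be reused for the next. The same idea in plain, runnable C:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Measure on a copy of the va_list so the original stays valid for the
 * real vsnprintf(). */
static char *format_alloc(const char *fmt, ...)
{
        va_list args, tmp;
        char *buf;
        int len;

        va_start(args, fmt);
        va_copy(tmp, args);
        len = vsnprintf(NULL, 0, fmt, tmp);     /* consumes the copy only */
        va_end(tmp);

        buf = malloc(len + 1);
        if (buf)
                vsnprintf(buf, len + 1, fmt, args);
        va_end(args);
        return buf;
}

int main(void)
{
        char *s = format_alloc("pos=%d name=%s", 42, "error-state");

        if (s) {
                puts(s);
                free(s);
        }
        return 0;
}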
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c159e1a6810f..ef9b35479f01 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -604,6 +604,10 @@
604#define ARB_MODE_SWIZZLE_IVB (1<<5) 604#define ARB_MODE_SWIZZLE_IVB (1<<5)
605#define RENDER_HWS_PGA_GEN7 (0x04080) 605#define RENDER_HWS_PGA_GEN7 (0x04080)
606#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 606#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
607#define RING_FAULT_GTTSEL_MASK (1<<11)
608#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
609#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
610#define RING_FAULT_VALID (1<<0)
607#define DONE_REG 0x40b0 611#define DONE_REG 0x40b0
608#define BSD_HWS_PGA_GEN7 (0x04180) 612#define BSD_HWS_PGA_GEN7 (0x04180)
609#define BLT_HWS_PGA_GEN7 (0x04280) 613#define BLT_HWS_PGA_GEN7 (0x04280)
@@ -3881,6 +3885,9 @@
3881#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 3885#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
3882#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 3886#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
3883 3887
3888#define HSW_SCRATCH1 0xb038
3889#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
3890
3884#define HSW_FUSE_STRAP 0x42014 3891#define HSW_FUSE_STRAP 0x42014
3885#define HSW_CDCLK_LIMIT (1 << 24) 3892#define HSW_CDCLK_LIMIT (1 << 24)
3886 3893
@@ -4276,7 +4283,9 @@
4276#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 4283#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
4277 4284
4278#define SOUTH_DSPCLK_GATE_D 0xc2020 4285#define SOUTH_DSPCLK_GATE_D 0xc2020
4286#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
4279#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 4287#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
4288#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
4280#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) 4289#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
4281 4290
4282/* CPU: FDI_TX */ 4291/* CPU: FDI_TX */
@@ -4728,6 +4737,9 @@
4728#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 4737#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4729#define DOP_CLOCK_GATING_DISABLE (1<<0) 4738#define DOP_CLOCK_GATING_DISABLE (1<<0)
4730 4739
4740#define HSW_ROW_CHICKEN3 0xe49c
4741#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
4742
4731#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 4743#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
4732#define INTEL_AUDIO_DEVCL 0x808629FB 4744#define INTEL_AUDIO_DEVCL 0x808629FB
4733#define INTEL_AUDIO_DEVBLC 0x80862801 4745#define INTEL_AUDIO_DEVBLC 0x80862801
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d8a1d98693e7..581fb4b2f766 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
3941 * consider. */ 3941 * consider. */
3942void intel_connector_dpms(struct drm_connector *connector, int mode) 3942void intel_connector_dpms(struct drm_connector *connector, int mode)
3943{ 3943{
3944 struct intel_encoder *encoder = intel_attached_encoder(connector);
3945
3946 /* All the simple cases only support two dpms states. */ 3944 /* All the simple cases only support two dpms states. */
3947 if (mode != DRM_MODE_DPMS_ON) 3945 if (mode != DRM_MODE_DPMS_ON)
3948 mode = DRM_MODE_DPMS_OFF; 3946 mode = DRM_MODE_DPMS_OFF;
@@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
3953 connector->dpms = mode; 3951 connector->dpms = mode;
3954 3952
3955 /* Only need to change hw state when actually enabled */ 3953 /* Only need to change hw state when actually enabled */
3956 if (encoder->base.crtc) 3954 if (connector->encoder)
3957 intel_encoder_dpms(encoder, mode); 3955 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
3958 else
3959 WARN_ON(encoder->connectors_active != false);
3960 3956
3961 intel_modeset_check_state(connector->dev); 3957 intel_modeset_check_state(connector->dev);
3962} 3958}
@@ -4775,6 +4771,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4775 4771
4776 pipeconf = 0; 4772 pipeconf = 0;
4777 4773
4774 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4775 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4776 pipeconf |= PIPECONF_ENABLE;
4777
4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4779 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4779 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4780 * core speed. 4780 * core speed.
@@ -10045,33 +10045,6 @@ static void i915_disable_vga(struct drm_device *dev)
10045 POSTING_READ(vga_reg); 10045 POSTING_READ(vga_reg);
10046} 10046}
10047 10047
10048static void i915_enable_vga_mem(struct drm_device *dev)
10049{
10050 /* Enable VGA memory on Intel HD */
10051 if (HAS_PCH_SPLIT(dev)) {
10052 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10053 outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10054 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10055 VGA_RSRC_LEGACY_MEM |
10056 VGA_RSRC_NORMAL_IO |
10057 VGA_RSRC_NORMAL_MEM);
10058 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10059 }
10060}
10061
10062void i915_disable_vga_mem(struct drm_device *dev)
10063{
10064 /* Disable VGA memory on Intel HD */
10065 if (HAS_PCH_SPLIT(dev)) {
10066 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10067 outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10068 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10069 VGA_RSRC_NORMAL_IO |
10070 VGA_RSRC_NORMAL_MEM);
10071 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10072 }
10073}
10074
10075void intel_modeset_init_hw(struct drm_device *dev) 10048void intel_modeset_init_hw(struct drm_device *dev)
10076{ 10049{
10077 intel_init_power_well(dev); 10050 intel_init_power_well(dev);
@@ -10350,7 +10323,6 @@ void i915_redisable_vga(struct drm_device *dev)
10350 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10323 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10351 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10324 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10352 i915_disable_vga(dev); 10325 i915_disable_vga(dev);
10353 i915_disable_vga_mem(dev);
10354 } 10326 }
10355} 10327}
10356 10328
@@ -10564,8 +10536,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
10564 10536
10565 intel_disable_fbc(dev); 10537 intel_disable_fbc(dev);
10566 10538
10567 i915_enable_vga_mem(dev);
10568
10569 intel_disable_gt_powersave(dev); 10539 intel_disable_gt_powersave(dev);
10570 10540
10571 ironlake_teardown_rc6(dev); 10541 ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..2c555f91bfae 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 588 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 589 return -EREMOTEIO;
590 case AUX_NATIVE_REPLY_DEFER: 590 case AUX_NATIVE_REPLY_DEFER:
591 udelay(100); 591 /*
592 * For now, just give more slack to branch devices. We
593 * could check the DPCD for I2C bit rate capabilities,
594 * and if available, adjust the interval. We could also
595 * be more careful with DP-to-Legacy adapters where a
596 * long legacy cable may force very low I2C bit rates.
597 */
598 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
599 DP_DWN_STRM_PORT_PRESENT)
600 usleep_range(500, 600);
601 else
602 usleep_range(300, 400);
592 continue; 603 continue;
593 default: 604 default:
594 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
@@ -1456,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1456 1467
1457 /* Avoid continuous PSR exit by masking memup and hpd */ 1468 /* Avoid continuous PSR exit by masking memup and hpd */
1458 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1469 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
1459 EDP_PSR_DEBUG_MASK_HPD); 1470 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1460 1471
1461 intel_dp->psr_setup_done = true; 1472 intel_dp->psr_setup_done = true;
1462} 1473}
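
The AUX DEFER handling above replaces a fixed udelay(100) with usleep_range() windows, longer when a branch (DP downstream port) device is present. For waits of a few hundred microseconds in sleepable context, usleep_range() avoids busy-spinning; a minimal helper in that spirit (the helper is illustrative, the intervals mirror the hunk):

#include <linux/types.h>
#include <linux/delay.h>

static void aux_defer_wait(bool downstream_branch)
{
        if (downstream_branch)
                usleep_range(500, 600); /* give branch devices more slack */
        else
                usleep_range(300, 400);
}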
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 28cae80495e2..9b7b68fd5d47 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -793,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
793extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 793extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
794extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 794extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
795extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 795extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
796extern void i915_disable_vga_mem(struct drm_device *dev);
797 796
798#endif /* __INTEL_DRV_H__ */ 797#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd176b7296c1..26c2ea3e985c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
3864 dev_priv->rps.rpe_delay), 3864 dev_priv->rps.rpe_delay),
3865 dev_priv->rps.rpe_delay); 3865 dev_priv->rps.rpe_delay);
3866 3866
3867 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3868
3869 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3867 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3870 3868
3871 gen6_enable_rps_interrupts(dev); 3869 gen6_enable_rps_interrupts(dev);
@@ -4761,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
4761 * gating for the panel power sequencer or it will fail to 4759 * gating for the panel power sequencer or it will fail to
4762 * start up when no ports are active. 4760 * start up when no ports are active.
4763 */ 4761 */
4764 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 4762 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
4763 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
4764 PCH_CPUNIT_CLOCK_GATE_DISABLE);
4765 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 4765 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4766 DPLS_EDP_PPS_FIX_DIS); 4766 DPLS_EDP_PPS_FIX_DIS);
4767 /* The below fixes the weird display corruption, a few pixels shifted 4767 /* The below fixes the weird display corruption, a few pixels shifted
@@ -4955,6 +4955,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
4955 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 4955 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4956 GEN7_WA_L3_CHICKEN_MODE); 4956 GEN7_WA_L3_CHICKEN_MODE);
4957 4957
4958 /* L3 caching of data atomics doesn't work -- disable it. */
4959 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4960 I915_WRITE(HSW_ROW_CHICKEN3,
4961 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
4962
4958 /* This is required by WaCatErrorRejectionIssue:hsw */ 4963 /* This is required by WaCatErrorRejectionIssue:hsw */
4959 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4964 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4960 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4965 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5681,5 +5686,7 @@ void intel_pm_init(struct drm_device *dev)
5681 5686
5682 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5687 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5683 intel_gen6_powersave_work); 5688 intel_gen6_powersave_work);
5689
5690 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
5684} 5691}
5685 5692
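
The last two intel_pm hunks move INIT_DELAYED_WORK() for the VLV RPS work out of the enable path and into intel_pm_init(): a work item that might still be pending should not be re-initialised, it should be set up exactly once. A sketch with made-up names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_priv {
        struct delayed_work work;
};

static void my_work_fn(struct work_struct *w)
{
        /* deferred processing goes here */
}

static void my_init(struct my_priv *p)
{
        INIT_DELAYED_WORK(&p->work, my_work_fn);        /* once, at init */
}

static void my_enable(struct my_priv *p)
{
        /* no re-INIT here; just (re)arm the work item */
        schedule_delayed_work(&p->work, msecs_to_jiffies(100));
}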
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
919 /* TV has it's own notion of sync and other mode flags, so clear them. */
920 pipe_config->adjusted_mode.flags = 0;
921
922 /*
923 * FIXME: We don't check whether the input mode is actually what we want
924 * or whether userspace is doing something stupid.
925 */
926
919 return true; 927 return true;
920} 928}
921 929
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
19#include "msm_drv.h" 19#include "msm_drv.h"
20#include "mdp4_kms.h" 20#include "mdp4_kms.h"
21 21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); 22static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25 23
26static int mdp4_hw_init(struct msm_kms *kms) 24static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 008d772384c7..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
18#include "msm_drv.h" 18#include "msm_drv.h"
19#include "msm_gpu.h" 19#include "msm_gpu.h"
20 20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev) 21static void msm_fb_output_poll_changed(struct drm_device *dev)
24{ 22{
25 struct msm_drm_private *priv = dev->dev_private; 23 struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
62 int i, ret; 60 int i, ret;
63 61
64 for (i = 0; i < cnt; i++) { 62 for (i = 0; i < cnt; i++) {
63 /* TODO maybe some day msm iommu won't require this hack: */
64 struct device *msm_iommu_get_ctx(const char *ctx_name);
65 struct device *ctx = msm_iommu_get_ctx(names[i]); 65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx) 66 if (!ctx)
67 continue; 67 continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
199 * imx drm driver on iMX5 199 * imx drm driver on iMX5
200 */ 200 */
201 dev_err(dev->dev, "failed to load kms\n"); 201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(priv->kms); 202 ret = PTR_ERR(kms);
203 goto fail; 203 goto fail;
204 } 204 }
205 205
@@ -697,7 +697,7 @@ static struct drm_driver msm_driver = {
697 .gem_vm_ops = &vm_ops, 697 .gem_vm_ops = &vm_ops,
698 .dumb_create = msm_gem_dumb_create, 698 .dumb_create = msm_gem_dumb_create,
699 .dumb_map_offset = msm_gem_dumb_map_offset, 699 .dumb_map_offset = msm_gem_dumb_map_offset,
700 .dumb_destroy = msm_gem_dumb_destroy, 700 .dumb_destroy = drm_gem_dumb_destroy,
701#ifdef CONFIG_DEBUG_FS 701#ifdef CONFIG_DEBUG_FS
702 .debugfs_init = msm_debugfs_init, 702 .debugfs_init = msm_debugfs_init,
703 .debugfs_cleanup = msm_debugfs_cleanup, 703 .debugfs_cleanup = msm_debugfs_cleanup,
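The msm_iommu_attach() hunk above replaces the dropped <mach/iommu.h> include with a block-scope declaration of msm_iommu_get_ctx() right inside the loop, flagged by the TODO comment as a temporary hack. A standalone sketch of that C pattern follows; lookup_ctx() is a hypothetical stand-in for the msm symbol.

/* Block-scope function declaration: the prototype lives inside the loop
 * body, so no platform header is needed at file scope.
 */
#include <stdio.h>

const char *lookup_ctx(const char *name) { return name; }  /* pretend external definition */

int main(void)
{
    const char *names[] = { "ctx_a", "ctx_b" };
    for (unsigned i = 0; i < 2; i++) {
        const char *lookup_ctx(const char *name);  /* declared here, not in a header */
        printf("%s -> %s\n", names[i], lookup_ctx(names[i]));
    }
    return 0;
}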
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 29eacfa29cfb..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); 319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
320} 320}
321 321
322int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
323 uint32_t handle)
324{
325 /* No special work needed, drop the reference and see what falls out */
326 return drm_gem_handle_delete(file, handle);
327}
328
329int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 322int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
330 uint32_t handle, uint64_t *offset) 323 uint32_t handle, uint64_t *offset)
331{ 324{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 37712a6df923..e290cfa4acee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
113 pmc->use_msi = false; 113 pmc->use_msi = false;
114 break; 114 break;
115 default: 115 default:
116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true); 116 pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
117 if (pmc->use_msi) { 117 if (pmc->use_msi) {
118 pmc->use_msi = pci_enable_msi(device->pdev) == 0; 118 pmc->use_msi = pci_enable_msi(device->pdev) == 0;
119 if (pmc->use_msi) { 119 if (pmc->use_msi) {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 32923d2f6002..5e891b226acf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 710 if (radeon_audio != 0) {
711 (drm_detect_hdmi_monitor(radeon_connector->edid) && 711 if (radeon_connector->use_digital &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 712 (radeon_connector->audio == RADEON_AUDIO_ENABLE))
713 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
714 else if (radeon_connector->use_digital) 714 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
715 (radeon_connector->audio == RADEON_AUDIO_AUTO))
716 return ATOM_ENCODER_MODE_HDMI;
717 else if (radeon_connector->use_digital)
718 return ATOM_ENCODER_MODE_DVI;
719 else
720 return ATOM_ENCODER_MODE_CRT;
721 } else if (radeon_connector->use_digital) {
715 return ATOM_ENCODER_MODE_DVI; 722 return ATOM_ENCODER_MODE_DVI;
716 else 723 } else {
717 return ATOM_ENCODER_MODE_CRT; 724 return ATOM_ENCODER_MODE_CRT;
725 }
718 break; 726 break;
719 case DRM_MODE_CONNECTOR_DVID: 727 case DRM_MODE_CONNECTOR_DVID:
720 case DRM_MODE_CONNECTOR_HDMIA: 728 case DRM_MODE_CONNECTOR_HDMIA:
721 default: 729 default:
722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 730 if (radeon_audio != 0) {
723 (drm_detect_hdmi_monitor(radeon_connector->edid) && 731 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
724 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 732 return ATOM_ENCODER_MODE_HDMI;
725 return ATOM_ENCODER_MODE_HDMI; 733 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
726 else 734 (radeon_connector->audio == RADEON_AUDIO_AUTO))
735 return ATOM_ENCODER_MODE_HDMI;
736 else
737 return ATOM_ENCODER_MODE_DVI;
738 } else {
727 return ATOM_ENCODER_MODE_DVI; 739 return ATOM_ENCODER_MODE_DVI;
740 }
728 break; 741 break;
729 case DRM_MODE_CONNECTOR_LVDS: 742 case DRM_MODE_CONNECTOR_LVDS:
730 return ATOM_ENCODER_MODE_LVDS; 743 return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 case DRM_MODE_CONNECTOR_DisplayPort: 745 case DRM_MODE_CONNECTOR_DisplayPort:
733 dig_connector = radeon_connector->con_priv; 746 dig_connector = radeon_connector->con_priv;
734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 747 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 748 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
736 return ATOM_ENCODER_MODE_DP; 749 return ATOM_ENCODER_MODE_DP;
737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 750 } else if (radeon_audio != 0) {
738 (drm_detect_hdmi_monitor(radeon_connector->edid) && 751 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
739 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 752 return ATOM_ENCODER_MODE_HDMI;
740 return ATOM_ENCODER_MODE_HDMI; 753 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
741 else 754 (radeon_connector->audio == RADEON_AUDIO_AUTO))
755 return ATOM_ENCODER_MODE_HDMI;
756 else
757 return ATOM_ENCODER_MODE_DVI;
758 } else {
742 return ATOM_ENCODER_MODE_DVI; 759 return ATOM_ENCODER_MODE_DVI;
760 }
743 break; 761 break;
744 case DRM_MODE_CONNECTOR_eDP: 762 case DRM_MODE_CONNECTOR_eDP:
745 return ATOM_ENCODER_MODE_DP; 763 return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1655 * does the same thing and more. 1673 * does the same thing and more.
1656 */ 1674 */
1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && 1675 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1658 (rdev->family != CHIP_RS880)) 1676 (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1677 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1660 } 1678 }
1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1679 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
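The atombios_get_encoder_mode() hunks above restructure the HDMI-vs-DVI decision around the new module-level audio switch. The following standalone paraphrase condenses the DVI-I/HDMI-B branch; the enum names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

enum mode { MODE_CRT, MODE_DVI, MODE_HDMI };
enum audio_prop { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

/* Paraphrase of the new logic: audio must be allowed globally (module
 * parameter != 0) and requested per connector (property ENABLE, or AUTO
 * with an HDMI-capable monitor) before HDMI is selected; otherwise fall
 * back to DVI or CRT based on use_digital.
 */
static enum mode pick_mode(int radeon_audio, enum audio_prop prop,
                           bool use_digital, bool monitor_is_hdmi)
{
    if (radeon_audio != 0) {
        if (use_digital && prop == AUDIO_ENABLE)
            return MODE_HDMI;
        if (monitor_is_hdmi && prop == AUDIO_AUTO)
            return MODE_HDMI;
    }
    return use_digital ? MODE_DVI : MODE_CRT;
}

int main(void)
{
    printf("%d\n", pick_mode(-1, AUDIO_AUTO, true, true));   /* HDMI: auto + HDMI monitor */
    printf("%d\n", pick_mode(0,  AUDIO_ENABLE, true, true)); /* DVI: audio disabled globally */
    return 0;
}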
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 05ff315e8e9e..9b6950d9b3c0 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1168 { 25000, 30000, RADEON_SCLK_UP } 1168 { 25000, 30000, RADEON_SCLK_UP }
1169}; 1169};
1170 1170
1171void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1172 u32 *max_clock)
1173{
1174 u32 i, clock = 0;
1175
1176 if ((table == NULL) || (table->count == 0)) {
1177 *max_clock = clock;
1178 return;
1179 }
1180
1181 for (i = 0; i < table->count; i++) {
1182 if (clock < table->entries[i].clk)
1183 clock = table->entries[i].clk;
1184 }
1185 *max_clock = clock;
1186}
1187
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1188void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage) 1189 u32 clock, u16 max_voltage, u16 *voltage)
1173{ 1190{
@@ -1913,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1913 } 1930 }
1914 j++; 1931 j++;
1915 1932
1916 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1933 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1917 return -EINVAL; 1934 return -EINVAL;
1918 1935
1919 tmp = RREG32(MC_PMG_CMD_MRS); 1936 tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1928,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1928 } 1945 }
1929 j++; 1946 j++;
1930 1947
1931 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1948 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1932 return -EINVAL; 1949 return -EINVAL;
1933 break; 1950 break;
1934 case MC_SEQ_RESERVE_M >> 2: 1951 case MC_SEQ_RESERVE_M >> 2:
@@ -1942,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1942 } 1959 }
1943 j++; 1960 j++;
1944 1961
1945 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1962 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1946 return -EINVAL; 1963 return -EINVAL;
1947 break; 1964 break;
1948 default: 1965 default:
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2080 bool disable_mclk_switching; 2097 bool disable_mclk_switching;
2081 u32 mclk, sclk; 2098 u32 mclk, sclk;
2082 u16 vddc, vddci; 2099 u16 vddc, vddci;
2100 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2083 2101
2084 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2102 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2085 btc_dpm_vblank_too_short(rdev)) 2103 btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2121 ps->low.vddci = max_limits->vddci; 2139 ps->low.vddci = max_limits->vddci;
2122 } 2140 }
2123 2141
2142 /* limit clocks to max supported clocks based on voltage dependency tables */
2143 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2144 &max_sclk_vddc);
2145 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2146 &max_mclk_vddci);
2147 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2148 &max_mclk_vddc);
2149
2150 if (max_sclk_vddc) {
2151 if (ps->low.sclk > max_sclk_vddc)
2152 ps->low.sclk = max_sclk_vddc;
2153 if (ps->medium.sclk > max_sclk_vddc)
2154 ps->medium.sclk = max_sclk_vddc;
2155 if (ps->high.sclk > max_sclk_vddc)
2156 ps->high.sclk = max_sclk_vddc;
2157 }
2158 if (max_mclk_vddci) {
2159 if (ps->low.mclk > max_mclk_vddci)
2160 ps->low.mclk = max_mclk_vddci;
2161 if (ps->medium.mclk > max_mclk_vddci)
2162 ps->medium.mclk = max_mclk_vddci;
2163 if (ps->high.mclk > max_mclk_vddci)
2164 ps->high.mclk = max_mclk_vddci;
2165 }
2166 if (max_mclk_vddc) {
2167 if (ps->low.mclk > max_mclk_vddc)
2168 ps->low.mclk = max_mclk_vddc;
2169 if (ps->medium.mclk > max_mclk_vddc)
2170 ps->medium.mclk = max_mclk_vddc;
2171 if (ps->high.mclk > max_mclk_vddc)
2172 ps->high.mclk = max_mclk_vddc;
2173 }
2174
2124 /* XXX validate the min clocks required for display */ 2175 /* XXX validate the min clocks required for display */
2125 2176
2126 if (disable_mclk_switching) { 2177 if (disable_mclk_switching) {
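The new btc_get_max_clock_from_voltage_dependency_table() helper and the clamp over the low/medium/high levels above cap each power state at the highest clock the voltage dependency tables actually describe. A self-contained sketch of the same pattern with toy table data:

/* Scan a clock/voltage dependency table for its highest clock and cap each
 * power level at that value; a cap of 0 means the table was empty and no
 * limit applies, as in the helper above.
 */
#include <stdio.h>
#include <stdint.h>

struct dep_entry { uint32_t clk; uint16_t v; };

static uint32_t max_clock(const struct dep_entry *e, unsigned count)
{
    uint32_t clock = 0;
    for (unsigned i = 0; i < count; i++)
        if (clock < e[i].clk)
            clock = e[i].clk;
    return clock;
}

int main(void)
{
    struct dep_entry vddc_sclk[] = { { 30000, 900 }, { 60000, 1000 }, { 80000, 1100 } };
    uint32_t levels[3] = { 30000, 70000, 95000 };   /* low / medium / high sclk */
    uint32_t cap = max_clock(vddc_sclk, 3);

    for (int i = 0; i < 3; i++)
        if (cap && levels[i] > cap)
            levels[i] = cap;

    printf("low=%u medium=%u high=%u (cap=%u)\n",
           levels[0], levels[1], levels[2], cap);   /* 30000 70000 80000 (80000) */
    return 0;
}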
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 899627443030..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
146}; 146};
147 147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
150 u32 *max_clock);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 151extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest); 152 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 153extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
712 struct radeon_clock_and_voltage_limits *max_limits; 714 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching; 715 bool disable_mclk_switching;
714 u32 sclk, mclk; 716 u32 sclk, mclk;
717 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
715 int i; 718 int i;
716 719
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 720 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
739 } 742 }
740 } 743 }
741 744
745 /* limit clocks to max supported clocks based on voltage dependency tables */
746 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
747 &max_sclk_vddc);
748 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
749 &max_mclk_vddci);
750 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
751 &max_mclk_vddc);
752
753 for (i = 0; i < ps->performance_level_count; i++) {
754 if (max_sclk_vddc) {
755 if (ps->performance_levels[i].sclk > max_sclk_vddc)
756 ps->performance_levels[i].sclk = max_sclk_vddc;
757 }
758 if (max_mclk_vddci) {
759 if (ps->performance_levels[i].mclk > max_mclk_vddci)
760 ps->performance_levels[i].mclk = max_mclk_vddci;
761 }
762 if (max_mclk_vddc) {
763 if (ps->performance_levels[i].mclk > max_mclk_vddc)
764 ps->performance_levels[i].mclk = max_mclk_vddc;
765 }
766 }
767
742 /* XXX validate the min clocks required for display */ 768 /* XXX validate the min clocks required for display */
743 769
744 if (disable_mclk_switching) { 770 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index adbdb6503b05..9cd2bc989ac7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
77static void cik_program_aspm(struct radeon_device *rdev); 77static void cik_program_aspm(struct radeon_device *rdev);
78static void cik_init_pg(struct radeon_device *rdev); 78static void cik_init_pg(struct radeon_device *rdev);
79static void cik_init_cg(struct radeon_device *rdev); 79static void cik_init_cg(struct radeon_device *rdev);
80static void cik_fini_pg(struct radeon_device *rdev);
81static void cik_fini_cg(struct radeon_device *rdev);
80static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, 82static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
81 bool enable); 83 bool enable);
82 84
@@ -1692,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1692 fw_name); 1694 fw_name);
1693 release_firmware(rdev->smc_fw); 1695 release_firmware(rdev->smc_fw);
1694 rdev->smc_fw = NULL; 1696 rdev->smc_fw = NULL;
1697 err = 0;
1695 } else if (rdev->smc_fw->size != smc_req_size) { 1698 } else if (rdev->smc_fw->size != smc_req_size) {
1696 printk(KERN_ERR 1699 printk(KERN_ERR
1697 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 1700 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -2845,10 +2848,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
2845 rdev->config.cik.tile_config |= (3 << 0); 2848 rdev->config.cik.tile_config |= (3 << 0);
2846 break; 2849 break;
2847 } 2850 }
2848 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 2851 rdev->config.cik.tile_config |=
2849 rdev->config.cik.tile_config |= 1 << 4; 2852 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
2850 else
2851 rdev->config.cik.tile_config |= 0 << 4;
2852 rdev->config.cik.tile_config |= 2853 rdev->config.cik.tile_config |=
2853 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 2854 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
2854 rdev->config.cik.tile_config |= 2855 rdev->config.cik.tile_config |=
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3182 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3183 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3183 if (r) { 3184 if (r) {
3184 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3185 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3186 radeon_scratch_free(rdev, scratch);
3185 return r; 3187 return r;
3186 } 3188 }
3187 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 3189 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3198 r = radeon_fence_wait(ib.fence, false); 3200 r = radeon_fence_wait(ib.fence, false);
3199 if (r) { 3201 if (r) {
3200 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3202 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3203 radeon_scratch_free(rdev, scratch);
3204 radeon_ib_free(rdev, &ib);
3201 return r; 3205 return r;
3202 } 3206 }
3203 for (i = 0; i < rdev->usec_timeout; i++) { 3207 for (i = 0; i < rdev->usec_timeout; i++) {
@@ -4187,6 +4191,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
4187 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 4191 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4188 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 4192 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
4189 4193
4194 /* disable CG/PG */
4195 cik_fini_pg(rdev);
4196 cik_fini_cg(rdev);
4197
4190 /* stop the rlc */ 4198 /* stop the rlc */
4191 cik_rlc_stop(rdev); 4199 cik_rlc_stop(rdev);
4192 4200
@@ -4456,8 +4464,8 @@ static int cik_mc_init(struct radeon_device *rdev)
4456 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 4464 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4457 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 4465 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4458 /* size in MB on si */ 4466 /* size in MB on si */
4459 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4467 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4460 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 4468 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
4461 rdev->mc.visible_vram_size = rdev->mc.aper_size; 4469 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4462 si_vram_gtt_location(rdev, &rdev->mc); 4470 si_vram_gtt_location(rdev, &rdev->mc);
4463 radeon_update_bandwidth_info(rdev); 4471 radeon_update_bandwidth_info(rdev);
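The cik_mc_init() hunk above switches the VRAM size computation to 1024ULL. A standalone illustration of why: CONFIG_MEMSIZE is a 32-bit megabyte count, and multiplying it by plain int constants keeps the arithmetic in 32 bits, which wraps for boards with 4 GiB or more.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t mem_size_mb = 4096;                       /* 4 GiB card */
    uint64_t wrong = mem_size_mb * 1024 * 1024;        /* computed in 32 bits: wraps to 0 */
    uint64_t right = mem_size_mb * 1024ULL * 1024ULL;  /* promoted to 64 bits before multiplying */
    printf("wrong=%llu right=%llu\n",
           (unsigned long long)wrong, (unsigned long long)right);
    return 0;
}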
@@ -4735,12 +4743,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
4735 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; 4743 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4736 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; 4744 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4737 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; 4745 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4738 char *block = (char *)&mc_client; 4746 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
4747 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
4739 4748
4740 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", 4749 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
4741 protections, vmid, addr, 4750 protections, vmid, addr,
4742 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", 4751 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
4743 block, mc_id); 4752 block, mc_client, mc_id);
4744} 4753}
4745 4754
4746/** 4755/**
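cik_vm_decode_fault() above stops casting the mc_client register value to a char pointer and instead unpacks its four ASCII bytes into a NUL-terminated buffer before printing. A standalone sketch with a made-up client id:

/* The MC client id register packs four ASCII characters into a u32; unpack
 * them into a proper string rather than treating the u32 itself as text.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t mc_client = ('C' << 24) | ('B' << 16) | ('0' << 8) | '0';  /* hypothetical "CB00" */
    char block[5] = {
        mc_client >> 24, (mc_client >> 16) & 0xff,
        (mc_client >> 8) & 0xff, mc_client & 0xff, 0
    };
    printf("fault from '%s' (0x%08x)\n", block, mc_client);
    return 0;
}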
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 85a69d2ea3d2..9fcd338c0fcf 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
113 u8 *sadb; 113 u8 *sadb;
114 int sad_count; 114 int sad_count;
115 115
116 /* XXX: setting this register causes hangs on some asics */
117 return;
118
116 if (!dig->afmt->pin) 119 if (!dig->afmt->pin)
117 return; 120 return;
118 121
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 555164e270a7..b5c67a99dda9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3131 rdev->config.evergreen.sx_max_export_size = 256; 3131 rdev->config.evergreen.sx_max_export_size = 256;
3132 rdev->config.evergreen.sx_max_export_pos_size = 64; 3132 rdev->config.evergreen.sx_max_export_pos_size = 64;
3133 rdev->config.evergreen.sx_max_export_smx_size = 192; 3133 rdev->config.evergreen.sx_max_export_smx_size = 192;
3134 rdev->config.evergreen.max_hw_contexts = 8; 3134 rdev->config.evergreen.max_hw_contexts = 4;
3135 rdev->config.evergreen.sq_num_cf_insts = 2; 3135 rdev->config.evergreen.sq_num_cf_insts = 2;
3136 3136
3137 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 3137 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f71ce390aebe..fe1de855775e 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
67 u8 *sadb; 67 u8 *sadb;
68 int sad_count; 68 int sad_count;
69 69
70 /* XXX: setting this register causes hangs on some asics */
71 return;
72
70 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 73 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
71 if (connector->encoder == encoder) 74 if (connector->encoder == encoder)
72 radeon_connector = to_radeon_connector(connector); 75 radeon_connector = to_radeon_connector(connector);
@@ -288,8 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
288 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 291 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
289 292
290 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 293 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
291 HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */ 294 HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
292 HDMI_ACR_SOURCE); /* select SW CTS value */
293 295
294 evergreen_hdmi_update_ACR(encoder, mode->clock); 296 evergreen_hdmi_update_ACR(encoder, mode->clock);
295 297
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 8768fd6a1e27..4f6d2962767d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1501,7 +1501,7 @@
1501 * 6. COMMAND [29:22] | BYTE_COUNT [20:0] 1501 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1502 */ 1502 */
1503# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1503# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1504 /* 0 - SRC_ADDR 1504 /* 0 - DST_ADDR
1505 * 1 - GDS 1505 * 1 - GDS
1506 */ 1506 */
1507# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1507# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1516,7 +1516,7 @@
1516# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1516# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1517/* COMMAND */ 1517/* COMMAND */
1518# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1518# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1519# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1519# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1520 /* 0 - none 1520 /* 0 - none
1521 * 1 - 8 in 16 1521 * 1 - 8 in 16
1522 * 2 - 8 in 32 1522 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 93c1f9ef5da9..cac2866d79da 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
804 fw_name); 804 fw_name);
805 release_firmware(rdev->smc_fw); 805 release_firmware(rdev->smc_fw);
806 rdev->smc_fw = NULL; 806 rdev->smc_fw = NULL;
807 err = 0;
807 } else if (rdev->smc_fw->size != smc_req_size) { 808 } else if (rdev->smc_fw->size != smc_req_size) {
808 printk(KERN_ERR 809 printk(KERN_ERR
809 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 810 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 6c398a456d78..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
787 bool disable_mclk_switching; 787 bool disable_mclk_switching;
788 u32 mclk, sclk; 788 u32 mclk, sclk;
789 u16 vddc, vddci; 789 u16 vddc, vddci;
790 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
790 int i; 791 int i;
791 792
792 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 793 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
813 } 814 }
814 } 815 }
815 816
817 /* limit clocks to max supported clocks based on voltage dependency tables */
818 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
819 &max_sclk_vddc);
820 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
821 &max_mclk_vddci);
822 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
823 &max_mclk_vddc);
824
825 for (i = 0; i < ps->performance_level_count; i++) {
826 if (max_sclk_vddc) {
827 if (ps->performance_levels[i].sclk > max_sclk_vddc)
828 ps->performance_levels[i].sclk = max_sclk_vddc;
829 }
830 if (max_mclk_vddci) {
831 if (ps->performance_levels[i].mclk > max_mclk_vddci)
832 ps->performance_levels[i].mclk = max_mclk_vddci;
833 }
834 if (max_mclk_vddc) {
835 if (ps->performance_levels[i].mclk > max_mclk_vddc)
836 ps->performance_levels[i].mclk = max_mclk_vddc;
837 }
838 }
839
816 /* XXX validate the min clocks required for display */ 840 /* XXX validate the min clocks required for display */
817 841
818 if (disable_mclk_switching) { 842 if (disable_mclk_switching) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 24175717307b..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2933 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2933 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2934 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 2934 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2935 seq_printf(m, "%u dwords in ring\n", count); 2935 seq_printf(m, "%u dwords in ring\n", count);
2936 for (j = 0; j <= count; j++) { 2936 if (ring->ready) {
2937 i = (rdp + j) & ring->ptr_mask; 2937 for (j = 0; j <= count; j++) {
2938 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 2938 i = (rdp + j) & ring->ptr_mask;
2939 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2940 }
2939 } 2941 }
2940 return 0; 2942 return 0;
2941} 2943}
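r100_debugfs_cp_ring_info() above (and radeon_debugfs_ring_info() later in this diff) now only walks the ring contents when ring->ready is set, presumably because the ring buffer is not mapped when the CP never came up. A standalone sketch of the guard:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct ring { uint32_t *buf; unsigned mask; int ready; };

static void dump(const struct ring *r, unsigned rptr, unsigned count)
{
    if (!r->ready)                  /* nothing mapped: skip the dump entirely */
        return;
    for (unsigned j = 0; j <= count; j++) {
        unsigned i = (rptr + j) & r->mask;
        printf("r[%04u]=0x%08x\n", i, r->buf[i]);
    }
}

int main(void)
{
    uint32_t data[8] = { 0xdeadbeef, 0x12345678 };
    struct ring live = { data, 7, 1 }, dead = { NULL, 7, 0 };
    dump(&live, 0, 1);
    dump(&dead, 0, 1);              /* safely prints nothing instead of dereferencing NULL */
    return 0;
}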
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2a1b1876b431..f9be22062df1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2302 fw_name); 2302 fw_name);
2303 release_firmware(rdev->smc_fw); 2303 release_firmware(rdev->smc_fw);
2304 rdev->smc_fw = NULL; 2304 rdev->smc_fw = NULL;
2305 err = 0;
2305 } else if (rdev->smc_fw->size != smc_req_size) { 2306 } else if (rdev->smc_fw->size != smc_req_size) {
2306 printk(KERN_ERR 2307 printk(KERN_ERR
2307 "smc: Bogus length %zu in firmware \"%s\"\n", 2308 "smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index e65f211a7be0..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 1084 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 1085 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 1086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
1087 le16_to_cpu(limits->entries[i].usVoltage); 1087 le16_to_cpu(entry->usVoltage);
1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 1088 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 1089 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
1090 } 1090 }
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..06022e3b9c3b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
57static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { 57static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
58 /* 32kHz 44.1kHz 48kHz */ 58 /* 32kHz 44.1kHz 48kHz */
59 /* Clock N CTS N CTS N CTS */ 59 /* Clock N CTS N CTS N CTS */
60 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ 60 { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
61 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ 61 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
62 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ 62 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
63 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ 63 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
64 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ 64 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
65 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ 65 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
66 { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ 66 { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
67 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ 67 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
68 { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ 68 { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
69 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ 69 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
70 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */ 70 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
71}; 71};
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
75 */ 75 */
76static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq) 76static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
77{ 77{
78 if (*CTS == 0) 78 u64 n;
79 *CTS = clock * N / (128 * freq) * 1000; 79 u32 d;
80
81 if (*CTS == 0) {
82 n = (u64)clock * (u64)N * 1000ULL;
83 d = 128 * freq;
84 do_div(n, d);
85 *CTS = n;
86 }
80 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", 87 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
81 N, *CTS, freq); 88 N, *CTS, freq);
82} 89}
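r600_hdmi_calc_cts() above moves the fallback CTS computation to 64-bit math with do_div(). The motivation appears to be both overflow and precision: clock * N * 1000 exceeds 32 bits for HD pixel clocks, and the old ordering divided before multiplying by 1000. A standalone version using plain 64-bit division, checked against the 148.50 MHz / 48 kHz row of the predefined table:

#include <stdio.h>
#include <stdint.h>

/* CTS = clock * N * 1000 / (128 * freq), computed entirely in 64 bits. */
static uint32_t calc_cts(uint32_t clock_khz, uint32_t n, uint32_t freq_hz)
{
    uint64_t num = (uint64_t)clock_khz * n * 1000ULL;
    return (uint32_t)(num / (128 * freq_hz));
}

int main(void)
{
    /* 148.50 MHz pixel clock, 48 kHz audio, N = 6144: the table lists CTS 148500 */
    printf("CTS = %u\n", calc_cts(148500, 6144, 48000));
    return 0;
}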
@@ -257,10 +264,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
257 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 264 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
258 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 265 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
259 */ 266 */
260 if (ASIC_IS_DCE3(rdev)) { 267 if (ASIC_IS_DCE32(rdev)) {
261 /* according to the reg specs, this should DCE3.2 only, but in
262 * practice it seems to cover DCE3.0 as well.
263 */
264 if (dig->dig_encoder == 0) { 268 if (dig->dig_encoder == 0) {
265 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; 269 dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
266 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); 270 dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +280,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
276 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); 280 WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
277 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ 281 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
278 } 282 }
283 } else if (ASIC_IS_DCE3(rdev)) {
284 /* according to the reg specs, this should DCE3.2 only, but in
285 * practice it seems to cover DCE3.0/3.1 as well.
286 */
287 if (dig->dig_encoder == 0) {
288 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
289 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
290 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
291 } else {
292 WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
293 WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
294 WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
295 }
279 } else { 296 } else {
280 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ 297 /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
281 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | 298 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
282 AUDIO_DTO_MODULE(clock / 10)); 299 AUDIO_DTO_MODULE(clock / 10));
283 } 300 }
@@ -292,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
292 u8 *sadb; 309 u8 *sadb;
293 int sad_count; 310 int sad_count;
294 311
312 /* XXX: setting this register causes hangs on some asics */
313 return;
314
295 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 315 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
296 if (connector->encoder == encoder) 316 if (connector->encoder == encoder)
297 radeon_connector = to_radeon_connector(connector); 317 radeon_connector = to_radeon_connector(connector);
@@ -434,8 +454,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
434 } 454 }
435 455
436 WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 456 WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
437 HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */ 457 HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
438 HDMI0_ACR_SOURCE); /* select SW CTS value */ 458 HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
439 459
440 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 460 WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
441 HDMI0_NULL_SEND | /* send null packets when required */ 461 HDMI0_NULL_SEND | /* send null packets when required */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index e673fe26ea84..7b3c7b5932c5 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1523,7 +1523,7 @@
1523 */ 1523 */
1524# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1524# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1525/* COMMAND */ 1525/* COMMAND */
1526# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1526# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1527 /* 0 - none 1527 /* 0 - none
1528 * 1 - 8 in 16 1528 * 1 - 8 in 16
1529 * 2 - 8 in 32 1529 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 5003385a7512..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
1004 .wait_for_vblank = &avivo_wait_for_vblank, 1004 .wait_for_vblank = &avivo_wait_for_vblank,
1005 .set_backlight_level = &atombios_set_backlight_level, 1005 .set_backlight_level = &atombios_set_backlight_level,
1006 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1007 .hdmi_enable = &r600_hdmi_enable,
1008 .hdmi_setmode = &r600_hdmi_setmode,
1007 }, 1009 },
1008 .copy = { 1010 .copy = {
1009 .blit = &r600_copy_cpdma, 1011 .blit = &r600_copy_cpdma,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); 1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
1368 uint16_t data_offset, size; 1368 uint16_t data_offset, size;
1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
1370 struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
1370 uint8_t frev, crev; 1371 uint8_t frev, crev;
1371 int i, num_indices; 1372 int i, num_indices;
1372 1373
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1378 1379
1379 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1380 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1380 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); 1381 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1381 1382 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1383 ((u8 *)&ss_info->asSS_Info[0]);
1382 for (i = 0; i < num_indices; i++) { 1384 for (i = 0; i < num_indices; i++) {
1383 if (ss_info->asSS_Info[i].ucSS_Id == id) { 1385 if (ss_assign->ucSS_Id == id) {
1384 ss->percentage = 1386 ss->percentage =
1385 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); 1387 le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
1386 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; 1388 ss->type = ss_assign->ucSpreadSpectrumType;
1387 ss->step = ss_info->asSS_Info[i].ucSS_Step; 1389 ss->step = ss_assign->ucSS_Step;
1388 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1390 ss->delay = ss_assign->ucSS_Delay;
1389 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1391 ss->range = ss_assign->ucSS_Range;
1390 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1392 ss->refdiv = ss_assign->ucRecommendedRef_Div;
1391 return true; 1393 return true;
1392 } 1394 }
1395 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1396 ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
1393 } 1397 }
1394 } 1398 }
1395 return false; 1399 return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
1477 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; 1481 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
1478}; 1482};
1479 1483
1484union asic_ss_assignment {
1485 struct _ATOM_ASIC_SS_ASSIGNMENT v1;
1486 struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
1487 struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
1488};
1489
1480bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 1490bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1481 struct radeon_atom_ss *ss, 1491 struct radeon_atom_ss *ss,
1482 int id, u32 clock) 1492 int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1485 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1495 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1486 uint16_t data_offset, size; 1496 uint16_t data_offset, size;
1487 union asic_ss_info *ss_info; 1497 union asic_ss_info *ss_info;
1498 union asic_ss_assignment *ss_assign;
1488 uint8_t frev, crev; 1499 uint8_t frev, crev;
1489 int i, num_indices; 1500 int i, num_indices;
1490 1501
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1509 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1520 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1510 sizeof(ATOM_ASIC_SS_ASSIGNMENT); 1521 sizeof(ATOM_ASIC_SS_ASSIGNMENT);
1511 1522
1523 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
1512 for (i = 0; i < num_indices; i++) { 1524 for (i = 0; i < num_indices; i++) {
1513 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1525 if ((ss_assign->v1.ucClockIndication == id) &&
1514 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { 1526 (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
1515 ss->percentage = 1527 ss->percentage =
1516 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1528 le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
1517 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1529 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
1518 ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); 1530 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
1519 return true; 1531 return true;
1520 } 1532 }
1533 ss_assign = (union asic_ss_assignment *)
1534 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1521 } 1535 }
1522 break; 1536 break;
1523 case 2: 1537 case 2:
1524 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1538 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1525 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1539 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1540 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
1526 for (i = 0; i < num_indices; i++) { 1541 for (i = 0; i < num_indices; i++) {
1527 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1542 if ((ss_assign->v2.ucClockIndication == id) &&
1528 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { 1543 (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
1529 ss->percentage = 1544 ss->percentage =
1530 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1545 le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
1531 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1546 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
1532 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1547 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
1533 if ((crev == 2) && 1548 if ((crev == 2) &&
1534 ((id == ASIC_INTERNAL_ENGINE_SS) || 1549 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))) 1550 (id == ASIC_INTERNAL_MEMORY_SS)))
1536 ss->rate /= 100; 1551 ss->rate /= 100;
1537 return true; 1552 return true;
1538 } 1553 }
1554 ss_assign = (union asic_ss_assignment *)
1555 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
1539 } 1556 }
1540 break; 1557 break;
1541 case 3: 1558 case 3:
1542 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1559 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1543 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1560 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1561 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
1544 for (i = 0; i < num_indices; i++) { 1562 for (i = 0; i < num_indices; i++) {
1545 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1563 if ((ss_assign->v3.ucClockIndication == id) &&
1546 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { 1564 (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
1547 ss->percentage = 1565 ss->percentage =
1548 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1566 le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
1549 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1567 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
1550 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1568 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
1551 if ((id == ASIC_INTERNAL_ENGINE_SS) || 1569 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1552 (id == ASIC_INTERNAL_MEMORY_SS)) 1570 (id == ASIC_INTERNAL_MEMORY_SS))
1553 ss->rate /= 100; 1571 ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1555 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1573 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1556 return true; 1574 return true;
1557 } 1575 }
1576 ss_assign = (union asic_ss_assignment *)
1577 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
1558 } 1578 }
1559 break; 1579 break;
1560 default: 1580 default:
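The radeon_atombios.c hunks above replace array indexing over the ATOM spread-spectrum tables with an explicit pointer walk that advances by the revision-specific record size, presumably so the stride always matches the layout the BIOS wrote rather than whatever struct the header happens to declare. A toy standalone sketch of walking a packed record blob by record size; the record layout is invented, not the ATOM structs.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct rec_v1 {                 /* invented record: an id plus a percentage */
    uint8_t  id;
    uint16_t pct;
} __attribute__((packed));       /* GCC/Clang packed attribute, so sizeof is the on-disk size */

int main(void)
{
    uint8_t blob[3 * sizeof(struct rec_v1)];
    for (int i = 0; i < 3; i++) {
        struct rec_v1 r = { (uint8_t)(10 + i), (uint16_t)(100 * (i + 1)) };
        memcpy(blob + i * sizeof(r), &r, sizeof(r));
    }

    const uint8_t *p = blob;     /* walk by explicit record size, as the new code does */
    for (int i = 0; i < 3; i++) {
        struct rec_v1 r;
        memcpy(&r, p, sizeof(r));
        printf("id=%u pct=%u\n", (unsigned)r.id, (unsigned)r.pct);
        p += sizeof(struct rec_v1);
    }
    return 0;
}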
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 79159b5da05b..64565732cb98 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1658 drm_object_attach_property(&radeon_connector->base.base, 1658 drm_object_attach_property(&radeon_connector->base.base,
1659 rdev->mode_info.underscan_vborder_property, 1659 rdev->mode_info.underscan_vborder_property,
1660 0); 1660 0);
1661 drm_object_attach_property(&radeon_connector->base.base, 1661 if (radeon_audio != 0)
1662 rdev->mode_info.audio_property, 1662 drm_object_attach_property(&radeon_connector->base.base,
1663 RADEON_AUDIO_DISABLE); 1663 rdev->mode_info.audio_property,
1664 (radeon_audio == 1) ?
1665 RADEON_AUDIO_AUTO :
1666 RADEON_AUDIO_DISABLE);
1664 subpixel_order = SubPixelHorizontalRGB; 1667 subpixel_order = SubPixelHorizontalRGB;
1665 connector->interlace_allowed = true; 1668 connector->interlace_allowed = true;
1666 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1669 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1754 rdev->mode_info.underscan_vborder_property, 1757 rdev->mode_info.underscan_vborder_property,
1755 0); 1758 0);
1756 } 1759 }
1757 if (ASIC_IS_DCE2(rdev)) { 1760 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1758 drm_object_attach_property(&radeon_connector->base.base, 1761 drm_object_attach_property(&radeon_connector->base.base,
1759 rdev->mode_info.audio_property, 1762 rdev->mode_info.audio_property,
1760 RADEON_AUDIO_DISABLE); 1763 (radeon_audio == 1) ?
1764 RADEON_AUDIO_AUTO :
1765 RADEON_AUDIO_DISABLE);
1761 } 1766 }
1762 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1767 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1763 radeon_connector->dac_load_detect = true; 1768 radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1799 rdev->mode_info.underscan_vborder_property, 1804 rdev->mode_info.underscan_vborder_property,
1800 0); 1805 0);
1801 } 1806 }
1802 if (ASIC_IS_DCE2(rdev)) { 1807 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1803 drm_object_attach_property(&radeon_connector->base.base, 1808 drm_object_attach_property(&radeon_connector->base.base,
1804 rdev->mode_info.audio_property, 1809 rdev->mode_info.audio_property,
1805 RADEON_AUDIO_DISABLE); 1810 (radeon_audio == 1) ?
1811 RADEON_AUDIO_AUTO :
1812 RADEON_AUDIO_DISABLE);
1806 } 1813 }
1807 subpixel_order = SubPixelHorizontalRGB; 1814 subpixel_order = SubPixelHorizontalRGB;
1808 connector->interlace_allowed = true; 1815 connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1843 rdev->mode_info.underscan_vborder_property, 1850 rdev->mode_info.underscan_vborder_property,
1844 0); 1851 0);
1845 } 1852 }
1846 if (ASIC_IS_DCE2(rdev)) { 1853 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1847 drm_object_attach_property(&radeon_connector->base.base, 1854 drm_object_attach_property(&radeon_connector->base.base,
1848 rdev->mode_info.audio_property, 1855 rdev->mode_info.audio_property,
1849 RADEON_AUDIO_DISABLE); 1856 (radeon_audio == 1) ?
1857 RADEON_AUDIO_AUTO :
1858 RADEON_AUDIO_DISABLE);
1850 } 1859 }
1851 connector->interlace_allowed = true; 1860 connector->interlace_allowed = true;
1852 /* in theory with a DP to VGA converter... */ 1861 /* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ac6ece61a476..80285e35bc65 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,7 +85,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
85 VRAM, also but everything into VRAM on AGP cards to avoid 85 VRAM, also but everything into VRAM on AGP cards to avoid
86 image corruptions */ 86 image corruptions */
87 if (p->ring == R600_RING_TYPE_UVD_INDEX && 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { 88 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
89 /* TODO: is this still needed for NI+ ? */ 89 /* TODO: is this still needed for NI+ ? */
90 p->relocs[i].lobj.domain = 90 p->relocs[i].lobj.domain =
91 RADEON_GEM_DOMAIN_VRAM; 91 RADEON_GEM_DOMAIN_VRAM;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e29faa73b574..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
1320 return r; 1320 return r;
1321 } 1321 }
1322 if ((radeon_testing & 1)) { 1322 if ((radeon_testing & 1)) {
1323 radeon_test_moves(rdev); 1323 if (rdev->accel_working)
1324 radeon_test_moves(rdev);
1325 else
1326 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1324 } 1327 }
1325 if ((radeon_testing & 2)) { 1328 if ((radeon_testing & 2)) {
1326 radeon_test_syncing(rdev); 1329 if (rdev->accel_working)
1330 radeon_test_syncing(rdev);
1331 else
1332 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1327 } 1333 }
1328 if (radeon_benchmarking) { 1334 if (radeon_benchmarking) {
1329 radeon_benchmark(rdev, radeon_benchmarking); 1335 if (rdev->accel_working)
1336 radeon_benchmark(rdev, radeon_benchmarking);
1337 else
1338 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1330 } 1339 }
1331 return 0; 1340 return 0;
1332} 1341}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cdd12dcd988b..9c14a1ba1de4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
153int radeon_testing = 0; 153int radeon_testing = 0;
154int radeon_connector_table = 0; 154int radeon_connector_table = 0;
155int radeon_tv = 1; 155int radeon_tv = 1;
156int radeon_audio = 1; 156int radeon_audio = -1;
157int radeon_disp_priority = 0; 157int radeon_disp_priority = 0;
158int radeon_hw_i2c = 0; 158int radeon_hw_i2c = 0;
159int radeon_pcie_gen2 = -1; 159int radeon_pcie_gen2 = -1;
@@ -196,7 +196,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
196MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 196MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
197module_param_named(tv, radeon_tv, int, 0444); 197module_param_named(tv, radeon_tv, int, 0444);
198 198
199MODULE_PARM_DESC(audio, "Audio enable (1 = enable)"); 199MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
200module_param_named(audio, radeon_audio, int, 0444); 200module_param_named(audio, radeon_audio, int, 0444);
201 201
202MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); 202MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
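radeon.audio becomes a tri-state parameter above (-1 auto, 0 disable, 1 enable), and the radeon_connectors.c hunks earlier in this diff attach the per-connector audio property only when it is non-zero, defaulting the property to AUTO for 1 and DISABLE for -1. A standalone sketch of that mapping:

#include <stdio.h>

enum audio_prop { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

int main(void)
{
    for (int radeon_audio = -1; radeon_audio <= 1; radeon_audio++) {
        if (radeon_audio == 0) {
            printf("audio=%d: property not attached\n", radeon_audio);
            continue;
        }
        enum audio_prop def = (radeon_audio == 1) ? AUDIO_AUTO : AUDIO_DISABLE;
        printf("audio=%d: property attached, default=%d\n", radeon_audio, def);
    }
    return 0;
}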
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 87e1d69e8fdb..4f6b7fc7ad3c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -945,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
945 if (enable) { 945 if (enable) {
946 mutex_lock(&rdev->pm.mutex); 946 mutex_lock(&rdev->pm.mutex);
947 rdev->pm.dpm.uvd_active = true; 947 rdev->pm.dpm.uvd_active = true;
948 /* disable this for now */
949#if 0
948 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 950 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
949 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 951 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
950 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 952 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -954,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
954 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 956 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
955 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 957 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
956 else 958 else
959#endif
957 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 960 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
958 rdev->pm.dpm.state = dpm_state; 961 rdev->pm.dpm.state = dpm_state;
959 mutex_unlock(&rdev->pm.mutex); 962 mutex_unlock(&rdev->pm.mutex);
@@ -1002,7 +1005,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
1002{ 1005{
1003 /* set up the default clocks if the MC ucode is loaded */ 1006 /* set up the default clocks if the MC ucode is loaded */
1004 if ((rdev->family >= CHIP_BARTS) && 1007 if ((rdev->family >= CHIP_BARTS) &&
1005 (rdev->family <= CHIP_HAINAN) && 1008 (rdev->family <= CHIP_CAYMAN) &&
1006 rdev->mc_fw) { 1009 rdev->mc_fw) {
1007 if (rdev->pm.default_vddc) 1010 if (rdev->pm.default_vddc)
1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1011 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1049,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1046 if (ret) { 1049 if (ret) {
1047 DRM_ERROR("radeon: dpm resume failed\n"); 1050 DRM_ERROR("radeon: dpm resume failed\n");
1048 if ((rdev->family >= CHIP_BARTS) && 1051 if ((rdev->family >= CHIP_BARTS) &&
1049 (rdev->family <= CHIP_HAINAN) && 1052 (rdev->family <= CHIP_CAYMAN) &&
1050 rdev->mc_fw) { 1053 rdev->mc_fw) {
1051 if (rdev->pm.default_vddc) 1054 if (rdev->pm.default_vddc)
1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1055 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1100,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1097 radeon_pm_init_profile(rdev); 1100 radeon_pm_init_profile(rdev);
1098 /* set up the default clocks if the MC ucode is loaded */ 1101 /* set up the default clocks if the MC ucode is loaded */
1099 if ((rdev->family >= CHIP_BARTS) && 1102 if ((rdev->family >= CHIP_BARTS) &&
1100 (rdev->family <= CHIP_HAINAN) && 1103 (rdev->family <= CHIP_CAYMAN) &&
1101 rdev->mc_fw) { 1104 rdev->mc_fw) {
1102 if (rdev->pm.default_vddc) 1105 if (rdev->pm.default_vddc)
1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1106 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1186,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1183 if (ret) { 1186 if (ret) {
1184 rdev->pm.dpm_enabled = false; 1187 rdev->pm.dpm_enabled = false;
1185 if ((rdev->family >= CHIP_BARTS) && 1188 if ((rdev->family >= CHIP_BARTS) &&
1186 (rdev->family <= CHIP_HAINAN) && 1189 (rdev->family <= CHIP_CAYMAN) &&
1187 rdev->mc_fw) { 1190 rdev->mc_fw) {
1188 if (rdev->pm.default_vddc) 1191 if (rdev->pm.default_vddc)
1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1192 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
839 * packet that is the root issue 839 * packet that is the root issue
840 */ 840 */
841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
842 for (j = 0; j <= (count + 32); j++) { 842 if (ring->ready) {
843 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); 843 for (j = 0; j <= (count + 32); j++) {
844 i = (i + 1) & ring->ptr_mask; 844 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
845 i = (i + 1) & ring->ptr_mask;
846 }
845 } 847 }
846 return 0; 848 return 0;
847} 849}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f4d6bcee9006..12e8099a0823 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
36 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
37 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
38 uint64_t gtt_addr, vram_addr; 38 uint64_t gtt_addr, vram_addr;
39 unsigned i, n, size; 39 unsigned n, size;
40 int r, ring; 40 int i, r, ring;
41 41
42 switch (flag) { 42 switch (flag) {
43 case RADEON_TEST_COPY_DMA: 43 case RADEON_TEST_COPY_DMA:
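Note on the radeon_test.c hunk above: the loop counter i moves from unsigned to int. A plausible reason (an assumption here, since the error path is not shown in this excerpt) is that the same index is reused in a descending cleanup loop, where an unsigned counter can never go negative and the loop never terminates. A minimal, standalone C sketch of that pitfall:

#include <stdio.h>

int main(void)
{
    int allocated = 3;

    /* Signed index: releases objects 2, 1, 0, then stops. */
    for (int i = allocated - 1; i >= 0; i--)
        printf("release %d\n", i);

    /*
     * With an unsigned index the equivalent loop
     *   for (unsigned i = allocated - 1; i >= 0; i--)
     * never ends: i wraps to UINT_MAX instead of going below zero,
     * so "i >= 0" is always true.
     */
    return 0;
}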
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..308eff5be1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -799,7 +799,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
799 (rdev->pm.dpm.hd != hd)) { 799 (rdev->pm.dpm.hd != hd)) {
800 rdev->pm.dpm.sd = sd; 800 rdev->pm.dpm.sd = sd;
801 rdev->pm.dpm.hd = hd; 801 rdev->pm.dpm.hd = hd;
802 streams_changed = true; 802 /* disable this for now */
803 /*streams_changed = true;*/
803 } 804 }
804 } 805 }
805 806
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c354c1094967..d96f7cbca0a1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -85,6 +85,9 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
85 uint32_t incr, uint32_t flags); 85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable); 87 bool enable);
88static void si_fini_pg(struct radeon_device *rdev);
89static void si_fini_cg(struct radeon_device *rdev);
90static void si_rlc_stop(struct radeon_device *rdev);
88 91
89static const u32 verde_rlc_save_restore_register_list[] = 92static const u32 verde_rlc_save_restore_register_list[] =
90{ 93{
@@ -1678,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1678 fw_name); 1681 fw_name);
1679 release_firmware(rdev->smc_fw); 1682 release_firmware(rdev->smc_fw);
1680 rdev->smc_fw = NULL; 1683 rdev->smc_fw = NULL;
1684 err = 0;
1681 } else if (rdev->smc_fw->size != smc_req_size) { 1685 } else if (rdev->smc_fw->size != smc_req_size) {
1682 printk(KERN_ERR 1686 printk(KERN_ERR
1683 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1687 "si_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3608,6 +3612,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3608 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 3612 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3609 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 3613 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3610 3614
3615 /* disable PG/CG */
3616 si_fini_pg(rdev);
3617 si_fini_cg(rdev);
3618
3619 /* stop the rlc */
3620 si_rlc_stop(rdev);
3621
3611 /* Disable CP parsing/prefetching */ 3622 /* Disable CP parsing/prefetching */
3612 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 3623 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3613 3624
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cfe5d4d28915..2332aa1bf93c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2910 bool disable_sclk_switching = false; 2910 bool disable_sclk_switching = false;
2911 u32 mclk, sclk; 2911 u32 mclk, sclk;
2912 u16 vddc, vddci; 2912 u16 vddc, vddci;
2913 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2913 int i; 2914 int i;
2914 2915
2915 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2916 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2943 } 2944 }
2944 } 2945 }
2945 2946
2947 /* limit clocks to max supported clocks based on voltage dependency tables */
2948 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2949 &max_sclk_vddc);
2950 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2951 &max_mclk_vddci);
2952 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2953 &max_mclk_vddc);
2954
2955 for (i = 0; i < ps->performance_level_count; i++) {
2956 if (max_sclk_vddc) {
2957 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2958 ps->performance_levels[i].sclk = max_sclk_vddc;
2959 }
2960 if (max_mclk_vddci) {
2961 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2962 ps->performance_levels[i].mclk = max_mclk_vddci;
2963 }
2964 if (max_mclk_vddc) {
2965 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2966 ps->performance_levels[i].mclk = max_mclk_vddc;
2967 }
2968 }
2969
2946 /* XXX validate the min clocks required for display */ 2970 /* XXX validate the min clocks required for display */
2947 2971
2948 if (disable_mclk_switching) { 2972 if (disable_mclk_switching) {
@@ -5184,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5184 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 5208 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5185 } 5209 }
5186 j++; 5210 j++;
5187 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5211 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5188 return -EINVAL; 5212 return -EINVAL;
5189 5213
5190 if (!pi->mem_gddr5) { 5214 if (!pi->mem_gddr5) {
@@ -5194,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5194 table->mc_reg_table_entry[k].mc_data[j] = 5218 table->mc_reg_table_entry[k].mc_data[j] =
5195 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 5219 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5196 j++; 5220 j++;
5197 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5221 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5198 return -EINVAL; 5222 return -EINVAL;
5199 } 5223 }
5200 break; 5224 break;
@@ -5207,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5207 (temp_reg & 0xffff0000) | 5231 (temp_reg & 0xffff0000) |
5208 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 5232 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5209 j++; 5233 j++;
5210 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5234 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5211 return -EINVAL; 5235 return -EINVAL;
5212 break; 5236 break;
5213 default: 5237 default:
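Besides capping each performance level's sclk/mclk to the limits read from the voltage dependency tables, the si_dpm.c hunks above tighten three bound checks from "j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE" to "j >=". Assuming j is subsequently used to index an array with SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE entries (that use is not shown in this excerpt), the strict ">" still admits j equal to the array size, which is one slot past the end. A standalone sketch of the general rule, with illustrative names only:

#include <stdio.h>

#define ARRAY_LEN 4

int main(void)
{
    int data[ARRAY_LEN] = { 0 };
    int j = 0;

    for (;;) {
        data[j] = j;          /* valid only while j < ARRAY_LEN */
        j++;
        if (j >= ARRAY_LEN)   /* "j > ARRAY_LEN" would permit one
                               * out-of-bounds write at data[4] */
            break;
    }

    printf("filled %d entries\n", j);
    return 0;
}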
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 52d2ab6b67a0..7e2e0ea66a00 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1553,7 +1553,7 @@
1553 * 6. COMMAND [30:21] | BYTE_COUNT [20:0] 1553 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
1554 */ 1554 */
1555# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1555# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1556 /* 0 - SRC_ADDR 1556 /* 0 - DST_ADDR
1557 * 1 - GDS 1557 * 1 - GDS
1558 */ 1558 */
1559# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1559# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1568,7 +1568,7 @@
1568# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1568# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1569/* COMMAND */ 1569/* COMMAND */
1570# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1570# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1571# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1571# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1572 /* 0 - none 1572 /* 0 - none
1573 * 1 - 8 in 16 1573 * 1 - 8 in 16
1574 * 2 - 8 in 32 1574 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 7f998bf1cc9d..9364129ba292 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1869 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1870 1870
1871 pi->enable_bapm = true; 1871 pi->enable_bapm = false;
1872 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1873 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1874 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1a90f0a2f7e5..0508f93b9795 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
740 struct vmw_fpriv *vmw_fp; 740 struct vmw_fpriv *vmw_fp;
741 741
742 vmw_fp = vmw_fpriv(file_priv); 742 vmw_fp = vmw_fpriv(file_priv);
743 ttm_object_file_release(&vmw_fp->tfile); 743
744 if (vmw_fp->locked_master) 744 if (vmw_fp->locked_master) {
745 struct vmw_master *vmaster =
746 vmw_master(vmw_fp->locked_master);
747
748 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
749 ttm_vt_unlock(&vmaster->lock);
745 drm_master_put(&vmw_fp->locked_master); 750 drm_master_put(&vmw_fp->locked_master);
751 }
752
753 ttm_object_file_release(&vmw_fp->tfile);
746 kfree(vmw_fp); 754 kfree(vmw_fp);
747} 755}
748 756
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
925 933
926 vmw_fp->locked_master = drm_master_get(file_priv->master); 934 vmw_fp->locked_master = drm_master_get(file_priv->master);
927 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 935 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
928 vmw_execbuf_release_pinned_bo(dev_priv);
929
930 if (unlikely((ret != 0))) { 936 if (unlikely((ret != 0))) {
931 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 937 DRM_ERROR("Unable to lock TTM at VT switch.\n");
932 drm_master_put(&vmw_fp->locked_master); 938 drm_master_put(&vmw_fp->locked_master);
933 } 939 }
934 940
935 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 941 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
942 vmw_execbuf_release_pinned_bo(dev_priv);
936 943
937 if (!dev_priv->enable_fb) { 944 if (!dev_priv->enable_fb) {
938 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 945 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065d..37fb4befec82 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
970 if (new_backup) 970 if (new_backup)
971 res->backup_offset = new_backup_offset; 971 res->backup_offset = new_backup_offset;
972 972
973 if (!res->func->may_evict) 973 if (!res->func->may_evict || res->id == -1)
974 return; 974 return;
975 975
976 write_lock(&dev_priv->resource_lock); 976 write_lock(&dev_priv->resource_lock);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71b70e3a7a71..c91d547191dd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -241,6 +241,7 @@ config HID_HOLTEK
241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice 241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice
242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / 242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
243 Zalman ZM-GM1 243 Zalman ZM-GM1
244 - SHARKOON DarkGlider Gaming mouse
244 245
245config HOLTEK_FF 246config HOLTEK_FF
246 bool "Holtek On Line Grip force feedback support" 247 bool "Holtek On Line Grip force feedback support"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b8470b1a10fe..e80da62363bc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
319 319
320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
321{ 321{
322 __u32 raw_value; 322 __s32 raw_value;
323 switch (item->tag) { 323 switch (item->tag) {
324 case HID_GLOBAL_ITEM_TAG_PUSH: 324 case HID_GLOBAL_ITEM_TAG_PUSH:
325 325
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
370 return 0; 370 return 0;
371 371
372 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 372 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
373 /* Units exponent negative numbers are given through a 373 /* Many devices provide unit exponent as a two's complement
374 * two's complement. 374 * nibble due to the common misunderstanding of HID
375 * See "6.2.2.7 Global Items" for more information. */ 375 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
376 raw_value = item_udata(item); 376 * both this and the standard encoding. */
377 raw_value = item_sdata(item);
377 if (!(raw_value & 0xfffffff0)) 378 if (!(raw_value & 0xfffffff0))
378 parser->global.unit_exponent = hid_snto32(raw_value, 4); 379 parser->global.unit_exponent = hid_snto32(raw_value, 4);
379 else 380 else
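The unit-exponent hunk above switches from item_udata() to item_sdata() and, when the reported value fits in the low nibble, decodes it as a 4-bit two's complement number via hid_snto32(raw_value, 4). For illustration only (this is not the kernel's hid_snto32() implementation), sign-extending an n-bit two's complement field looks like this:

#include <stdio.h>
#include <stdint.h>

/* Interpret the low n bits of value as a two's complement number. */
static int32_t snto32(uint32_t value, unsigned int n)
{
    uint32_t mask = (1u << n) - 1;
    uint32_t sign = 1u << (n - 1);

    value &= mask;
    return (value & sign) ? (int32_t)(value | ~mask) : (int32_t)value;
}

int main(void)
{
    /* A unit exponent of -3 encoded in a nibble is 0xD. */
    printf("0xD -> %d\n", (int)snto32(0xD, 4)); /* prints -3 */
    printf("0x7 -> %d\n", (int)snto32(0x7, 4)); /* prints  7 */
    return 0;
}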
@@ -1715,6 +1716,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1715 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, 1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 1718 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
1719 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1718 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, 1720 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
1719 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1721 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1720 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1722 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
@@ -1869,6 +1871,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1869 1871
1870 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
1871 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1873 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1874 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1875 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1873 { } 1876 { }
1874}; 1877};
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 7e6db3cf46f9..e696566cde46 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -27,6 +27,7 @@
27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
29 * and Zalman ZM-GM1 29 * and Zalman ZM-GM1
30 * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
30 */ 31 */
31 32
32static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, 33static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
46 } 47 }
47 break; 48 break;
48 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: 49 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
50 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
49 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f 51 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
50 && rdesc[111] == 0xff && rdesc[112] == 0x7f) { 52 && rdesc[111] == 0xff && rdesc[112] == 0x7f) {
51 hid_info(hdev, "Fixing up report descriptor\n"); 53 hid_info(hdev, "Fixing up report descriptor\n");
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
63 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
64 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, 66 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 67 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
68 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
69 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
66 { } 70 { }
67}; 71};
68MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); 72MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e60e8d530697..f0296a50be5f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -450,6 +450,7 @@
450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a 452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
453#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
453 454
454#define USB_VENDOR_ID_IMATION 0x0718 455#define USB_VENDOR_ID_IMATION 0x0718
455#define USB_DEVICE_ID_DISC_STAKKA 0xd000 456#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -632,6 +633,7 @@
632#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 633#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
633 634
634#define USB_VENDOR_ID_NINTENDO 0x057e 635#define USB_VENDOR_ID_NINTENDO 0x057e
636#define USB_VENDOR_ID_NINTENDO2 0x054c
635#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 637#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
636#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 638#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
637 639
@@ -791,6 +793,8 @@
791#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 793#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
792#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 794#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
793#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 795#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
796#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
797#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
794 798
795#define USB_VENDOR_ID_THINGM 0x27b8 799#define USB_VENDOR_ID_THINGM 0x27b8
796#define USB_DEVICE_ID_BLINK1 0x01ed 800#define USB_DEVICE_ID_BLINK1 0x01ed
@@ -918,4 +922,7 @@
918#define USB_VENDOR_ID_PRIMAX 0x0461 922#define USB_VENDOR_ID_PRIMAX 0x0461
919#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 923#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
920 924
925#define USB_VENDOR_ID_SIS 0x0457
926#define USB_DEVICE_ID_SIS_TS 0x1013
927
921#endif 928#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 8741d953dcc8..d97f2323af57 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
192 return -EINVAL; 192 return -EINVAL;
193} 193}
194 194
195
195/** 196/**
196 * hidinput_calc_abs_res - calculate an absolute axis resolution 197 * hidinput_calc_abs_res - calculate an absolute axis resolution
197 * @field: the HID report field to calculate resolution for 198 * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
234 case ABS_MT_TOOL_Y: 235 case ABS_MT_TOOL_Y:
235 case ABS_MT_TOUCH_MAJOR: 236 case ABS_MT_TOUCH_MAJOR:
236 case ABS_MT_TOUCH_MINOR: 237 case ABS_MT_TOUCH_MINOR:
237 if (field->unit & 0xffffff00) /* Not a length */ 238 if (field->unit == 0x11) { /* If centimeters */
238 return 0;
239 unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
240 switch (field->unit & 0xf) {
241 case 0x1: /* If centimeters */
242 /* Convert to millimeters */ 239 /* Convert to millimeters */
243 unit_exponent += 1; 240 unit_exponent += 1;
244 break; 241 } else if (field->unit == 0x13) { /* If inches */
245 case 0x3: /* If inches */
246 /* Convert to millimeters */ 242 /* Convert to millimeters */
247 prev = physical_extents; 243 prev = physical_extents;
248 physical_extents *= 254; 244 physical_extents *= 254;
249 if (physical_extents < prev) 245 if (physical_extents < prev)
250 return 0; 246 return 0;
251 unit_exponent -= 1; 247 unit_exponent -= 1;
252 break; 248 } else {
253 default:
254 return 0; 249 return 0;
255 } 250 }
256 break; 251 break;
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 602c188e9d86..6101816a7ddd 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
382} 382}
383#define PROFILE_ATTR(number) \ 383#define PROFILE_ATTR(number) \
384static struct bin_attribute bin_attr_profile##number = { \ 384static struct bin_attribute bin_attr_profile##number = { \
385 .attr = { .name = "profile##number", .mode = 0660 }, \ 385 .attr = { .name = "profile" #number, .mode = 0660 }, \
386 .size = sizeof(struct kone_profile), \ 386 .size = sizeof(struct kone_profile), \
387 .read = kone_sysfs_read_profilex, \ 387 .read = kone_sysfs_read_profilex, \
388 .write = kone_sysfs_write_profilex, \ 388 .write = kone_sysfs_write_profilex, \
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 5ddf605b6b89..5e99fcdc71b9 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
229 229
230#define PROFILE_ATTR(number) \ 230#define PROFILE_ATTR(number) \
231static struct bin_attribute bin_attr_profile##number##_settings = { \ 231static struct bin_attribute bin_attr_profile##number##_settings = { \
232 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 232 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
233 .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \ 233 .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
234 .read = koneplus_sysfs_read_profilex_settings, \ 234 .read = koneplus_sysfs_read_profilex_settings, \
235 .private = &profile_numbers[number-1], \ 235 .private = &profile_numbers[number-1], \
236}; \ 236}; \
237static struct bin_attribute bin_attr_profile##number##_buttons = { \ 237static struct bin_attribute bin_attr_profile##number##_buttons = { \
238 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 238 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
239 .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \ 239 .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
240 .read = koneplus_sysfs_read_profilex_buttons, \ 240 .read = koneplus_sysfs_read_profilex_buttons, \
241 .private = &profile_numbers[number-1], \ 241 .private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 515bc03136c0..0c8e1ef0b67d 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
257 257
258#define PROFILE_ATTR(number) \ 258#define PROFILE_ATTR(number) \
259static struct bin_attribute bin_attr_profile##number##_settings = { \ 259static struct bin_attribute bin_attr_profile##number##_settings = { \
260 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 260 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
261 .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \ 261 .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
262 .read = kovaplus_sysfs_read_profilex_settings, \ 262 .read = kovaplus_sysfs_read_profilex_settings, \
263 .private = &profile_numbers[number-1], \ 263 .private = &profile_numbers[number-1], \
264}; \ 264}; \
265static struct bin_attribute bin_attr_profile##number##_buttons = { \ 265static struct bin_attribute bin_attr_profile##number##_buttons = { \
266 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 266 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
267 .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \ 267 .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
268 .read = kovaplus_sysfs_read_profilex_buttons, \ 268 .read = kovaplus_sysfs_read_profilex_buttons, \
269 .private = &profile_numbers[number-1], \ 269 .private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 5a6dbbeee790..1a07e07d99a0 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
225 225
226#define PROFILE_ATTR(number) \ 226#define PROFILE_ATTR(number) \
227static struct bin_attribute bin_attr_profile##number##_settings = { \ 227static struct bin_attribute bin_attr_profile##number##_settings = { \
228 .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ 228 .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
229 .size = PYRA_SIZE_PROFILE_SETTINGS, \ 229 .size = PYRA_SIZE_PROFILE_SETTINGS, \
230 .read = pyra_sysfs_read_profilex_settings, \ 230 .read = pyra_sysfs_read_profilex_settings, \
231 .private = &profile_numbers[number-1], \ 231 .private = &profile_numbers[number-1], \
232}; \ 232}; \
233static struct bin_attribute bin_attr_profile##number##_buttons = { \ 233static struct bin_attribute bin_attr_profile##number##_buttons = { \
234 .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ 234 .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
235 .size = PYRA_SIZE_PROFILE_BUTTONS, \ 235 .size = PYRA_SIZE_PROFILE_BUTTONS, \
236 .read = pyra_sysfs_read_profilex_buttons, \ 236 .read = pyra_sysfs_read_profilex_buttons, \
237 .private = &profile_numbers[number-1], \ 237 .private = &profile_numbers[number-1], \
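The four Roccat hunks above (kone, koneplus, kovaplus, pyra) fix the same bug: "profile##number" is an ordinary string literal, so the sysfs attributes came out literally named profile##number or profile##number##_settings instead of profile1, profile1_settings, and so on. Token pasting and stringification happen only at preprocessing time; the working form stringifies the macro argument with # and relies on adjacent string-literal concatenation. A standalone illustration (macro names here are invented for the example):

#include <stdio.h>

#define NAME_WRONG(number) "profile##number"
#define NAME_RIGHT(number) "profile" #number

int main(void)
{
    printf("wrong: %s\n", NAME_WRONG(2)); /* prints profile##number */
    printf("right: %s\n", NAME_RIGHT(2)); /* prints profile2 */
    return 0;
}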
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index abb20db2b443..1446f526ee8b 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
834 goto done; 834 goto done;
835 } 835 }
836 836
837 if (vendor == USB_VENDOR_ID_NINTENDO) { 837 if (vendor == USB_VENDOR_ID_NINTENDO ||
838 vendor == USB_VENDOR_ID_NINTENDO2) {
838 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
839 devtype = WIIMOTE_DEV_GEN10; 840 devtype = WIIMOTE_DEV_GEN10;
840 goto done; 841 goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
1855static const struct hid_device_id wiimote_hid_devices[] = { 1856static const struct hid_device_id wiimote_hid_devices[] = {
1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1857 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1857 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1858 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1859 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
1860 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1858 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1861 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1859 USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1862 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1860 { } 1863 { }
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 2e7d644dba18..71adf9e60b13 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = {
119 * the rumble motor, this flag shouldn't be set. 119 * the rumble motor, this flag shouldn't be set.
120 */ 120 */
121 121
122/* used by wiimod_rumble and wiipro_rumble */
123static void wiimod_rumble_worker(struct work_struct *work)
124{
125 struct wiimote_data *wdata = container_of(work, struct wiimote_data,
126 rumble_worker);
127
128 spin_lock_irq(&wdata->state.lock);
129 wiiproto_req_rumble(wdata, wdata->state.cache_rumble);
130 spin_unlock_irq(&wdata->state.lock);
131}
132
122static int wiimod_rumble_play(struct input_dev *dev, void *data, 133static int wiimod_rumble_play(struct input_dev *dev, void *data,
123 struct ff_effect *eff) 134 struct ff_effect *eff)
124{ 135{
125 struct wiimote_data *wdata = input_get_drvdata(dev); 136 struct wiimote_data *wdata = input_get_drvdata(dev);
126 __u8 value; 137 __u8 value;
127 unsigned long flags;
128 138
129 /* 139 /*
130 * The wiimote supports only a single rumble motor so if any magnitude 140 * The wiimote supports only a single rumble motor so if any magnitude
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
137 else 147 else
138 value = 0; 148 value = 0;
139 149
140 spin_lock_irqsave(&wdata->state.lock, flags); 150 /* Locking state.lock here might deadlock with input_event() calls.
141 wiiproto_req_rumble(wdata, value); 151 * schedule_work acts as barrier. Merging multiple changes is fine. */
142 spin_unlock_irqrestore(&wdata->state.lock, flags); 152 wdata->state.cache_rumble = value;
153 schedule_work(&wdata->rumble_worker);
143 154
144 return 0; 155 return 0;
145} 156}
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
147static int wiimod_rumble_probe(const struct wiimod_ops *ops, 158static int wiimod_rumble_probe(const struct wiimod_ops *ops,
148 struct wiimote_data *wdata) 159 struct wiimote_data *wdata)
149{ 160{
161 INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
162
150 set_bit(FF_RUMBLE, wdata->input->ffbit); 163 set_bit(FF_RUMBLE, wdata->input->ffbit);
151 if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) 164 if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play))
152 return -ENOMEM; 165 return -ENOMEM;
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops,
159{ 172{
160 unsigned long flags; 173 unsigned long flags;
161 174
175 cancel_work_sync(&wdata->rumble_worker);
176
162 spin_lock_irqsave(&wdata->state.lock, flags); 177 spin_lock_irqsave(&wdata->state.lock, flags);
163 wiiproto_req_rumble(wdata, 0); 178 wiiproto_req_rumble(wdata, 0);
164 spin_unlock_irqrestore(&wdata->state.lock, flags); 179 spin_unlock_irqrestore(&wdata->state.lock, flags);
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
1731{ 1746{
1732 struct wiimote_data *wdata = input_get_drvdata(dev); 1747 struct wiimote_data *wdata = input_get_drvdata(dev);
1733 __u8 value; 1748 __u8 value;
1734 unsigned long flags;
1735 1749
1736 /* 1750 /*
1737 * The wiimote supports only a single rumble motor so if any magnitude 1751 * The wiimote supports only a single rumble motor so if any magnitude
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
1744 else 1758 else
1745 value = 0; 1759 value = 0;
1746 1760
1747 spin_lock_irqsave(&wdata->state.lock, flags); 1761 /* Locking state.lock here might deadlock with input_event() calls.
1748 wiiproto_req_rumble(wdata, value); 1762 * schedule_work acts as barrier. Merging multiple changes is fine. */
1749 spin_unlock_irqrestore(&wdata->state.lock, flags); 1763 wdata->state.cache_rumble = value;
1764 schedule_work(&wdata->rumble_worker);
1750 1765
1751 return 0; 1766 return 0;
1752} 1767}
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
1756{ 1771{
1757 int ret, i; 1772 int ret, i;
1758 1773
1774 INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
1775
1759 wdata->extension.input = input_allocate_device(); 1776 wdata->extension.input = input_allocate_device();
1760 if (!wdata->extension.input) 1777 if (!wdata->extension.input)
1761 return -ENOMEM; 1778 return -ENOMEM;
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
1817 if (!wdata->extension.input) 1834 if (!wdata->extension.input)
1818 return; 1835 return;
1819 1836
1837 input_unregister_device(wdata->extension.input);
1838 wdata->extension.input = NULL;
1839 cancel_work_sync(&wdata->rumble_worker);
1840
1820 spin_lock_irqsave(&wdata->state.lock, flags); 1841 spin_lock_irqsave(&wdata->state.lock, flags);
1821 wiiproto_req_rumble(wdata, 0); 1842 wiiproto_req_rumble(wdata, 0);
1822 spin_unlock_irqrestore(&wdata->state.lock, flags); 1843 spin_unlock_irqrestore(&wdata->state.lock, flags);
1823
1824 input_unregister_device(wdata->extension.input);
1825 wdata->extension.input = NULL;
1826} 1844}
1827 1845
1828static const struct wiimod_ops wiimod_pro = { 1846static const struct wiimod_ops wiimod_pro = {
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index f1474f372c0b..75db0c400037 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -133,13 +133,15 @@ struct wiimote_state {
133 __u8 *cmd_read_buf; 133 __u8 *cmd_read_buf;
134 __u8 cmd_read_size; 134 __u8 cmd_read_size;
135 135
136 /* calibration data */ 136 /* calibration/cache data */
137 __u16 calib_bboard[4][3]; 137 __u16 calib_bboard[4][3];
138 __u8 cache_rumble;
138}; 139};
139 140
140struct wiimote_data { 141struct wiimote_data {
141 struct hid_device *hdev; 142 struct hid_device *hdev;
142 struct input_dev *input; 143 struct input_dev *input;
144 struct work_struct rumble_worker;
143 struct led_classdev *leds[4]; 145 struct led_classdev *leds[4];
144 struct input_dev *accel; 146 struct input_dev *accel;
145 struct input_dev *ir; 147 struct input_dev *ir;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 8918dd12bb69..6a6dd5cd7833 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on)
308static void drop_ref(struct hidraw *hidraw, int exists_bit) 308static void drop_ref(struct hidraw *hidraw, int exists_bit)
309{ 309{
310 if (exists_bit) { 310 if (exists_bit) {
311 hid_hw_close(hidraw->hid);
312 hidraw->exist = 0; 311 hidraw->exist = 0;
313 if (hidraw->open) 312 if (hidraw->open) {
313 hid_hw_close(hidraw->hid);
314 wake_up_interruptible(&hidraw->wait); 314 wake_up_interruptible(&hidraw->wait);
315 }
315 } else { 316 } else {
316 --hidraw->open; 317 --hidraw->open;
317 } 318 }
318 319 if (!hidraw->open) {
319 if (!hidraw->open && !hidraw->exist) { 320 if (!hidraw->exist) {
320 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 321 device_destroy(hidraw_class,
321 hidraw_table[hidraw->minor] = NULL; 322 MKDEV(hidraw_major, hidraw->minor));
322 kfree(hidraw); 323 hidraw_table[hidraw->minor] = NULL;
324 kfree(hidraw);
325 } else {
326 /* close device for last reader */
327 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
328 hid_hw_close(hidraw->hid);
329 }
323 } 330 }
324} 331}
325 332
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 5bf2fb785844..93b00d76374c 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = {
615 615
616static struct miscdevice uhid_misc = { 616static struct miscdevice uhid_misc = {
617 .fops = &uhid_fops, 617 .fops = &uhid_fops,
618 .minor = MISC_DYNAMIC_MINOR, 618 .minor = UHID_MINOR,
619 .name = UHID_NAME, 619 .name = UHID_NAME,
620}; 620};
621 621
@@ -634,4 +634,5 @@ module_exit(uhid_exit);
634MODULE_LICENSE("GPL"); 634MODULE_LICENSE("GPL");
635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); 635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); 636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
637MODULE_ALIAS_MISCDEV(UHID_MINOR);
637MODULE_ALIAS("devname:" UHID_NAME); 638MODULE_ALIAS("devname:" UHID_NAME);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07345521f421..3fca3be08337 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
113 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
114 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
115 { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
113 116
114 { 0, 0 } 117 { 0, 0 }
115}; 118};
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
195 195
196 do { 196 do {
197 ret = vmbus_negotiate_version(msginfo, version); 197 ret = vmbus_negotiate_version(msginfo, version);
198 if (ret) 198 if (ret == -ETIMEDOUT)
199 goto cleanup; 199 goto cleanup;
200 200
201 if (vmbus_connection.conn_state == CONNECTED) 201 if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
32/* 32/*
33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) 33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
34 */ 34 */
35#define WS2008_SRV_MAJOR 1
36#define WS2008_SRV_MINOR 0
37#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
38
35#define WIN7_SRV_MAJOR 3 39#define WIN7_SRV_MAJOR 3
36#define WIN7_SRV_MINOR 0 40#define WIN7_SRV_MINOR 0
37#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) 41#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
38 42
39#define WIN8_SRV_MAJOR 4 43#define WIN8_SRV_MAJOR 4
40#define WIN8_SRV_MINOR 0 44#define WIN8_SRV_MINOR 0
41#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
42 46
43/* 47/*
44 * Global state maintained for transaction that is being processed. 48 * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
587 591
588 struct icmsg_hdr *icmsghdrp; 592 struct icmsg_hdr *icmsghdrp;
589 struct icmsg_negotiate *negop = NULL; 593 struct icmsg_negotiate *negop = NULL;
594 int util_fw_version;
595 int kvp_srv_version;
590 596
591 if (kvp_transaction.active) { 597 if (kvp_transaction.active) {
592 /* 598 /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
606 612
607 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 613 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
608 /* 614 /*
609 * We start with win8 version and if the host cannot 615 * Based on the host, select appropriate
610 * support that we use the previous version. 616 * framework and service versions we will
617 * negotiate.
611 */ 618 */
612 if (vmbus_prep_negotiate_resp(icmsghdrp, negop, 619 switch (vmbus_proto_version) {
613 recv_buffer, UTIL_FW_MAJOR_MINOR, 620 case (VERSION_WS2008):
614 WIN8_SRV_MAJOR_MINOR)) 621 util_fw_version = UTIL_WS2K8_FW_VERSION;
615 goto done; 622 kvp_srv_version = WS2008_SRV_VERSION;
616 623 break;
624 case (VERSION_WIN7):
625 util_fw_version = UTIL_FW_VERSION;
626 kvp_srv_version = WIN7_SRV_VERSION;
627 break;
628 default:
629 util_fw_version = UTIL_FW_VERSION;
630 kvp_srv_version = WIN8_SRV_VERSION;
631 }
617 vmbus_prep_negotiate_resp(icmsghdrp, negop, 632 vmbus_prep_negotiate_resp(icmsghdrp, negop,
618 recv_buffer, UTIL_FW_MAJOR_MINOR, 633 recv_buffer, util_fw_version,
619 WIN7_SRV_MAJOR_MINOR); 634 kvp_srv_version);
620 635
621 } else { 636 } else {
622 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 637 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
649 return; 664 return;
650 665
651 } 666 }
652done:
653 667
654 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION 668 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
655 | ICMSGHDRFLAG_RESPONSE; 669 | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
26 26
27#define VSS_MAJOR 5 27#define VSS_MAJOR 5
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31
32 32
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
190 190
191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
192 vmbus_prep_negotiate_resp(icmsghdrp, negop, 192 vmbus_prep_negotiate_resp(icmsghdrp, negop,
193 recv_buffer, UTIL_FW_MAJOR_MINOR, 193 recv_buffer, UTIL_FW_VERSION,
194 VSS_MAJOR_MINOR); 194 VSS_VERSION);
195 } else { 195 } else {
196 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 196 vss_msg = (struct hv_vss_msg *)&recv_buffer[
197 sizeof(struct vmbuspipe_hdr) + 197 sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#define SHUTDOWN_MAJOR 3
32#define SHUTDOWN_MINOR 0
33#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
34 31
35#define TIMESYNCH_MAJOR 3 32#define SD_MAJOR 3
36#define TIMESYNCH_MINOR 0 33#define SD_MINOR 0
37#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) 34#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
38 35
39#define HEARTBEAT_MAJOR 3 36#define SD_WS2008_MAJOR 1
40#define HEARTBEAT_MINOR 0 37#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
41#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) 38
39#define TS_MAJOR 3
40#define TS_MINOR 0
41#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
42
43#define TS_WS2008_MAJOR 1
44#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
45
46#define HB_MAJOR 3
47#define HB_MINOR 0
48#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
49
50#define HB_WS2008_MAJOR 1
51#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
52
53static int sd_srv_version;
54static int ts_srv_version;
55static int hb_srv_version;
56static int util_fw_version;
42 57
43static void shutdown_onchannelcallback(void *context); 58static void shutdown_onchannelcallback(void *context);
44static struct hv_util_service util_shutdown = { 59static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
99 114
100 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 115 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
101 vmbus_prep_negotiate_resp(icmsghdrp, negop, 116 vmbus_prep_negotiate_resp(icmsghdrp, negop,
102 shut_txf_buf, UTIL_FW_MAJOR_MINOR, 117 shut_txf_buf, util_fw_version,
103 SHUTDOWN_MAJOR_MINOR); 118 sd_srv_version);
104 } else { 119 } else {
105 shutdown_msg = 120 shutdown_msg =
106 (struct shutdown_msg_data *)&shut_txf_buf[ 121 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
216 struct icmsg_hdr *icmsghdrp; 231 struct icmsg_hdr *icmsghdrp;
217 struct ictimesync_data *timedatap; 232 struct ictimesync_data *timedatap;
218 u8 *time_txf_buf = util_timesynch.recv_buffer; 233 u8 *time_txf_buf = util_timesynch.recv_buffer;
234 struct icmsg_negotiate *negop = NULL;
219 235
220 vmbus_recvpacket(channel, time_txf_buf, 236 vmbus_recvpacket(channel, time_txf_buf,
221 PAGE_SIZE, &recvlen, &requestid); 237 PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
225 sizeof(struct vmbuspipe_hdr)]; 241 sizeof(struct vmbuspipe_hdr)];
226 242
227 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 243 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
228 vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, 244 vmbus_prep_negotiate_resp(icmsghdrp, negop,
229 UTIL_FW_MAJOR_MINOR, 245 time_txf_buf,
230 TIMESYNCH_MAJOR_MINOR); 246 util_fw_version,
247 ts_srv_version);
231 } else { 248 } else {
232 timedatap = (struct ictimesync_data *)&time_txf_buf[ 249 timedatap = (struct ictimesync_data *)&time_txf_buf[
233 sizeof(struct vmbuspipe_hdr) + 250 sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
257 struct icmsg_hdr *icmsghdrp; 274 struct icmsg_hdr *icmsghdrp;
258 struct heartbeat_msg_data *heartbeat_msg; 275 struct heartbeat_msg_data *heartbeat_msg;
259 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 276 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
277 struct icmsg_negotiate *negop = NULL;
260 278
261 vmbus_recvpacket(channel, hbeat_txf_buf, 279 vmbus_recvpacket(channel, hbeat_txf_buf,
262 PAGE_SIZE, &recvlen, &requestid); 280 PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
266 sizeof(struct vmbuspipe_hdr)]; 284 sizeof(struct vmbuspipe_hdr)];
267 285
268 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 286 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
269 vmbus_prep_negotiate_resp(icmsghdrp, NULL, 287 vmbus_prep_negotiate_resp(icmsghdrp, negop,
270 hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, 288 hbeat_txf_buf, util_fw_version,
271 HEARTBEAT_MAJOR_MINOR); 289 hb_srv_version);
272 } else { 290 } else {
273 heartbeat_msg = 291 heartbeat_msg =
274 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 292 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
321 goto error; 339 goto error;
322 340
323 hv_set_drvdata(dev, srv); 341 hv_set_drvdata(dev, srv);
342 /*
343 * Based on the host; initialize the framework and
344 * service version numbers we will negotiate.
345 */
346 switch (vmbus_proto_version) {
347 case (VERSION_WS2008):
348 util_fw_version = UTIL_WS2K8_FW_VERSION;
349 sd_srv_version = SD_WS2008_VERSION;
350 ts_srv_version = TS_WS2008_VERSION;
351 hb_srv_version = HB_WS2008_VERSION;
352 break;
353
354 default:
355 util_fw_version = UTIL_FW_VERSION;
356 sd_srv_version = SD_VERSION;
357 ts_srv_version = TS_VERSION;
358 hb_srv_version = HB_VERSION;
359 }
360
324 return 0; 361 return 0;
325 362
326error: 363error:
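The hv_util.c rework above (and the matching hv_kvp.c and hv_snapshot.c changes) renames the *_MAJOR_MINOR constants to *_VERSION and selects WS2008-compatible values when vmbus_proto_version indicates an older host. The encoding itself is unchanged: the major number sits in the upper 16 bits and the minor in the lower 16. A standalone sketch of packing and unpacking such a version word (MAKE_VERSION and the two extractors below are illustrative helpers, not kernel macros):

#include <stdio.h>

#define MAKE_VERSION(major, minor) (((major) << 16) | (minor))
#define VERSION_MAJOR(v)           ((v) >> 16)
#define VERSION_MINOR(v)           ((v) & 0xffff)

int main(void)
{
    unsigned int sd_version = MAKE_VERSION(3, 0); /* like SD_VERSION */
    unsigned int sd_ws2008  = MAKE_VERSION(1, 0); /* like SD_WS2008_VERSION */

    printf("0x%08x -> %u.%u\n", sd_version,
           VERSION_MAJOR(sd_version), VERSION_MINOR(sd_version));
    printf("0x%08x -> %u.%u\n", sd_ws2008,
           VERSION_MAJOR(sd_ws2008), VERSION_MINOR(sd_ws2008));
    return 0;
}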
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..3288f13d2d87 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -230,6 +230,7 @@ static int send_argument(const char *key)
230 230
231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) 231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
232{ 232{
233 u8 status, data = 0;
233 int i; 234 int i;
234 235
235 if (send_command(cmd) || send_argument(key)) { 236 if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
237 return -EIO; 238 return -EIO;
238 } 239 }
239 240
241 /* This has no effect on newer (2012) SMCs */
240 if (send_byte(len, APPLESMC_DATA_PORT)) { 242 if (send_byte(len, APPLESMC_DATA_PORT)) {
241 pr_warn("%.4s: read len fail\n", key); 243 pr_warn("%.4s: read len fail\n", key);
242 return -EIO; 244 return -EIO;
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
250 buffer[i] = inb(APPLESMC_DATA_PORT); 252 buffer[i] = inb(APPLESMC_DATA_PORT);
251 } 253 }
252 254
255 /* Read the data port until bit0 is cleared */
256 for (i = 0; i < 16; i++) {
257 udelay(APPLESMC_MIN_WAIT);
258 status = inb(APPLESMC_CMD_PORT);
259 if (!(status & 0x01))
260 break;
261 data = inb(APPLESMC_DATA_PORT);
262 }
263 if (i)
264 pr_warn("flushed %d bytes, last value is: %d\n", i, data);
265
253 return 0; 266 return 0;
254} 267}
255 268
@@ -525,16 +538,25 @@ static int applesmc_init_smcreg_try(void)
525{ 538{
526 struct applesmc_registers *s = &smcreg; 539 struct applesmc_registers *s = &smcreg;
527 bool left_light_sensor, right_light_sensor; 540 bool left_light_sensor, right_light_sensor;
541 unsigned int count;
528 u8 tmp[1]; 542 u8 tmp[1];
529 int ret; 543 int ret;
530 544
531 if (s->init_complete) 545 if (s->init_complete)
532 return 0; 546 return 0;
533 547
534 ret = read_register_count(&s->key_count); 548 ret = read_register_count(&count);
535 if (ret) 549 if (ret)
536 return ret; 550 return ret;
537 551
552 if (s->cache && s->key_count != count) {
553 pr_warn("key count changed from %d to %d\n",
554 s->key_count, count);
555 kfree(s->cache);
556 s->cache = NULL;
557 }
558 s->key_count = count;
559
538 if (!s->cache) 560 if (!s->cache)
539 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); 561 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
540 if (!s->cache) 562 if (!s->cache)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
98 98
99#define DW_IC_ERR_TX_ABRT 0x1 99#define DW_IC_ERR_TX_ABRT 0x1
100 100
101#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
102
101/* 103/*
102 * status codes 104 * status codes
103 */ 105 */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
388static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 390static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
389{ 391{
390 struct i2c_msg *msgs = dev->msgs; 392 struct i2c_msg *msgs = dev->msgs;
391 u32 ic_con; 393 u32 ic_con, ic_tar = 0;
392 394
393 /* Disable the adapter */ 395 /* Disable the adapter */
394 __i2c_dw_enable(dev, false); 396 __i2c_dw_enable(dev, false);
395 397
396 /* set the slave (target) address */
397 dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
398
399 /* if the slave address is ten bit address, enable 10BITADDR */ 398 /* if the slave address is ten bit address, enable 10BITADDR */
400 ic_con = dw_readl(dev, DW_IC_CON); 399 ic_con = dw_readl(dev, DW_IC_CON);
401 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 400 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
402 ic_con |= DW_IC_CON_10BITADDR_MASTER; 401 ic_con |= DW_IC_CON_10BITADDR_MASTER;
403 else 402 /*
403 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
404 * mode has to be enabled via bit 12 of IC_TAR register.
405 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
406 * detected from registers.
407 */
408 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
409 } else {
404 ic_con &= ~DW_IC_CON_10BITADDR_MASTER; 410 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
411 }
412
405 dw_writel(dev, ic_con, DW_IC_CON); 413 dw_writel(dev, ic_con, DW_IC_CON);
406 414
415 /*
416 * Set the slave (target) address and enable 10-bit addressing mode
417 * if applicable.
418 */
419 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
420
407 /* Enable the adapter */ 421 /* Enable the adapter */
408 __i2c_dw_enable(dev, true); 422 __i2c_dw_enable(dev, true);
409 423
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4c1b60539a25..0aa01136f8d9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
270MODULE_ALIAS("platform:i2c_designware"); 270MODULE_ALIAS("platform:i2c_designware");
271 271
272static struct platform_driver dw_i2c_driver = { 272static struct platform_driver dw_i2c_driver = {
273 .remove = dw_i2c_remove, 273 .probe = dw_i2c_probe,
274 .remove = dw_i2c_remove,
274 .driver = { 275 .driver = {
275 .name = "i2c_designware", 276 .name = "i2c_designware",
276 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = {
282 283
283static int __init dw_i2c_init_driver(void) 284static int __init dw_i2c_init_driver(void)
284{ 285{
285 return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); 286 return platform_driver_register(&dw_i2c_driver);
286} 287}
287subsys_initcall(dw_i2c_init_driver); 288subsys_initcall(dw_i2c_init_driver);
288 289
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ccf46656bdad..1d7efa3169cd 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
365 clk_disable_unprepare(i2c_imx->clk); 365 clk_disable_unprepare(i2c_imx->clk);
366} 366}
367 367
368static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, 368static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
369 unsigned int rate) 369 unsigned int rate)
370{ 370{
371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; 371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = {
589 .functionality = i2c_imx_func, 589 .functionality = i2c_imx_func,
590}; 590};
591 591
592static int __init i2c_imx_probe(struct platform_device *pdev) 592static int i2c_imx_probe(struct platform_device *pdev)
593{ 593{
594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids, 594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
595 &pdev->dev); 595 &pdev->dev);
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
697 return 0; /* Return OK */ 697 return 0; /* Return OK */
698} 698}
699 699
700static int __exit i2c_imx_remove(struct platform_device *pdev) 700static int i2c_imx_remove(struct platform_device *pdev)
701{ 701{
702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); 702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
703 703
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
715} 715}
716 716
717static struct platform_driver i2c_imx_driver = { 717static struct platform_driver i2c_imx_driver = {
718 .remove = __exit_p(i2c_imx_remove), 718 .probe = i2c_imx_probe,
719 .remove = i2c_imx_remove,
719 .driver = { 720 .driver = {
720 .name = DRIVER_NAME, 721 .name = DRIVER_NAME,
721 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = {
726 727
727static int __init i2c_adap_imx_init(void) 728static int __init i2c_adap_imx_init(void)
728{ 729{
729 return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); 730 return platform_driver_register(&i2c_imx_driver);
730} 731}
731subsys_initcall(i2c_adap_imx_init); 732subsys_initcall(i2c_adap_imx_init);
732 733
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
393 393
394 desc = &priv->hw[priv->head]; 394 desc = &priv->hw[priv->head];
395 395
396 /* Initialize the DMA buffer */
397 memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
398
396 /* Initialize the descriptor */ 399 /* Initialize the descriptor */
397 memset(desc, 0, sizeof(struct ismt_desc)); 400 memset(desc, 0, sizeof(struct ismt_desc));
398 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); 401 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | 234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; 235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
236 236
237 writel_relaxed(data_reg_lo, 237 writel(data_reg_lo,
238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); 238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
239 writel_relaxed(data_reg_hi, 239 writel(data_reg_hi,
240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); 240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
241 241
242 } else { 242 } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); 697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
698 698
699#ifdef CONFIG_OF 699#ifdef CONFIG_OF
700#ifdef CONFIG_HAVE_CLK
700static int 701static int
701mv64xxx_calc_freq(const int tclk, const int n, const int m) 702mv64xxx_calc_freq(const int tclk, const int n, const int m)
702{ 703{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
726 return false; 727 return false;
727 return true; 728 return true;
728} 729}
730#endif /* CONFIG_HAVE_CLK */
729 731
730static int 732static int
731mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, 733mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
732 struct device *dev) 734 struct device *dev)
733{ 735{
734 const struct of_device_id *device;
735 struct device_node *np = dev->of_node;
736 u32 bus_freq, tclk;
737 int rc = 0;
738
739 /* CLK is mandatory when using DT to describe the i2c bus. We 736 /* CLK is mandatory when using DT to describe the i2c bus. We
740 * need to know tclk in order to calculate bus clock 737 * need to know tclk in order to calculate bus clock
741 * factors. 738 * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
744 /* Have OF but no CLK */ 741 /* Have OF but no CLK */
745 return -ENODEV; 742 return -ENODEV;
746#else 743#else
744 const struct of_device_id *device;
745 struct device_node *np = dev->of_node;
746 u32 bus_freq, tclk;
747 int rc = 0;
748
747 if (IS_ERR(drv_data->clk)) { 749 if (IS_ERR(drv_data->clk)) {
748 rc = -ENODEV; 750 rc = -ENODEV;
749 goto out; 751 goto out;
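
Note on the writel_relaxed() -> writel() change above: writel() is provided on every architecture this driver can be built for and orders the MMIO store against preceding writes to normal memory, while writel_relaxed() is weaker and has not always been available outside ARM; either property could be the motivation here, so the snippet below is only a sketch of the portable pattern, with placeholder register offsets.

#include <linux/io.h>
#include <linux/types.h>

#define REG_TX_DATA_LO	0x00	/* placeholder offsets, not the real registers */
#define REG_TX_DATA_HI	0x04

static void load_tx_data(void __iomem *base, u32 lo, u32 hi)
{
	/*
	 * writel() makes earlier stores to coherent memory visible to the
	 * device before this register write lands; writel_relaxed() would
	 * let the CPU reorder them.
	 */
	writel(lo, base + REG_TX_DATA_LO);
	writel(hi, base + REG_TX_DATA_HI);
}
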
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index f4a01675fa71..b7c857774708 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -780,12 +780,13 @@ static struct platform_driver mxs_i2c_driver = {
780 .owner = THIS_MODULE, 780 .owner = THIS_MODULE,
781 .of_match_table = mxs_i2c_dt_ids, 781 .of_match_table = mxs_i2c_dt_ids,
782 }, 782 },
783 .probe = mxs_i2c_probe,
783 .remove = mxs_i2c_remove, 784 .remove = mxs_i2c_remove,
784}; 785};
785 786
786static int __init mxs_i2c_init(void) 787static int __init mxs_i2c_init(void)
787{ 788{
788 return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe); 789 return platform_driver_register(&mxs_i2c_driver);
789} 790}
790subsys_initcall(mxs_i2c_init); 791subsys_initcall(mxs_i2c_init);
791 792
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6d8308d5dc4e..9967a6f9c2ff 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
939 /* 939 /*
940 * ProDB0017052: Clear ARDY bit twice 940 * ProDB0017052: Clear ARDY bit twice
941 */ 941 */
942 if (stat & OMAP_I2C_STAT_ARDY)
943 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
944
942 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 945 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
943 OMAP_I2C_STAT_AL)) { 946 OMAP_I2C_STAT_AL)) {
944 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY | 947 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1178 1178
1179 i2c_del_adapter(&i2c->adap); 1179 i2c_del_adapter(&i2c->adap);
1180 1180
1181 clk_disable_unprepare(i2c->clk);
1182
1183 if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) 1181 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1184 s3c24xx_i2c_dt_gpio_free(i2c); 1182 s3c24xx_i2c_dt_gpio_free(i2c);
1185 1183
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index f8f6f2e552db..04a17b9b38bb 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = {
859 .functionality = stu300_func, 859 .functionality = stu300_func,
860}; 860};
861 861
862static int __init 862static int stu300_probe(struct platform_device *pdev)
863stu300_probe(struct platform_device *pdev)
864{ 863{
865 struct stu300_dev *dev; 864 struct stu300_dev *dev;
866 struct i2c_adapter *adap; 865 struct i2c_adapter *adap;
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
966#define STU300_I2C_PM NULL 965#define STU300_I2C_PM NULL
967#endif 966#endif
968 967
969static int __exit 968static int stu300_remove(struct platform_device *pdev)
970stu300_remove(struct platform_device *pdev)
971{ 969{
972 struct stu300_dev *dev = platform_get_drvdata(pdev); 970 struct stu300_dev *dev = platform_get_drvdata(pdev);
973 971
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = {
989 .pm = STU300_I2C_PM, 987 .pm = STU300_I2C_PM,
990 .of_match_table = stu300_dt_match, 988 .of_match_table = stu300_dt_match,
991 }, 989 },
992 .remove = __exit_p(stu300_remove), 990 .probe = stu300_probe,
991 .remove = stu300_remove,
993 992
994}; 993};
995 994
996static int __init stu300_init(void) 995static int __init stu300_init(void)
997{ 996{
998 return platform_driver_probe(&stu300_i2c_driver, stu300_probe); 997 return platform_driver_register(&stu300_i2c_driver);
999} 998}
1000 999
1001static void __exit stu300_exit(void) 1000static void __exit stu300_exit(void)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 29d3f045a2bf..3be58f89ac77 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1134,6 +1134,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
1134 acpi_handle handle; 1134 acpi_handle handle;
1135 acpi_status status; 1135 acpi_status status;
1136 1136
1137 if (!adap->dev.parent)
1138 return;
1139
1137 handle = ACPI_HANDLE(adap->dev.parent); 1140 handle = ACPI_HANDLE(adap->dev.parent);
1138 if (!handle) 1141 if (!handle)
1139 return; 1142 return;
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 74b41ae690f3..928656e241dd 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
200 arb->parent = of_find_i2c_adapter_by_node(parent_np); 200 arb->parent = of_find_i2c_adapter_by_node(parent_np);
201 if (!arb->parent) { 201 if (!arb->parent) {
202 dev_err(dev, "Cannot find parent bus\n"); 202 dev_err(dev, "Cannot find parent bus\n");
203 return -EINVAL; 203 return -EPROBE_DEFER;
204 } 204 }
205 205
206 /* Actually add the mux adapter */ 206 /* Actually add the mux adapter */
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5d4a99ba743e..a764da777f08 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
66 struct device_node *adapter_np, *child; 66 struct device_node *adapter_np, *child;
67 struct i2c_adapter *adapter; 67 struct i2c_adapter *adapter;
68 unsigned *values, *gpios; 68 unsigned *values, *gpios;
69 int i = 0; 69 int i = 0, ret;
70 70
71 if (!np) 71 if (!np)
72 return -ENODEV; 72 return -ENODEV;
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
79 adapter = of_find_i2c_adapter_by_node(adapter_np); 79 adapter = of_find_i2c_adapter_by_node(adapter_np);
80 if (!adapter) { 80 if (!adapter) {
81 dev_err(&pdev->dev, "Cannot find parent bus\n"); 81 dev_err(&pdev->dev, "Cannot find parent bus\n");
82 return -ENODEV; 82 return -EPROBE_DEFER;
83 } 83 }
84 mux->data.parent = i2c_adapter_id(adapter); 84 mux->data.parent = i2c_adapter_id(adapter);
85 put_device(&adapter->dev); 85 put_device(&adapter->dev);
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
116 return -ENOMEM; 116 return -ENOMEM;
117 } 117 }
118 118
119 for (i = 0; i < mux->data.n_gpios; i++) 119 for (i = 0; i < mux->data.n_gpios; i++) {
120 gpios[i] = of_get_named_gpio(np, "mux-gpios", i); 120 ret = of_get_named_gpio(np, "mux-gpios", i);
121 if (ret < 0)
122 return ret;
123 gpios[i] = ret;
124 }
121 125
122 mux->data.gpios = gpios; 126 mux->data.gpios = gpios;
123 127
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
177 if (!parent) { 181 if (!parent) {
178 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 182 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
179 mux->data.parent); 183 mux->data.parent);
180 return -ENODEV; 184 return -EPROBE_DEFER;
181 } 185 }
182 186
183 mux->parent = parent; 187 mux->parent = parent;
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 69a91732ae65..68a37157377d 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
113 adapter = of_find_i2c_adapter_by_node(adapter_np); 113 adapter = of_find_i2c_adapter_by_node(adapter_np);
114 if (!adapter) { 114 if (!adapter) {
115 dev_err(mux->dev, "Cannot find parent bus\n"); 115 dev_err(mux->dev, "Cannot find parent bus\n");
116 return -ENODEV; 116 return -EPROBE_DEFER;
117 } 117 }
118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter); 118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
119 put_device(&adapter->dev); 119 put_device(&adapter->dev);
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
211 if (!mux->parent) { 211 if (!mux->parent) {
212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
213 mux->pdata->parent_bus_num); 213 mux->pdata->parent_bus_num);
214 ret = -ENODEV; 214 ret = -EPROBE_DEFER;
215 goto err; 215 goto err;
216 } 216 }
217 217
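
Note: all four mux/arbitrator hunks above make the same substitution — when the parent adapter cannot be found, the probe now returns -EPROBE_DEFER so the driver core retries the probe once more drivers have registered, instead of failing permanently with -EINVAL or -ENODEV. A minimal sketch of that pattern with placeholder names; only the choice of error codes is taken from the hunks above.

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_mux_probe(struct platform_device *pdev)
{
	struct device_node *parent_np;
	struct i2c_adapter *parent;

	parent_np = of_parse_phandle(pdev->dev.of_node, "i2c-parent", 0);
	if (!parent_np)
		return -ENODEV;		/* malformed DT: a permanent error */

	parent = of_find_i2c_adapter_by_node(parent_np);
	of_node_put(parent_np);
	if (!parent)
		return -EPROBE_DEFER;	/* the parent may simply probe later */

	/* ... set up the mux channels on "parent" (reference kept by the driver) ... */
	return 0;
}
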
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d0a79a4bce1c..ba6f6a91dfff 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
185 185
186 iio_device_unregister(indio_dev); 186 iio_device_unregister(indio_dev);
187 187
188 if (!IS_ERR(reg)) { 188 if (!IS_ERR(reg))
189 regulator_disable(reg); 189 regulator_disable(reg);
190 regulator_put(reg);
191 }
192 190
193 return 0; 191 return 0;
194} 192}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a7b30be86ae0..52605c0ea3a6 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
525 } 525 }
526 526
527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
528 if (indio_dev == NULL) 528 if (indio_dev == NULL) {
529 return -ENOMEM; 529 ret = -ENOMEM;
530 goto error_disable_clk;
531 }
530 532
531 st = iio_priv(indio_dev); 533 st = iio_priv(indio_dev);
532 534
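
Note: the adf4350 hunk above illustrates a general probe rule — once a resource has been enabled (here a clock), every later failure path must branch to a label that releases it again rather than returning directly. A minimal, self-contained sketch of the pattern with placeholder names:

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int foo_probe_sketch(struct clk *clk)
{
	void *buf;
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_disable_clk;	/* do not leak the enabled clock */
	}

	/* ... further setup; a real driver would keep "buf" around ... */
	kfree(buf);
	return 0;

err_disable_clk:
	clk_disable_unprepare(clk);
	return ret;
}
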
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2710f7245c3b..2db7dcd826b9 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
477 indio_dev->currentmode = INDIO_DIRECT_MODE; 477 indio_dev->currentmode = INDIO_DIRECT_MODE;
478 if (indio_dev->setup_ops->postdisable) 478 if (indio_dev->setup_ops->postdisable)
479 indio_dev->setup_ops->postdisable(indio_dev); 479 indio_dev->setup_ops->postdisable(indio_dev);
480
481 if (indio_dev->available_scan_masks == NULL)
482 kfree(indio_dev->active_scan_mask);
480} 483}
481 484
482int iio_update_buffers(struct iio_dev *indio_dev, 485int iio_update_buffers(struct iio_dev *indio_dev,
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 8e84cd522e49..f95c6979efd8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -852,7 +852,6 @@ static void iio_dev_release(struct device *device)
852 iio_device_unregister_trigger_consumer(indio_dev); 852 iio_device_unregister_trigger_consumer(indio_dev);
853 iio_device_unregister_eventset(indio_dev); 853 iio_device_unregister_eventset(indio_dev);
854 iio_device_unregister_sysfs(indio_dev); 854 iio_device_unregister_sysfs(indio_dev);
855 iio_device_unregister_debugfs(indio_dev);
856 855
857 ida_simple_remove(&iio_ida, indio_dev->id); 856 ida_simple_remove(&iio_ida, indio_dev->id);
858 kfree(indio_dev); 857 kfree(indio_dev);
@@ -1087,6 +1086,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
1087 1086
1088 if (indio_dev->chrdev.dev) 1087 if (indio_dev->chrdev.dev)
1089 cdev_del(&indio_dev->chrdev); 1088 cdev_del(&indio_dev->chrdev);
1089 iio_device_unregister_debugfs(indio_dev);
1090 1090
1091 iio_disable_all_buffers(indio_dev); 1091 iio_disable_all_buffers(indio_dev);
1092 1092
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e8d2849cc81d..cab3bc7494a2 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -29,9 +29,9 @@
29#define ST_MAGN_NUMBER_DATA_CHANNELS 3 29#define ST_MAGN_NUMBER_DATA_CHANNELS 3
30 30
31/* DEFAULT VALUE FOR SENSORS */ 31/* DEFAULT VALUE FOR SENSORS */
32#define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 32#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
33#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 33#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
34#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 34#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
35 35
36/* FULLSCALE */ 36/* FULLSCALE */
37#define ST_MAGN_FS_AVL_1300MG 1300 37#define ST_MAGN_FS_AVL_1300MG 1300
@@ -117,16 +117,16 @@
117static const struct iio_chan_spec st_magn_16bit_channels[] = { 117static const struct iio_chan_spec st_magn_16bit_channels[] = {
118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, 120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
121 ST_MAGN_DEFAULT_OUT_X_L_ADDR), 121 ST_MAGN_DEFAULT_OUT_X_H_ADDR),
122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, 124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
125 ST_MAGN_DEFAULT_OUT_Y_L_ADDR), 125 ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, 128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
129 ST_MAGN_DEFAULT_OUT_Z_L_ADDR), 129 ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
130 IIO_CHAN_SOFT_TIMESTAMP(3) 130 IIO_CHAN_SOFT_TIMESTAMP(3)
131}; 131};
132 132
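
Note: the channel table above now declares the 16-bit magnetometer samples as big-endian (IIO_BE) and reads them starting at the *_H register addresses. A minimal sketch, not taken from the driver, of what that storage order implies when a raw sample is converted to CPU byte order:

#include <asm/unaligned.h>
#include <linux/types.h>

static s16 magn_sample_to_cpu(const u8 *raw)
{
	/* raw[0] is the high byte (the ..._H register), raw[1] the low byte */
	return (s16)get_unaligned_be16(raw);
}
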
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..b84791f03a27 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
31 libibverbs, libibcm and a hardware driver library from 31 libibverbs, libibcm and a hardware driver library from
32 <http://www.openfabrics.org/git/>. 32 <http://www.openfabrics.org/git/>.
33 33
34config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
35 bool "Experimental and unstable ABI for userspace access to flow steering verbs"
36 depends on INFINIBAND_USER_ACCESS
37 depends on STAGING
38 ---help---
39 The final ABI for userspace access to flow steering verbs
40 has not been defined. To use the current ABI, *WHICH WILL
41 CHANGE IN THE FUTURE*, say Y here.
42
43 If unsure, say N.
44
34config INFINIBAND_USER_MEM 45config INFINIBAND_USER_MEM
35 bool 46 bool
36 depends on INFINIBAND_USER_ACCESS != n 47 depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index d040b877475f..d8f9c6c272d7 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
217IB_UVERBS_DECLARE_CMD(create_xsrq); 217IB_UVERBS_DECLARE_CMD(create_xsrq);
218IB_UVERBS_DECLARE_CMD(open_xrcd); 218IB_UVERBS_DECLARE_CMD(open_xrcd);
219IB_UVERBS_DECLARE_CMD(close_xrcd); 219IB_UVERBS_DECLARE_CMD(close_xrcd);
220#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
220IB_UVERBS_DECLARE_CMD(create_flow); 221IB_UVERBS_DECLARE_CMD(create_flow);
221IB_UVERBS_DECLARE_CMD(destroy_flow); 222IB_UVERBS_DECLARE_CMD(destroy_flow);
223#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
222 224
223#endif /* UVERBS_H */ 225#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f2b81b9ee0d6..2f0f01b70e3b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; 54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
57#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
57static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; 58static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
58 60
59#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 61#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
60 do { \ 62 do { \
@@ -2599,6 +2601,7 @@ out_put:
2599 return ret ? ret : in_len; 2601 return ret ? ret : in_len;
2600} 2602}
2601 2603
2604#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
2602static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, 2605static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
2603 union ib_flow_spec *ib_spec) 2606 union ib_flow_spec *ib_spec)
2604{ 2607{
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
2824 2827
2825 return ret ? ret : in_len; 2828 return ret ? ret : in_len;
2826} 2829}
2830#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
2827 2831
2828static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 2832static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2829 struct ib_uverbs_create_xsrq *cmd, 2833 struct ib_uverbs_create_xsrq *cmd,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 75ad86c4abf8..2df31f68ea09 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, 115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, 116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, 117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
118#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
118 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, 119 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
119 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow 120 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
121#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
120}; 122};
121 123
122static void ib_uverbs_add_one(struct ib_device *device); 124static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
605 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) 607 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
606 return -ENOSYS; 608 return -ENOSYS;
607 609
610#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
608 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { 611 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
609 struct ib_uverbs_cmd_hdr_ex hdr_ex; 612 struct ib_uverbs_cmd_hdr_ex hdr_ex;
610 613
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
621 (hdr_ex.out_words + 624 (hdr_ex.out_words +
622 hdr_ex.provider_out_words) * 4); 625 hdr_ex.provider_out_words) * 4);
623 } else { 626 } else {
627#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
624 if (hdr.in_words * 4 != count) 628 if (hdr.in_words * 4 != count)
625 return -EINVAL; 629 return -EINVAL;
626 630
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
628 buf + sizeof(hdr), 632 buf + sizeof(hdr),
629 hdr.in_words * 4, 633 hdr.in_words * 4,
630 hdr.out_words * 4); 634 hdr.out_words * 4);
635#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
631 } 636 }
637#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
632} 638}
633 639
634static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) 640static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index d5d1929753e4..cedda25232be 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
141 return "C2_QP_STATE_ERROR"; 141 return "C2_QP_STATE_ERROR";
142 default: 142 default:
143 return "<invalid QP state>"; 143 return "<invalid QP state>";
144 }; 144 }
145} 145}
146 146
147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) 147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf4..f0612645de99 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
1693 1693
1694#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
1694 ibdev->ib_dev.uverbs_cmd_mask |= 1695 ibdev->ib_dev.uverbs_cmd_mask |=
1695 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | 1696 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
1696 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); 1697 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
1698#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
1697 } 1699 }
1698 1700
1699 mlx4_ib_alloc_eqs(dev, ibdev); 1701 mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3f831de9a4d8..b1a6cb3a2809 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
164static int alloc_comp_eqs(struct mlx5_ib_dev *dev) 164static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
165{ 165{
166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
167 char name[MLX5_MAX_EQ_NAME];
167 struct mlx5_eq *eq, *n; 168 struct mlx5_eq *eq, *n;
168 int ncomp_vec; 169 int ncomp_vec;
169 int nent; 170 int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
180 goto clean; 181 goto clean;
181 } 182 }
182 183
183 snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); 184 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
184 err = mlx5_create_map_eq(&dev->mdev, eq, 185 err = mlx5_create_map_eq(&dev->mdev, eq,
185 i + MLX5_EQ_VEC_COMP_BASE, nent, 0, 186 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
186 eq->name, 187 name, &dev->mdev.priv.uuari.uars[0]);
187 &dev->mdev.priv.uuari.uars[0]);
188 if (err) { 188 if (err) {
189 kfree(eq); 189 kfree(eq);
190 goto clean; 190 goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
301 props->max_srq_sge = max_rq_sg - 1; 301 props->max_srq_sge = max_rq_sg - 1;
302 props->max_fast_reg_page_list_len = (unsigned int)-1; 302 props->max_fast_reg_page_list_len = (unsigned int)-1;
303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; 303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
304 props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? 304 props->atomic_cap = IB_ATOMIC_NONE;
305 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 305 props->masked_atomic_cap = IB_ATOMIC_NONE;
306 props->masked_atomic_cap = IB_ATOMIC_HCA;
307 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); 306 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
308 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; 307 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
309 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; 308 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
1006 ibev.device = &ibdev->ib_dev; 1005 ibev.device = &ibdev->ib_dev;
1007 ibev.element.port_num = port; 1006 ibev.element.port_num = port;
1008 1007
1008 if (port < 1 || port > ibdev->num_ports) {
1009 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
1010 return;
1011 }
1012
1009 if (ibdev->ib_active) 1013 if (ibdev->ib_active)
1010 ib_dispatch_event(&ibev); 1014 ib_dispatch_event(&ibev);
1011} 1015}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f0..3453580b1eb2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
42 DEF_CACHE_SIZE = 10, 42 DEF_CACHE_SIZE = 10,
43}; 43};
44 44
45enum {
46 MLX5_UMR_ALIGN = 2048
47};
48
45static __be64 *mr_align(__be64 *ptr, int align) 49static __be64 *mr_align(__be64 *ptr, int align)
46{ 50{
47 unsigned long mask = align - 1; 51 unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
61 65
62static int add_keys(struct mlx5_ib_dev *dev, int c, int num) 66static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
63{ 67{
64 struct device *ddev = dev->ib_dev.dma_device;
65 struct mlx5_mr_cache *cache = &dev->cache; 68 struct mlx5_mr_cache *cache = &dev->cache;
66 struct mlx5_cache_ent *ent = &cache->ent[c]; 69 struct mlx5_cache_ent *ent = &cache->ent[c];
67 struct mlx5_create_mkey_mbox_in *in; 70 struct mlx5_create_mkey_mbox_in *in;
68 struct mlx5_ib_mr *mr; 71 struct mlx5_ib_mr *mr;
69 int npages = 1 << ent->order; 72 int npages = 1 << ent->order;
70 int size = sizeof(u64) * npages;
71 int err = 0; 73 int err = 0;
72 int i; 74 int i;
73 75
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
83 } 85 }
84 mr->order = ent->order; 86 mr->order = ent->order;
85 mr->umred = 1; 87 mr->umred = 1;
86 mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
87 if (!mr->pas) {
88 kfree(mr);
89 err = -ENOMEM;
90 goto out;
91 }
92 mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
93 DMA_TO_DEVICE);
94 if (dma_mapping_error(ddev, mr->dma)) {
95 kfree(mr->pas);
96 kfree(mr);
97 err = -ENOMEM;
98 goto out;
99 }
100
101 in->seg.status = 1 << 6; 88 in->seg.status = 1 << 6;
102 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); 89 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
103 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 90 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
108 sizeof(*in)); 95 sizeof(*in));
109 if (err) { 96 if (err) {
110 mlx5_ib_warn(dev, "create mkey failed %d\n", err); 97 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
111 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
112 kfree(mr->pas);
113 kfree(mr); 98 kfree(mr);
114 goto out; 99 goto out;
115 } 100 }
@@ -129,11 +114,9 @@ out:
129 114
130static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) 115static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
131{ 116{
132 struct device *ddev = dev->ib_dev.dma_device;
133 struct mlx5_mr_cache *cache = &dev->cache; 117 struct mlx5_mr_cache *cache = &dev->cache;
134 struct mlx5_cache_ent *ent = &cache->ent[c]; 118 struct mlx5_cache_ent *ent = &cache->ent[c];
135 struct mlx5_ib_mr *mr; 119 struct mlx5_ib_mr *mr;
136 int size;
137 int err; 120 int err;
138 int i; 121 int i;
139 122
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
149 ent->size--; 132 ent->size--;
150 spin_unlock(&ent->lock); 133 spin_unlock(&ent->lock);
151 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 134 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
152 if (err) { 135 if (err)
153 mlx5_ib_warn(dev, "failed destroy mkey\n"); 136 mlx5_ib_warn(dev, "failed destroy mkey\n");
154 } else { 137 else
155 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
156 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
157 kfree(mr->pas);
158 kfree(mr); 138 kfree(mr);
159 }
160 } 139 }
161} 140}
162 141
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
408 387
409static void clean_keys(struct mlx5_ib_dev *dev, int c) 388static void clean_keys(struct mlx5_ib_dev *dev, int c)
410{ 389{
411 struct device *ddev = dev->ib_dev.dma_device;
412 struct mlx5_mr_cache *cache = &dev->cache; 390 struct mlx5_mr_cache *cache = &dev->cache;
413 struct mlx5_cache_ent *ent = &cache->ent[c]; 391 struct mlx5_cache_ent *ent = &cache->ent[c];
414 struct mlx5_ib_mr *mr; 392 struct mlx5_ib_mr *mr;
415 int size;
416 int err; 393 int err;
417 394
395 cancel_delayed_work(&ent->dwork);
418 while (1) { 396 while (1) {
419 spin_lock(&ent->lock); 397 spin_lock(&ent->lock);
420 if (list_empty(&ent->head)) { 398 if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
427 ent->size--; 405 ent->size--;
428 spin_unlock(&ent->lock); 406 spin_unlock(&ent->lock);
429 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 407 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
430 if (err) { 408 if (err)
431 mlx5_ib_warn(dev, "failed destroy mkey\n"); 409 mlx5_ib_warn(dev, "failed destroy mkey\n");
432 } else { 410 else
433 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
434 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
435 kfree(mr->pas);
436 kfree(mr); 411 kfree(mr);
437 }
438 } 412 }
439} 413}
440 414
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
540 int i; 514 int i;
541 515
542 dev->cache.stopped = 1; 516 dev->cache.stopped = 1;
543 destroy_workqueue(dev->cache.wq); 517 flush_workqueue(dev->cache.wq);
544 518
545 mlx5_mr_cache_debugfs_cleanup(dev); 519 mlx5_mr_cache_debugfs_cleanup(dev);
546 520
547 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 521 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
548 clean_keys(dev, i); 522 clean_keys(dev, i);
549 523
524 destroy_workqueue(dev->cache.wq);
525
550 return 0; 526 return 0;
551} 527}
552 528
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
675 int page_shift, int order, int access_flags) 651 int page_shift, int order, int access_flags)
676{ 652{
677 struct mlx5_ib_dev *dev = to_mdev(pd->device); 653 struct mlx5_ib_dev *dev = to_mdev(pd->device);
654 struct device *ddev = dev->ib_dev.dma_device;
678 struct umr_common *umrc = &dev->umrc; 655 struct umr_common *umrc = &dev->umrc;
679 struct ib_send_wr wr, *bad; 656 struct ib_send_wr wr, *bad;
680 struct mlx5_ib_mr *mr; 657 struct mlx5_ib_mr *mr;
681 struct ib_sge sg; 658 struct ib_sge sg;
659 int size = sizeof(u64) * npages;
682 int err; 660 int err;
683 int i; 661 int i;
684 662
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
697 if (!mr) 675 if (!mr)
698 return ERR_PTR(-EAGAIN); 676 return ERR_PTR(-EAGAIN);
699 677
700 mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); 678 mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
679 if (!mr->pas) {
680 err = -ENOMEM;
681 goto error;
682 }
683
684 mlx5_ib_populate_pas(dev, umem, page_shift,
685 mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
686
687 mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
688 DMA_TO_DEVICE);
689 if (dma_mapping_error(ddev, mr->dma)) {
690 kfree(mr->pas);
691 err = -ENOMEM;
692 goto error;
693 }
701 694
702 memset(&wr, 0, sizeof(wr)); 695 memset(&wr, 0, sizeof(wr));
703 wr.wr_id = (u64)(unsigned long)mr; 696 wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
718 wait_for_completion(&mr->done); 711 wait_for_completion(&mr->done);
719 up(&umrc->sem); 712 up(&umrc->sem);
720 713
714 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
715 kfree(mr->pas);
716
721 if (mr->status != IB_WC_SUCCESS) { 717 if (mr->status != IB_WC_SUCCESS) {
722 mlx5_ib_warn(dev, "reg umr failed\n"); 718 mlx5_ib_warn(dev, "reg umr failed\n");
723 err = -EFAULT; 719 err = -EFAULT;
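
Note: the mr.c hunks above take the page-list buffer out of the MR cache entries and confine it to reg_umr() — the buffer is allocated with extra room so it can be aligned to MLX5_UMR_ALIGN, DMA-mapped only for the lifetime of the UMR work request, and unmapped and freed once the request completes. A minimal sketch of that allocate-align-map-unmap pattern with placeholder names; only the 2048-byte alignment comes from the hunk.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#define PAGE_LIST_ALIGN	2048	/* mirrors MLX5_UMR_ALIGN above */

static void *map_page_list(struct device *dev, size_t npages,
			   void **raw, dma_addr_t *dma)
{
	size_t size = npages * sizeof(u64);
	void *aligned;

	*raw = kmalloc(size + PAGE_LIST_ALIGN - 1, GFP_KERNEL);
	if (!*raw)
		return NULL;

	aligned = PTR_ALIGN(*raw, PAGE_LIST_ALIGN);

	*dma = dma_map_single(dev, aligned, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(*raw);
		return NULL;
	}
	return aligned;
}

static void unmap_page_list(struct device *dev, void *raw,
			    dma_addr_t dma, size_t npages)
{
	dma_unmap_single(dev, dma, npages * sizeof(u64), DMA_TO_DEVICE);
	kfree(raw);
}
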
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd303..5659ea880741 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
203 203
204 switch (qp_type) { 204 switch (qp_type) {
205 case IB_QPT_XRC_INI: 205 case IB_QPT_XRC_INI:
206 size = sizeof(struct mlx5_wqe_xrc_seg); 206 size += sizeof(struct mlx5_wqe_xrc_seg);
207 /* fall through */ 207 /* fall through */
208 case IB_QPT_RC: 208 case IB_QPT_RC:
209 size += sizeof(struct mlx5_wqe_ctrl_seg) + 209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
211 sizeof(struct mlx5_wqe_raddr_seg); 211 sizeof(struct mlx5_wqe_raddr_seg);
212 break; 212 break;
213 213
214 case IB_QPT_XRC_TGT:
215 return 0;
216
214 case IB_QPT_UC: 217 case IB_QPT_UC:
215 size = sizeof(struct mlx5_wqe_ctrl_seg) + 218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
216 sizeof(struct mlx5_wqe_raddr_seg); 219 sizeof(struct mlx5_wqe_raddr_seg);
217 break; 220 break;
218 221
219 case IB_QPT_UD: 222 case IB_QPT_UD:
220 case IB_QPT_SMI: 223 case IB_QPT_SMI:
221 case IB_QPT_GSI: 224 case IB_QPT_GSI:
222 size = sizeof(struct mlx5_wqe_ctrl_seg) + 225 size += sizeof(struct mlx5_wqe_ctrl_seg) +
223 sizeof(struct mlx5_wqe_datagram_seg); 226 sizeof(struct mlx5_wqe_datagram_seg);
224 break; 227 break;
225 228
226 case MLX5_IB_QPT_REG_UMR: 229 case MLX5_IB_QPT_REG_UMR:
227 size = sizeof(struct mlx5_wqe_ctrl_seg) + 230 size += sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 231 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
229 sizeof(struct mlx5_mkey_seg); 232 sizeof(struct mlx5_mkey_seg);
230 break; 233 break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
270 return wqe_size; 273 return wqe_size;
271 274
272 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { 275 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
273 mlx5_ib_dbg(dev, "\n"); 276 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
277 wqe_size, dev->mdev.caps.max_sq_desc_sz);
274 return -EINVAL; 278 return -EINVAL;
275 } 279 }
276 280
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
280 284
281 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 285 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
282 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 286 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
287 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
288 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
289 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
290 return -ENOMEM;
291 }
283 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 292 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
284 qp->sq.max_gs = attr->cap.max_send_sge; 293 qp->sq.max_gs = attr->cap.max_send_sge;
285 qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); 294 qp->sq.max_post = wq_size / wqe_size;
295 attr->cap.max_send_wr = qp->sq.max_post;
286 296
287 return wq_size; 297 return wq_size;
288} 298}
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1280 MLX5_QP_OPTPAR_Q_KEY, 1290 MLX5_QP_OPTPAR_Q_KEY,
1281 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | 1291 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1282 MLX5_QP_OPTPAR_Q_KEY, 1292 MLX5_QP_OPTPAR_Q_KEY,
1293 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1294 MLX5_QP_OPTPAR_RRE |
1295 MLX5_QP_OPTPAR_RAE |
1296 MLX5_QP_OPTPAR_RWE |
1297 MLX5_QP_OPTPAR_PKEY_INDEX,
1283 }, 1298 },
1284 }, 1299 },
1285 [MLX5_QP_STATE_RTR] = { 1300 [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1314 [MLX5_QP_STATE_RTS] = { 1329 [MLX5_QP_STATE_RTS] = {
1315 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 1330 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1316 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 1331 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1332 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1333 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1334 MLX5_QP_OPTPAR_RWE |
1335 MLX5_QP_OPTPAR_RAE |
1336 MLX5_QP_OPTPAR_RRE,
1317 }, 1337 },
1318 }, 1338 },
1319}; 1339};
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1651 rseg->reserved = 0; 1671 rseg->reserved = 0;
1652} 1672}
1653 1673
1654static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
1655{
1656 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1657 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1658 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1659 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1660 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1661 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1662 } else {
1663 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1664 aseg->compare = 0;
1665 }
1666}
1667
1668static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
1669 struct ib_send_wr *wr)
1670{
1671 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1672 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1673 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1674 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1675}
1676
1677static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 1674static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1678 struct ib_send_wr *wr) 1675 struct ib_send_wr *wr)
1679{ 1676{
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2063 2060
2064 case IB_WR_ATOMIC_CMP_AND_SWP: 2061 case IB_WR_ATOMIC_CMP_AND_SWP:
2065 case IB_WR_ATOMIC_FETCH_AND_ADD: 2062 case IB_WR_ATOMIC_FETCH_AND_ADD:
2066 set_raddr_seg(seg, wr->wr.atomic.remote_addr,
2067 wr->wr.atomic.rkey);
2068 seg += sizeof(struct mlx5_wqe_raddr_seg);
2069
2070 set_atomic_seg(seg, wr);
2071 seg += sizeof(struct mlx5_wqe_atomic_seg);
2072
2073 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2074 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
2075 break;
2076
2077 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 2063 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2078 set_raddr_seg(seg, wr->wr.atomic.remote_addr, 2064 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2079 wr->wr.atomic.rkey); 2065 err = -ENOSYS;
2080 seg += sizeof(struct mlx5_wqe_raddr_seg); 2066 *bad_wr = wr;
2081 2067 goto out;
2082 set_masked_atomic_seg(seg, wr);
2083 seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
2084
2085 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2086 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
2087 break;
2088 2068
2089 case IB_WR_LOCAL_INV: 2069 case IB_WR_LOCAL_INV:
2090 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2070 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
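
Note: the calc_sq_size() hunk above adds a bounds check against the device's max_wqes limit and reports the real capacity back through attr->cap.max_send_wr, with max_post now the plain quotient wq_size / wqe_size instead of rounding it down to a power of two. A minimal arithmetic sketch of that sizing, assuming a 64-byte basic block and making no claim to match the driver beyond the formulas visible above:

#include <linux/errno.h>
#include <linux/log2.h>

#define SEND_WQE_BB	64	/* assumed basic-block size */

static int calc_sq_size_sketch(int max_send_wr, int wqe_size, int max_wqes,
			       int *wqe_cnt, int *max_post)
{
	int wq_size;

	/* Total queue size in bytes, rounded up to a power of two. */
	wq_size = roundup_pow_of_two(max_send_wr * wqe_size);
	*wqe_cnt = wq_size / SEND_WQE_BB;

	/* Refuse queues the device cannot handle. */
	if (*wqe_cnt > max_wqes)
		return -ENOMEM;

	/* The caller may post at most this many work requests. */
	*max_post = wq_size / wqe_size;

	return wq_size;
}
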
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 84d297afd6a9..0aa478bc291a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
295 mlx5_vfree(in); 295 mlx5_vfree(in);
296 if (err) { 296 if (err) {
297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
298 goto err_srq; 298 goto err_usr_kern_srq;
299 } 299 }
300 300
301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); 301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
316 316
317err_core: 317err_core:
318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); 318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
319
320err_usr_kern_srq:
319 if (pd->uobject) 321 if (pd->uobject)
320 destroy_srq_user(pd, srq); 322 destroy_srq_user(pd, srq);
321 else 323 else
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7c9d35f39d75..690201738993 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", 357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
358 eqe->type, eqe->subtype, eq->eqn); 358 eqe->type, eqe->subtype, eq->eqn);
359 break; 359 break;
360 }; 360 }
361 361
362 set_eqe_hw(eqe); 362 set_eqe_hw(eqe);
363 ++eq->cons_index; 363 ++eq->cons_index;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 4ed8235d2d36..50219ab2279d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
150 return IB_QPS_SQE; 150 return IB_QPS_SQE;
151 case OCRDMA_QPS_ERR: 151 case OCRDMA_QPS_ERR:
152 return IB_QPS_ERR; 152 return IB_QPS_ERR;
153 }; 153 }
154 return IB_QPS_ERR; 154 return IB_QPS_ERR;
155} 155}
156 156
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
171 return OCRDMA_QPS_SQE; 171 return OCRDMA_QPS_SQE;
172 case IB_QPS_ERR: 172 case IB_QPS_ERR:
173 return OCRDMA_QPS_ERR; 173 return OCRDMA_QPS_ERR;
174 }; 174 }
175 return OCRDMA_QPS_ERR; 175 return OCRDMA_QPS_ERR;
176} 176}
177 177
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
1982 break; 1982 break;
1983 default: 1983 default:
1984 return -EINVAL; 1984 return -EINVAL;
1985 }; 1985 }
1986 1986
1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); 1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
1988 if (!cmd) 1988 if (!cmd)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 56e004940f18..0ce7674621ea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
531 case BE_DEV_DOWN: 531 case BE_DEV_DOWN:
532 ocrdma_close(dev); 532 ocrdma_close(dev);
533 break; 533 break;
534 }; 534 }
535} 535}
536 536
537static struct ocrdma_driver ocrdma_drv = { 537static struct ocrdma_driver ocrdma_drv = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6e982bb43c31..69f1d1221a6b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
141 /* Unsupported */ 141 /* Unsupported */
142 *ib_speed = IB_SPEED_SDR; 142 *ib_speed = IB_SPEED_SDR;
143 *ib_width = IB_WIDTH_1X; 143 *ib_width = IB_WIDTH_1X;
144 }; 144 }
145} 145}
146 146
147 147
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2331 default: 2331 default:
2332 ibwc_status = IB_WC_GENERAL_ERR; 2332 ibwc_status = IB_WC_GENERAL_ERR;
2333 break; 2333 break;
2334 }; 2334 }
2335 return ibwc_status; 2335 return ibwc_status;
2336} 2336}
2337 2337
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2370 pr_err("%s() invalid opcode received = 0x%x\n", 2370 pr_err("%s() invalid opcode received = 0x%x\n",
2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2372 break; 2372 break;
2373 }; 2373 }
2374} 2374}
2375 2375
2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, 2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b5..6df23502059a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
594 594
595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
596 596
597 if (device->use_frwr) 597 if (device && device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn); 598 isert_conn_free_frwr_pool(isert_conn);
599 599
600 if (isert_conn->conn_qp) { 600 if (isert_conn->conn_qp) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 653ac6bfc57a..6c923c7039a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1588 int resp_data_len; 1588 int resp_data_len;
1589 int resp_len; 1589 int resp_len;
1590 1590
1591 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; 1591 resp_data_len = 4;
1592 resp_len = sizeof(*srp_rsp) + resp_data_len; 1592 resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 1593
1594 srp_rsp = ioctx->ioctx.buf; 1594 srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1600 + atomic_xchg(&ch->req_lim_delta, 0)); 1600 + atomic_xchg(&ch->req_lim_delta, 0));
1601 srp_rsp->tag = tag; 1601 srp_rsp->tag = tag;
1602 1602
1603 if (rsp_code != SRP_TSK_MGMT_SUCCESS) { 1603 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1604 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1604 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1605 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); 1605 srp_rsp->data[3] = rsp_code;
1606 srp_rsp->data[3] = rsp_code;
1607 }
1608 1606
1609 return resp_len; 1607 return resp_len;
1610} 1608}
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
2358 transport_deregister_session(se_sess); 2356 transport_deregister_session(se_sess);
2359 ch->sess = NULL; 2357 ch->sess = NULL;
2360 2358
2359 ib_destroy_cm_id(ch->cm_id);
2360
2361 srpt_destroy_ch_ib(ch); 2361 srpt_destroy_ch_ib(ch);
2362 2362
2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
2368 list_del(&ch->list); 2368 list_del(&ch->list);
2369 spin_unlock_irq(&sdev->spinlock); 2369 spin_unlock_irq(&sdev->spinlock);
2370 2370
2371 ib_destroy_cm_id(ch->cm_id);
2372
2373 if (ch->release_done) 2371 if (ch->release_done)
2374 complete(ch->release_done); 2372 complete(ch->release_done);
2375 2373
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fe302e33f72e..c880ebaf1553 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -52,7 +52,7 @@ config AMD_IOMMU
52 select PCI_PRI 52 select PCI_PRI
53 select PCI_PASID 53 select PCI_PASID
54 select IOMMU_API 54 select IOMMU_API
55 depends on X86_64 && PCI && ACPI && X86_IO_APIC 55 depends on X86_64 && PCI && ACPI
56 ---help--- 56 ---help---
57 With this option you can enable support for AMD IOMMU hardware in 57 With this option you can enable support for AMD IOMMU hardware in
58 your system. An IOMMU is a hardware component which provides 58 your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..181c9ba929cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
377 u32 cbar; 377 u32 cbar;
378 pgd_t *pgd; 378 pgd_t *pgd;
379}; 379};
380#define INVALID_IRPTNDX 0xff
380 381
381#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 382#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
382#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 383#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
840 if (IS_ERR_VALUE(ret)) { 841 if (IS_ERR_VALUE(ret)) {
841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 842 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
842 root_cfg->irptndx, irq); 843 root_cfg->irptndx, irq);
843 root_cfg->irptndx = -1; 844 root_cfg->irptndx = INVALID_IRPTNDX;
844 goto out_free_context; 845 goto out_free_context;
845 } 846 }
846 847
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
869 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); 870 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
870 arm_smmu_tlb_inv_context(root_cfg); 871 arm_smmu_tlb_inv_context(root_cfg);
871 872
872 if (root_cfg->irptndx != -1) { 873 if (root_cfg->irptndx != INVALID_IRPTNDX) {
873 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 874 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
874 free_irq(irq, domain); 875 free_irq(irq, domain);
875 } 876 }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1857 goto out_put_parent; 1858 goto out_put_parent;
1858 } 1859 }
1859 1860
1860 arm_smmu_device_reset(smmu);
1861
1862 for (i = 0; i < smmu->num_global_irqs; ++i) { 1861 for (i = 0; i < smmu->num_global_irqs; ++i) {
1863 err = request_irq(smmu->irqs[i], 1862 err = request_irq(smmu->irqs[i],
1864 arm_smmu_global_fault, 1863 arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1876 spin_lock(&arm_smmu_devices_lock); 1875 spin_lock(&arm_smmu_devices_lock);
1877 list_add(&smmu->list, &arm_smmu_devices); 1876 list_add(&smmu->list, &arm_smmu_devices);
1878 spin_unlock(&arm_smmu_devices_lock); 1877 spin_unlock(&arm_smmu_devices_lock);
1878
1879 arm_smmu_device_reset(smmu);
1879 return 0; 1880 return 0;
1880 1881
1881out_free_irqs: 1882out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
1966 return ret; 1967 return ret;
1967 1968
1968 /* Oh, for a proper bus abstraction */ 1969 /* Oh, for a proper bus abstraction */
1969 if (!iommu_present(&platform_bus_type)); 1970 if (!iommu_present(&platform_bus_type))
1970 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1971 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1971 1972
1972 if (!iommu_present(&amba_bustype)); 1973 if (!iommu_present(&amba_bustype))
1973 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1974 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1974 1975
1975 return 0; 1976 return 0;
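
Note: the last arm-smmu hunk above drops stray semicolons after two if () conditions; with the semicolon in place each if controlled an empty statement, so the bus_set_iommu() call on the next line ran unconditionally. A minimal stand-alone illustration of the pitfall, plain userspace C with a placeholder condition rather than anything from the SMMU code:

#include <stdbool.h>
#include <stdio.h>

static bool present(void) { return true; }

int main(void)
{
	if (!present());	/* BUG: the ';' is an empty body, the test is ignored */
		puts("always printed");

	if (!present())		/* fixed: the call below is now guarded */
		puts("printed only when not present");

	return 0;
}
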
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
498 */ 498 */
499 atomic_t has_dirty; 499 atomic_t has_dirty;
500 500
501 struct ratelimit writeback_rate; 501 struct bch_ratelimit writeback_rate;
502 struct delayed_work writeback_rate_update; 502 struct delayed_work writeback_rate_update;
503 503
504 /* 504 /*
@@ -507,10 +507,9 @@ struct cached_dev {
507 */ 507 */
508 sector_t last_read; 508 sector_t last_read;
509 509
510 /* Number of writeback bios in flight */ 510 /* Limit number of writeback bios in flight */
511 atomic_t in_flight; 511 struct semaphore in_flight;
512 struct closure_with_timer writeback; 512 struct closure_with_timer writeback;
513 struct closure_waitlist writeback_wait;
514 513
515 struct keybuf writeback_keys; 514 struct keybuf writeback_keys;
516 515
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
926 926
927/* Mergesort */ 927/* Mergesort */
928 928
929static void sort_key_next(struct btree_iter *iter,
930 struct btree_iter_set *i)
931{
932 i->k = bkey_next(i->k);
933
934 if (i->k == i->end)
935 *i = iter->data[--iter->used];
936}
937
929static void btree_sort_fixup(struct btree_iter *iter) 938static void btree_sort_fixup(struct btree_iter *iter)
930{ 939{
931 while (iter->used > 1) { 940 while (iter->used > 1) {
932 struct btree_iter_set *top = iter->data, *i = top + 1; 941 struct btree_iter_set *top = iter->data, *i = top + 1;
933 struct bkey *k;
934 942
935 if (iter->used > 2 && 943 if (iter->used > 2 &&
936 btree_iter_cmp(i[0], i[1])) 944 btree_iter_cmp(i[0], i[1]))
937 i++; 945 i++;
938 946
939 for (k = i->k; 947 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
940 k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
941 k = bkey_next(k))
942 if (top->k > i->k)
943 __bch_cut_front(top->k, k);
944 else if (KEY_SIZE(k))
945 bch_cut_back(&START_KEY(k), top->k);
946
947 if (top->k < i->k || k == i->k)
948 break; 948 break;
949 949
950 heap_sift(iter, i - top, btree_iter_cmp); 950 if (!KEY_SIZE(i->k)) {
951 sort_key_next(iter, i);
952 heap_sift(iter, i - top, btree_iter_cmp);
953 continue;
954 }
955
956 if (top->k > i->k) {
957 if (bkey_cmp(top->k, i->k) >= 0)
958 sort_key_next(iter, i);
959 else
960 bch_cut_front(top->k, i->k);
961
962 heap_sift(iter, i - top, btree_iter_cmp);
963 } else {
964 /* can't happen because of comparison func */
965 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
966 bch_cut_back(&START_KEY(i->k), top->k);
967 }
951 } 968 }
952} 969}
953 970
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
255 255
256 return; 256 return;
257err: 257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu", 258 bch_cache_set_error(b->c, "io error reading bucket %zu",
259 PTR_BUCKET_NR(b->c, &b->key, 0)); 259 PTR_BUCKET_NR(b->c, &b->key, 0));
260} 260}
261 261
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
612 return SHRINK_STOP; 612 return SHRINK_STOP;
613 613
614 /* Return -1 if we can't do anything right now */ 614 /* Return -1 if we can't do anything right now */
615 if (sc->gfp_mask & __GFP_WAIT) 615 if (sc->gfp_mask & __GFP_IO)
616 mutex_lock(&c->bucket_lock); 616 mutex_lock(&c->bucket_lock);
617 else if (!mutex_trylock(&c->bucket_lock)) 617 else if (!mutex_trylock(&c->bucket_lock))
618 return -1; 618 return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155 155
156 /* Read journal buckets ordered by golden ratio hash to quickly 156 /*
157 * Read journal buckets ordered by golden ratio hash to quickly
157 * find a sequence of buckets with valid journal entries 158 * find a sequence of buckets with valid journal entries
158 */ 159 */
159 for (i = 0; i < ca->sb.njournal_buckets; i++) { 160 for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
166 goto bsearch; 167 goto bsearch;
167 } 168 }
168 169
169 /* If that fails, check all the buckets we haven't checked 170 /*
171 * If that fails, check all the buckets we haven't checked
170 * already 172 * already
171 */ 173 */
172 pr_debug("falling back to linear search"); 174 pr_debug("falling back to linear search");
173 175
174 for (l = 0; l < ca->sb.njournal_buckets; l++) { 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
175 if (test_bit(l, bitmap)) 177 l < ca->sb.njournal_buckets;
176 continue; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
177
178 if (read_bucket(l)) 179 if (read_bucket(l))
179 goto bsearch; 180 goto bsearch;
180 } 181
182 if (list_empty(list))
183 continue;
181bsearch: 184bsearch:
182 /* Binary search */ 185 /* Binary search */
183 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
197 r = m; 200 r = m;
198 } 201 }
199 202
200 /* Read buckets in reverse order until we stop finding more 203 /*
204 * Read buckets in reverse order until we stop finding more
201 * journal entries 205 * journal entries
202 */ 206 */
203 pr_debug("finishing up"); 207 pr_debug("finishing up: m %u njournal_buckets %u",
208 m, ca->sb.njournal_buckets);
204 l = m; 209 l = m;
205 210
206 while (1) { 211 while (1) {
@@ -228,9 +233,10 @@ bsearch:
228 } 233 }
229 } 234 }
230 235
231 c->journal.seq = list_entry(list->prev, 236 if (!list_empty(list))
232 struct journal_replay, 237 c->journal.seq = list_entry(list->prev,
233 list)->j.seq; 238 struct journal_replay,
239 list)->j.seq;
234 240
235 return 0; 241 return 0;
236#undef read_bucket 242#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
428 return; 434 return;
429 } 435 }
430 436
431 switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { 437 switch (atomic_read(&ja->discard_in_flight)) {
432 case DISCARD_IN_FLIGHT: 438 case DISCARD_IN_FLIGHT:
433 return; 439 return;
434 440
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
689 if (cl) 695 if (cl)
690 BUG_ON(!closure_wait(&w->wait, cl)); 696 BUG_ON(!closure_wait(&w->wait, cl));
691 697
698 closure_flush(&c->journal.io);
692 __journal_try_write(c, true); 699 __journal_try_write(c, true);
693 } 700 }
694} 701}
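
The journal-read fallback above now iterates only the unchecked buckets via find_first_zero_bit()/find_next_zero_bit() instead of testing every bit by hand. A small user-space sketch of walking the clear bits of a bitmap (plain C stand-ins, not the kernel bitmap API):

#include <stdio.h>

#define NBUCKETS 16

static unsigned next_zero_bit(unsigned long map, unsigned start, unsigned size)
{
    while (start < size && (map & (1UL << start)))
        start++;
    return start;
}

int main(void)
{
    unsigned long checked = 0xACD7UL;   /* buckets already read by the hash pass */

    for (unsigned l = next_zero_bit(checked, 0, NBUCKETS);
         l < NBUCKETS;
         l = next_zero_bit(checked, l + 1, NBUCKETS))
        printf("reading bucket %u\n", l);

    return 0;
}
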
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..2a7f0dd6abab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -996,17 +996,19 @@ static void request_write(struct cached_dev *dc, struct search *s)
996 closure_bio_submit(bio, cl, s->d); 996 closure_bio_submit(bio, cl, s->d);
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 s->op.cache_bio = bio;
999 1000
1000 if (s->op.flush_journal) { 1001 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1002 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 1003 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
1003 dc->disk.bio_split); 1004 dc->disk.bio_split);
1004 1005
1005 bio->bi_size = 0; 1006 flush->bi_rw = WRITE_FLUSH;
1006 bio->bi_vcnt = 0; 1007 flush->bi_bdev = bio->bi_bdev;
1007 closure_bio_submit(bio, cl, s->d); 1008 flush->bi_end_io = request_endio;
1008 } else { 1009 flush->bi_private = cl;
1009 s->op.cache_bio = bio; 1010
1011 closure_bio_submit(flush, cl, s->d);
1010 } 1012 }
1011 } 1013 }
1012out: 1014out:
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
223 } 223 }
224 224
225 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */ 226 if (size > SB_LABEL_SIZE)
227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
228 bch_write_bdev_super(dc, NULL); 233 bch_write_bdev_super(dc, NULL);
229 if (dc->disk.c) { 234 if (dc->disk.c) {
230 memcpy(dc->disk.c->uuids[dc->disk.id].label, 235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
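
The sysfs hunk above bounds-checks the label write instead of always copying SB_LABEL_SIZE bytes: oversized input is rejected, short input is NUL-terminated, and a trailing newline is stripped. A user-space sketch of the same copy logic (label_store() and LABEL_SIZE are made-up stand-ins):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define LABEL_SIZE 32

static char label[LABEL_SIZE];

static int label_store(const char *buf, size_t size)
{
    if (size > LABEL_SIZE)
        return -EINVAL;
    memcpy(label, buf, size);
    if (size < LABEL_SIZE)
        label[size] = '\0';
    if (size && label[size - 1] == '\n')
        label[size - 1] = '\0';
    return 0;
}

int main(void)
{
    label_store("backup-disk\n", strlen("backup-disk\n"));
    printf("label: '%s'\n", label);     /* prints 'backup-disk', newline gone */
    return 0;
}
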
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
190 stats->last = now ?: 1; 190 stats->last = now ?: 1;
191} 191}
192 192
193unsigned bch_next_delay(struct ratelimit *d, uint64_t done) 193/**
194 * bch_next_delay() - increment @d by the amount of work done, and return how
195 * long to delay until the next time to do some work.
196 *
197 * @d - the struct bch_ratelimit to update
198 * @done - the amount of work done, in arbitrary units
199 *
200 * Returns the amount of time to delay by, in jiffies
201 */
202uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
194{ 203{
195 uint64_t now = local_clock(); 204 uint64_t now = local_clock();
196 205
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
450 (ewma) >> factor; \ 450 (ewma) >> factor; \
451}) 451})
452 452
453struct ratelimit { 453struct bch_ratelimit {
454 /* Next time we want to do some work, in nanoseconds */
454 uint64_t next; 455 uint64_t next;
456
457 /*
458 * Rate at which we want to do work, in units per nanosecond
459 * The units here correspond to the units passed to bch_next_delay()
460 */
455 unsigned rate; 461 unsigned rate;
456}; 462};
457 463
458static inline void ratelimit_reset(struct ratelimit *d) 464static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
459{ 465{
460 d->next = local_clock(); 466 d->next = local_clock();
461} 467}
462 468
463unsigned bch_next_delay(struct ratelimit *d, uint64_t done); 469uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
464 470
465#define __DIV_SAFE(n, d, zero) \ 471#define __DIV_SAFE(n, d, zero) \
466({ \ 472({ \
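
The bch_ratelimit comments above describe the bookkeeping: `next` is the next time work is allowed (in nanoseconds), `rate` is work units per nanosecond, and bch_next_delay() charges the work just done and returns how long to wait. A rough user-space sketch of that idea, leaving out the original's clamping and jiffies conversion (all names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct work_ratelimit {
    uint64_t next;      /* next time to do work, in nanoseconds */
    unsigned rate;      /* work units per nanosecond */
};

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t next_delay_ns(struct work_ratelimit *d, uint64_t done)
{
    uint64_t now = now_ns();

    d->next += done / d->rate;      /* charge the work we just did */
    if (d->next < now)              /* fell behind: don't bank credit forever */
        d->next = now;
    return d->next - now;
}

int main(void)
{
    struct work_ratelimit rl = { now_ns(), 4 };     /* 4 units per ns */

    printf("delay: %llu ns\n",
           (unsigned long long)next_delay_ns(&rl, 40000000));
    return 0;
}
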
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
94 94
95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
96{ 96{
97 uint64_t ret;
98
97 if (atomic_read(&dc->disk.detaching) || 99 if (atomic_read(&dc->disk.detaching) ||
98 !dc->writeback_percent) 100 !dc->writeback_percent)
99 return 0; 101 return 0;
100 102
101 return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 103 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
104
105 return min_t(uint64_t, ret, HZ);
102} 106}
103 107
104/* Background writeback */ 108/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
208 212
209 up_write(&dc->writeback_lock); 213 up_write(&dc->writeback_lock);
210 214
211 ratelimit_reset(&dc->writeback_rate); 215 bch_ratelimit_reset(&dc->writeback_rate);
212 216
213 /* Punt to workqueue only so we don't recurse and blow the stack */ 217 /* Punt to workqueue only so we don't recurse and blow the stack */
214 continue_at(cl, read_dirty, dirty_wq); 218 continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
318 } 322 }
319 323
320 bch_keybuf_del(&dc->writeback_keys, w); 324 bch_keybuf_del(&dc->writeback_keys, w);
321 atomic_dec_bug(&dc->in_flight); 325 up(&dc->in_flight);
322
323 closure_wake_up(&dc->writeback_wait);
324 326
325 closure_return_with_destructor(cl, dirty_io_destructor); 327 closure_return_with_destructor(cl, dirty_io_destructor);
326} 328}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
349 351
350 closure_bio_submit(&io->bio, cl, &io->dc->disk); 352 closure_bio_submit(&io->bio, cl, &io->dc->disk);
351 353
352 continue_at(cl, write_dirty_finish, dirty_wq); 354 continue_at(cl, write_dirty_finish, system_wq);
353} 355}
354 356
355static void read_dirty_endio(struct bio *bio, int error) 357static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
369 371
370 closure_bio_submit(&io->bio, cl, &io->dc->disk); 372 closure_bio_submit(&io->bio, cl, &io->dc->disk);
371 373
372 continue_at(cl, write_dirty, dirty_wq); 374 continue_at(cl, write_dirty, system_wq);
373} 375}
374 376
375static void read_dirty(struct closure *cl) 377static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
394 396
395 if (delay > 0 && 397 if (delay > 0 &&
396 (KEY_START(&w->key) != dc->last_read || 398 (KEY_START(&w->key) != dc->last_read ||
397 jiffies_to_msecs(delay) > 50)) { 399 jiffies_to_msecs(delay) > 50))
398 w->private = NULL; 400 delay = schedule_timeout_uninterruptible(delay);
399
400 closure_delay(&dc->writeback, delay);
401 continue_at(cl, read_dirty, dirty_wq);
402 }
403 401
404 dc->last_read = KEY_OFFSET(&w->key); 402 dc->last_read = KEY_OFFSET(&w->key);
405 403
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
424 422
425 trace_bcache_writeback(&w->key); 423 trace_bcache_writeback(&w->key);
426 424
427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 425 down(&dc->in_flight);
426 closure_call(&io->cl, read_dirty_submit, NULL, cl);
428 427
429 delay = writeback_delay(dc, KEY_SIZE(&w->key)); 428 delay = writeback_delay(dc, KEY_SIZE(&w->key));
430
431 atomic_inc(&dc->in_flight);
432
433 if (!closure_wait_event(&dc->writeback_wait, cl,
434 atomic_read(&dc->in_flight) < 64))
435 continue_at(cl, read_dirty, dirty_wq);
436 } 429 }
437 430
438 if (0) { 431 if (0) {
@@ -442,7 +435,11 @@ err:
442 bch_keybuf_del(&dc->writeback_keys, w); 435 bch_keybuf_del(&dc->writeback_keys, w);
443 } 436 }
444 437
445 refill_dirty(cl); 438 /*
439 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
440 * freed) before refilling again
441 */
442 continue_at(cl, refill_dirty, dirty_wq);
446} 443}
447 444
448/* Init */ 445/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
484 481
485void bch_cached_dev_writeback_init(struct cached_dev *dc) 482void bch_cached_dev_writeback_init(struct cached_dev *dc)
486{ 483{
484 sema_init(&dc->in_flight, 64);
487 closure_init_unlocked(&dc->writeback); 485 closure_init_unlocked(&dc->writeback);
488 init_rwsem(&dc->writeback_lock); 486 init_rwsem(&dc->writeback_lock);
489 487
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
513 511
514int __init bch_writeback_init(void) 512int __init bch_writeback_init(void)
515{ 513{
516 dirty_wq = create_singlethread_workqueue("bcache_writeback"); 514 dirty_wq = create_workqueue("bcache_writeback");
517 if (!dirty_wq) 515 if (!dirty_wq)
518 return -ENOMEM; 516 return -ENOMEM;
519 517
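
The writeback changes above drop the atomic counter plus closure waitlist and cap in-flight writeback I/O with a counting semaphore initialised to 64: down() before submitting a dirty I/O, up() when it finishes. Sketch of the same throttling pattern with POSIX semaphores (user space, illustrative function names):

#include <semaphore.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 64

static sem_t in_flight;                 /* counts free writeback slots */

static void submit_one_io(int idx)      /* stand-in for submitting a dirty IO */
{
    printf("submitting io %d\n", idx);
}

static void complete_one_io(int idx)    /* stand-in for the completion path */
{
    printf("io %d done\n", idx);
    sem_post(&in_flight);               /* release the slot, like up() */
}

int main(void)
{
    sem_init(&in_flight, 0, MAX_IN_FLIGHT);

    for (int i = 0; i < 4; i++) {
        sem_wait(&in_flight);           /* blocks once 64 IOs are outstanding, like down() */
        submit_one_io(i);
        complete_one_io(i);             /* in the driver this runs from the IO callback */
    }

    sem_destroy(&in_flight);
    return 0;
}
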
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
24 22
25struct dm_io_client { 23struct dm_io_client {
26 mempool_t *pool; 24 mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
50struct dm_io_client *dm_io_client_create(void) 48struct dm_io_client *dm_io_client_create(void)
51{ 49{
52 struct dm_io_client *client; 50 struct dm_io_client *client;
51 unsigned min_ios = dm_get_reserved_bio_based_ios();
53 52
54 client = kmalloc(sizeof(*client), GFP_KERNEL); 53 client = kmalloc(sizeof(*client), GFP_KERNEL);
55 if (!client) 54 if (!client)
56 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
57 56
58 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
59 if (!client->pool) 58 if (!client->pool)
60 goto bad; 59 goto bad;
61 60
62 client->bios = bioset_create(MIN_BIOS, 0); 61 client->bios = bioset_create(min_ios, 0);
63 if (!client->bios) 62 if (!client->bios)
64 goto bad; 63 goto bad;
65 64
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm.h"
10#include "dm-path-selector.h" 11#include "dm-path-selector.h"
11#include "dm-uevent.h" 12#include "dm-uevent.h"
12 13
@@ -116,8 +117,6 @@ struct dm_mpath_io {
116 117
117typedef int (*action_fn) (struct pgpath *pgpath); 118typedef int (*action_fn) (struct pgpath *pgpath);
118 119
119#define MIN_IOS 256 /* Mempool size */
120
121static struct kmem_cache *_mpio_cache; 120static struct kmem_cache *_mpio_cache;
122 121
123static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
190static struct multipath *alloc_multipath(struct dm_target *ti) 189static struct multipath *alloc_multipath(struct dm_target *ti)
191{ 190{
192 struct multipath *m; 191 struct multipath *m;
192 unsigned min_ios = dm_get_reserved_rq_based_ios();
193 193
194 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
195 if (m) { 195 if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
202 INIT_WORK(&m->trigger_event, trigger_event); 202 INIT_WORK(&m->trigger_event, trigger_event);
203 init_waitqueue_head(&m->pg_init_wait); 203 init_waitqueue_head(&m->pg_init_wait);
204 mutex_init(&m->work_mutex); 204 mutex_init(&m->work_mutex);
205 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 205 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
206 if (!m->mpio_pool) { 206 if (!m->mpio_pool) {
207 kfree(m); 207 kfree(m);
208 return NULL; 208 return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
1268 case -EREMOTEIO: 1268 case -EREMOTEIO:
1269 case -EILSEQ: 1269 case -EILSEQ:
1270 case -ENODATA: 1270 case -ENODATA:
1271 case -ENOSPC:
1271 return 1; 1272 return 1;
1272 } 1273 }
1273 1274
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
1298 if (!error && !clone->errors) 1299 if (!error && !clone->errors)
1299 return 0; /* I/O complete */ 1300 return 0; /* I/O complete */
1300 1301
1301 if (noretry_error(error)) 1302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
1302 return error; 1311 return error;
1312 }
1303 1313
1304 if (mpio->pgpath) 1314 if (mpio->pgpath)
1305 fail_path(mpio->pgpath); 1315 fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..2d2b1b7588d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_work(&req.work); 259 flush_workqueue(ps->metadata_wq);
260 260
261 return req.result; 261 return req.result;
262} 262}
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); 269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
270} 270}
271 271
272static void skip_metadata(struct pstore *ps)
273{
274 uint32_t stride = ps->exceptions_per_area + 1;
275 chunk_t next_free = ps->next_free;
276 if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
277 ps->next_free++;
278}
279
272/* 280/*
273 * Read or write a metadata area. Remembering to skip the first 281 * Read or write a metadata area. Remembering to skip the first
274 * chunk which holds the header. 282 * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
502 510
503 ps->current_area--; 511 ps->current_area--;
504 512
513 skip_metadata(ps);
514
505 return 0; 515 return 0;
506} 516}
507 517
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
616 struct dm_exception *e) 626 struct dm_exception *e)
617{ 627{
618 struct pstore *ps = get_info(store); 628 struct pstore *ps = get_info(store);
619 uint32_t stride;
620 chunk_t next_free;
621 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); 629 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
622 630
623 /* Is there enough room ? */ 631 /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
630 * Move onto the next free pending, making sure to take 638 * Move onto the next free pending, making sure to take
631 * into account the location of the metadata chunks. 639 * into account the location of the metadata chunks.
632 */ 640 */
633 stride = (ps->exceptions_per_area + 1); 641 ps->next_free++;
634 next_free = ++ps->next_free; 642 skip_metadata(ps);
635 if (sector_div(next_free, stride) == 1)
636 ps->next_free++;
637 643
638 atomic_inc(&ps->pending_count); 644 atomic_inc(&ps->pending_count);
639 return 0; 645 return 0;
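
skip_metadata() above factors out the stride arithmetic: each stride of (exceptions_per_area + 1) chunks contains one metadata chunk, so whenever next_free lands on that slot it must be stepped over. User-space sketch with sector_div() written as a plain modulo (the constants are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HDR_CHUNKS 1u       /* stand-in for NUM_SNAPSHOT_HDR_CHUNKS */

struct pstore {
    uint64_t next_free;
    uint32_t exceptions_per_area;
};

static void skip_metadata(struct pstore *ps)
{
    uint32_t stride = ps->exceptions_per_area + 1;

    if (ps->next_free % stride == HDR_CHUNKS)
        ps->next_free++;
}

int main(void)
{
    struct pstore ps = { .next_free = 2, .exceptions_per_area = 4 };

    for (int i = 0; i < 12; i++) {
        printf("%llu ", (unsigned long long)ps.next_free);
        ps.next_free++;             /* as in persistent_prepare_exception() */
        skip_metadata(&ps);
    }
    printf("\n");   /* chunks 6 and 11 (metadata) never show up in the output */
    return 0;
}
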
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
725 */ 725 */
726static int init_hash_tables(struct dm_snapshot *s) 726static int init_hash_tables(struct dm_snapshot *s)
727{ 727{
728 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 728 sector_t hash_size, cow_dev_size, max_buckets;
729 729
730 /* 730 /*
731 * Calculate based on the size of the original volume or 731 * Calculate based on the size of the original volume or
732 * the COW volume... 732 * the COW volume...
733 */ 733 */
734 cow_dev_size = get_dev_size(s->cow->bdev); 734 cow_dev_size = get_dev_size(s->cow->bdev);
735 origin_dev_size = get_dev_size(s->origin->bdev);
736 max_buckets = calc_max_buckets(); 735 max_buckets = calc_max_buckets();
737 736
738 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 737 hash_size = cow_dev_size >> s->store->chunk_shift;
739 hash_size = min(hash_size, max_buckets); 738 hash_size = min(hash_size, max_buckets);
740 739
741 if (hash_size < 64) 740 if (hash_size < 64)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
451 struct dm_stat_percpu *p; 451 struct dm_stat_percpu *p;
452 452
453 /* 453 /*
454 * For strict correctness we should use local_irq_disable/enable 454 * For strict correctness we should use local_irq_save/restore
455 * instead of preempt_disable/enable. 455 * instead of preempt_disable/enable.
456 * 456 *
457 * This is racy if the driver finishes bios from non-interrupt 457 * preempt_disable/enable is racy if the driver finishes bios
458 * context as well as from interrupt context or from more different 458 * from non-interrupt context as well as from interrupt context
459 * interrupts. 459 * or from more different interrupts.
460 * 460 *
461 * However, the race only results in not counting some events, 461 * On 64-bit architectures the race only results in not counting some
462 * so it is acceptable. 462 * events, so it is acceptable. On 32-bit architectures the race could
463 * cause the counter going off by 2^32, so we need to do proper locking
464 * there.
463 * 465 *
464 * part_stat_lock()/part_stat_unlock() have this race too. 466 * part_stat_lock()/part_stat_unlock() have this race too.
465 */ 467 */
468#if BITS_PER_LONG == 32
469 unsigned long flags;
470 local_irq_save(flags);
471#else
466 preempt_disable(); 472 preempt_disable();
473#endif
467 p = &s->stat_percpu[smp_processor_id()][entry]; 474 p = &s->stat_percpu[smp_processor_id()][entry];
468 475
469 if (!end) { 476 if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
478 p->ticks[idx] += duration; 485 p->ticks[idx] += duration;
479 } 486 }
480 487
488#if BITS_PER_LONG == 32
489 local_irq_restore(flags);
490#else
481 preempt_enable(); 491 preempt_enable();
492#endif
482} 493}
483 494
484static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, 495static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2095 * them down to the data device. The thin device's discard 2095 * them down to the data device. The thin device's discard
2096 * processing will cause mappings to be removed from the btree. 2096 * processing will cause mappings to be removed from the btree.
2097 */ 2097 */
2098 ti->discard_zeroes_data_unsupported = true;
2098 if (pf.discard_enabled && pf.discard_passdown) { 2099 if (pf.discard_enabled && pf.discard_passdown) {
2099 ti->num_discard_bios = 1; 2100 ti->num_discard_bios = 1;
2100 2101
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2104 * thin devices' discard limits consistent). 2105 * thin devices' discard limits consistent).
2105 */ 2106 */
2106 ti->discards_supported = true; 2107 ti->discards_supported = true;
2107 ti->discard_zeroes_data_unsupported = true;
2108 } 2108 }
2109 ti->private = pt; 2109 ti->private = pt;
2110 2110
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2689 * They get transferred to the live pool in bind_control_target() 2689 * They get transferred to the live pool in bind_control_target()
2690 * called from pool_preresume(). 2690 * called from pool_preresume().
2691 */ 2691 */
2692 if (!pt->adjusted_pf.discard_enabled) 2692 if (!pt->adjusted_pf.discard_enabled) {
2693 /*
2694 * Must explicitly disallow stacking discard limits otherwise the
2695 * block layer will stack them if pool's data device has support.
2696 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2697 * user to see that, so make sure to set all discard limits to 0.
2698 */
2699 limits->discard_granularity = 0;
2693 return; 2700 return;
2701 }
2694 2702
2695 disable_passdown_if_not_supported(pt); 2703 disable_passdown_if_not_supported(pt);
2696 2704
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2826 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2834 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2827 2835
2828 /* In case the pool supports discards, pass them on. */ 2836 /* In case the pool supports discards, pass them on. */
2837 ti->discard_zeroes_data_unsupported = true;
2829 if (tc->pool->pf.discard_enabled) { 2838 if (tc->pool->pf.discard_enabled) {
2830 ti->discards_supported = true; 2839 ti->discards_supported = true;
2831 ti->num_discard_bios = 1; 2840 ti->num_discard_bios = 1;
2832 ti->discard_zeroes_data_unsupported = true;
2833 /* Discard bios must be split on a block boundary */ 2841 /* Discard bios must be split on a block boundary */
2834 ti->split_discard_bios = true; 2842 ti->split_discard_bios = true;
2835 } 2843 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
211 struct bio_set *bs; 211 struct bio_set *bs;
212}; 212};
213 213
214#define MIN_IOS 256 214#define RESERVED_BIO_BASED_IOS 16
215#define RESERVED_REQUEST_BASED_IOS 256
216#define RESERVED_MAX_IOS 1024
215static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
216static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
217 219
220/*
221 * Bio-based DM's mempools' reserved IOs set by the user.
222 */
223static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
224
225/*
226 * Request-based DM's mempools' reserved IOs set by the user.
227 */
228static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
229
230static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
231 unsigned def, unsigned max)
232{
233 unsigned ios = ACCESS_ONCE(*reserved_ios);
234 unsigned modified_ios = 0;
235
236 if (!ios)
237 modified_ios = def;
238 else if (ios > max)
239 modified_ios = max;
240
241 if (modified_ios) {
242 (void)cmpxchg(reserved_ios, ios, modified_ios);
243 ios = modified_ios;
244 }
245
246 return ios;
247}
248
249unsigned dm_get_reserved_bio_based_ios(void)
250{
251 return __dm_get_reserved_ios(&reserved_bio_based_ios,
252 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
253}
254EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
255
256unsigned dm_get_reserved_rq_based_ios(void)
257{
258 return __dm_get_reserved_ios(&reserved_rq_based_ios,
259 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
260}
261EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
262
218static int __init local_init(void) 263static int __init local_init(void)
219{ 264{
220 int r = -ENOMEM; 265 int r = -ENOMEM;
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278} 2323}
2279 2324
2280/* 2325/*
2326 * The queue_limits are only valid as long as you have a reference
2327 * count on 'md'.
2328 */
2329struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2330{
2331 BUG_ON(!atomic_read(&md->holders));
2332 return &md->queue->limits;
2333}
2334EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2335
2336/*
2281 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2337 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2282 */ 2338 */
2283static int dm_init_request_based_queue(struct mapped_device *md) 2339static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
2862 2918
2863 if (type == DM_TYPE_BIO_BASED) { 2919 if (type == DM_TYPE_BIO_BASED) {
2864 cachep = _io_cache; 2920 cachep = _io_cache;
2865 pool_size = 16; 2921 pool_size = dm_get_reserved_bio_based_ios();
2866 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2922 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2867 } else if (type == DM_TYPE_REQUEST_BASED) { 2923 } else if (type == DM_TYPE_REQUEST_BASED) {
2868 cachep = _rq_tio_cache; 2924 cachep = _rq_tio_cache;
2869 pool_size = MIN_IOS; 2925 pool_size = dm_get_reserved_rq_based_ios();
2870 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2871 /* per_bio_data_size is not used. See __bind_mempools(). */ 2927 /* per_bio_data_size is not used. See __bind_mempools(). */
2872 WARN_ON(per_bio_data_size != 0); 2928 WARN_ON(per_bio_data_size != 0);
2873 } else 2929 } else
2874 goto out; 2930 goto out;
2875 2931
2876 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); 2932 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2877 if (!pools->io_pool) 2933 if (!pools->io_pool)
2878 goto out; 2934 goto out;
2879 2935
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
2924 2980
2925module_param(major, uint, 0); 2981module_param(major, uint, 0);
2926MODULE_PARM_DESC(major, "The major number of the device mapper"); 2982MODULE_PARM_DESC(major, "The major number of the device mapper");
2983
2984module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2985MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2986
2987module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2988MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2989
2927MODULE_DESCRIPTION(DM_NAME " driver"); 2990MODULE_DESCRIPTION(DM_NAME " driver");
2928MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2991MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2929MODULE_LICENSE("GPL"); 2992MODULE_LICENSE("GPL");
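
__dm_get_reserved_ios() above reads the tunable once, substitutes the default for 0, caps it at RESERVED_MAX_IOS, and writes the corrected value back with a best-effort cmpxchg. The same clamp-and-writeback pattern sketched with C11 atomics in user space (constants illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define DEF_IOS 256u
#define MAX_IOS 1024u

static _Atomic unsigned reserved_ios;   /* user-tunable knob */

static unsigned get_reserved_ios(void)
{
    unsigned ios = atomic_load(&reserved_ios);
    unsigned fixed = 0;

    if (!ios)
        fixed = DEF_IOS;
    else if (ios > MAX_IOS)
        fixed = MAX_IOS;

    if (fixed) {
        /* best effort: if someone changed the knob meanwhile, leave it alone */
        atomic_compare_exchange_strong(&reserved_ios, &ios, fixed);
        ios = fixed;
    }
    return ios;
}

int main(void)
{
    printf("%u\n", get_reserved_ios());     /* 256: zero replaced by default */
    atomic_store(&reserved_ios, 5000);
    printf("%u\n", get_reserved_ios());     /* 1024: clamped to the maximum */
    return 0;
}
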
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
184/* 184/*
185 * Helpers that are used by DM core 185 * Helpers that are used by DM core
186 */ 186 */
187unsigned dm_get_reserved_bio_based_ios(void);
188unsigned dm_get_reserved_rq_based_ios(void);
189
187static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) 190static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
188{ 191{
189 return !maxlen || strlen(result) + 1 >= maxlen; 192 return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf4d7e1d5e1..561a65f82e26 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8111 u64 *p; 8111 u64 *p;
8112 int lo, hi; 8112 int lo, hi;
8113 int rv = 1; 8113 int rv = 1;
8114 unsigned long flags;
8114 8115
8115 if (bb->shift < 0) 8116 if (bb->shift < 0)
8116 /* badblocks are disabled */ 8117 /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8125 sectors = next - s; 8126 sectors = next - s;
8126 } 8127 }
8127 8128
8128 write_seqlock_irq(&bb->lock); 8129 write_seqlock_irqsave(&bb->lock, flags);
8129 8130
8130 p = bb->page; 8131 p = bb->page;
8131 lo = 0; 8132 lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8241 bb->changed = 1; 8242 bb->changed = 1;
8242 if (!acknowledged) 8243 if (!acknowledged)
8243 bb->unacked_exist = 1; 8244 bb->unacked_exist = 1;
8244 write_sequnlock_irq(&bb->lock); 8245 write_sequnlock_irqrestore(&bb->lock, flags);
8245 8246
8246 return rv; 8247 return rv;
8247} 8248}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d60412c7f995..aacf6bf352d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
1479 } 1479 }
1480 } 1480 }
1481 if (rdev 1481 if (rdev
1482 && rdev->recovery_offset == MaxSector
1482 && !test_bit(Faulty, &rdev->flags) 1483 && !test_bit(Faulty, &rdev->flags)
1483 && !test_and_set_bit(In_sync, &rdev->flags)) { 1484 && !test_and_set_bit(In_sync, &rdev->flags)) {
1484 count++; 1485 count++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index df7b0a06b0ea..73dc8a377522 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
1782 } 1782 }
1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1784 } else if (tmp->rdev 1784 } else if (tmp->rdev
1785 && tmp->rdev->recovery_offset == MaxSector
1785 && !test_bit(Faulty, &tmp->rdev->flags) 1786 && !test_bit(Faulty, &tmp->rdev->flags)
1786 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1787 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1787 count++; 1788 count++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f252ca1a..f8b906843926 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
779 bi->bi_io_vec[0].bv_offset = 0; 779 bi->bi_io_vec[0].bv_offset = 0;
780 bi->bi_size = STRIPE_SIZE; 780 bi->bi_size = STRIPE_SIZE;
781 /*
782 * If this is discard request, set bi_vcnt 0. We don't
783 * want to confuse SCSI because SCSI will replace payload
784 */
785 if (rw & REQ_DISCARD)
786 bi->bi_vcnt = 0;
781 if (rrdev) 787 if (rrdev)
782 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 788 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
783 789
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
816 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 822 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
817 rbi->bi_io_vec[0].bv_offset = 0; 823 rbi->bi_io_vec[0].bv_offset = 0;
818 rbi->bi_size = STRIPE_SIZE; 824 rbi->bi_size = STRIPE_SIZE;
825 /*
826 * If this is discard request, set bi_vcnt 0. We don't
827 * want to confuse SCSI because SCSI will replace payload
828 */
829 if (rw & REQ_DISCARD)
830 rbi->bi_vcnt = 0;
819 if (conf->mddev->gendisk) 831 if (conf->mddev->gendisk)
820 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 832 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
821 rbi, disk_devt(conf->mddev->gendisk), 833 rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
2910 } 2922 }
2911 /* now that discard is done we can proceed with any sync */ 2923 /* now that discard is done we can proceed with any sync */
2912 clear_bit(STRIPE_DISCARD, &sh->state); 2924 clear_bit(STRIPE_DISCARD, &sh->state);
2925 /*
2926 * SCSI discard will change some bio fields and the stripe has
2927 * no updated data, so remove it from hash list and the stripe
2928 * will be reinitialized
2929 */
2930 spin_lock_irq(&conf->device_lock);
2931 remove_hash(sh);
2932 spin_unlock_irq(&conf->device_lock);
2913 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2933 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2914 set_bit(STRIPE_HANDLE, &sh->state); 2934 set_bit(STRIPE_HANDLE, &sh->state);
2915 2935
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 2521f7e23018..e79749cfec81 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
912 { 0xd5, 0x03, 0x03 }, 912 { 0xd5, 0x03, 0x03 },
913 }; 913 };
914 914
915 /* firmware status */ 915 if (priv->warm) {
916 ret = tda10071_rd_reg(priv, 0x51, &tmp);
917 if (ret)
918 goto error;
919
920 if (!tmp) {
921 /* warm state - wake up device from sleep */ 916 /* warm state - wake up device from sleep */
922 priv->warm = 1;
923 917
924 for (i = 0; i < ARRAY_SIZE(tab); i++) { 918 for (i = 0; i < ARRAY_SIZE(tab); i++) {
925 ret = tda10071_wr_reg_mask(priv, tab[i].reg, 919 ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
937 goto error; 931 goto error;
938 } else { 932 } else {
939 /* cold state - try to download firmware */ 933 /* cold state - try to download firmware */
940 priv->warm = 0;
941 934
942 /* request the firmware, this will block and timeout */ 935 /* request the firmware, this will block and timeout */
943 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent); 936 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index bb0c99d7a4f1..b06a7e54ee0d 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
628 628
629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = { 629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
630 .type = V4L2_DV_BT_656_1120, 630 .type = V4L2_DV_BT_656_1120,
631 .bt = { 631 /* keep this initialization for compatibility with GCC < 4.4.6 */
632 .max_width = 1920, 632 .reserved = { 0 },
633 .max_height = 1200, 633 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
634 .min_pixelclock = 25000000, 634 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
635 .max_pixelclock = 170000000,
636 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
637 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 635 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
638 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 636 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
639 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 637 V4L2_DV_BT_CAP_CUSTOM)
640 },
641}; 638};
642 639
643static int ad9389b_s_dv_timings(struct v4l2_subdev *sd, 640static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7a576097471f..7c8d971f1f61 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
119 119
120static const struct v4l2_dv_timings_cap adv7511_timings_cap = { 120static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
121 .type = V4L2_DV_BT_656_1120, 121 .type = V4L2_DV_BT_656_1120,
122 .bt = { 122 /* keep this initialization for compatibility with GCC < 4.4.6 */
123 .max_width = ADV7511_MAX_WIDTH, 123 .reserved = { 0 },
124 .max_height = ADV7511_MAX_HEIGHT, 124 V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
125 .min_pixelclock = ADV7511_MIN_PIXELCLOCK, 125 ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
126 .max_pixelclock = ADV7511_MAX_PIXELCLOCK, 126 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
127 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
128 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 127 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
129 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 128 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
130 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 129 V4L2_DV_BT_CAP_CUSTOM)
131 },
132}; 130};
133 131
134static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd) 132static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1126 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1); 1124 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
1127 if (state->i2c_edid == NULL) { 1125 if (state->i2c_edid == NULL) {
1128 v4l2_err(sd, "failed to register edid i2c client\n"); 1126 v4l2_err(sd, "failed to register edid i2c client\n");
1127 err = -ENOMEM;
1129 goto err_entity; 1128 goto err_entity;
1130 } 1129 }
1131 1130
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1133 state->work_queue = create_singlethread_workqueue(sd->name); 1132 state->work_queue = create_singlethread_workqueue(sd->name);
1134 if (state->work_queue == NULL) { 1133 if (state->work_queue == NULL) {
1135 v4l2_err(sd, "could not create workqueue\n"); 1134 v4l2_err(sd, "could not create workqueue\n");
1135 err = -ENOMEM;
1136 goto err_unreg_cec; 1136 goto err_unreg_cec;
1137 } 1137 }
1138 1138
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index d1748901337c..22f729d66a96 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
546 546
547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = { 547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
548 .type = V4L2_DV_BT_656_1120, 548 .type = V4L2_DV_BT_656_1120,
549 .bt = { 549 /* keep this initialization for compatibility with GCC < 4.4.6 */
550 .max_width = 1920, 550 .reserved = { 0 },
551 .max_height = 1200, 551 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
552 .min_pixelclock = 25000000, 552 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
553 .max_pixelclock = 170000000,
554 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
555 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 553 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
556 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 554 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
557 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 555 V4L2_DV_BT_CAP_CUSTOM)
558 },
559}; 556};
560 557
561static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = { 558static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
562 .type = V4L2_DV_BT_656_1120, 559 .type = V4L2_DV_BT_656_1120,
563 .bt = { 560 /* keep this initialization for compatibility with GCC < 4.4.6 */
564 .max_width = 1920, 561 .reserved = { 0 },
565 .max_height = 1200, 562 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
566 .min_pixelclock = 25000000, 563 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
567 .max_pixelclock = 225000000,
568 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
569 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 564 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
570 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 565 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
571 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 566 V4L2_DV_BT_CAP_CUSTOM)
572 },
573}; 567};
574 568
575static inline const struct v4l2_dv_timings_cap * 569static inline const struct v4l2_dv_timings_cap *
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index a58a8f663ffb..d9f65d7e3e58 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -46,14 +46,10 @@ struct ths8200_state {
46 46
47static const struct v4l2_dv_timings_cap ths8200_timings_cap = { 47static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
48 .type = V4L2_DV_BT_656_1120, 48 .type = V4L2_DV_BT_656_1120,
49 .bt = { 49 /* keep this initialization for compatibility with GCC < 4.4.6 */
50 .max_width = 1920, 50 .reserved = { 0 },
51 .max_height = 1080, 51 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
52 .min_pixelclock = 25000000, 52 V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
53 .max_pixelclock = 148500000,
54 .standards = V4L2_DV_BT_STD_CEA861,
55 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
56 },
57}; 53};
58 54
59static inline struct ths8200_state *to_state(struct v4l2_subdev *sd) 55static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
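
The four timings_cap tables above replace open-coded .bt field assignments with the V4L2_INIT_BT_TIMINGS() helper while keeping the explicit .reserved = { 0 } initializer. A simplified stand-in below shows the shape of such a macro filling a union member inside the capability struct; the types and the macro are invented for illustration and are not the real V4L2 definitions:

#include <stdio.h>

struct bt_timings_cap {
    unsigned min_width, max_width, min_height, max_height;
};

struct dv_timings_cap {
    unsigned type;
    unsigned reserved[2];
    union {
        struct bt_timings_cap bt;
        unsigned raw[4];
    };
};

#define INIT_BT_TIMINGS(minw, maxw, minh, maxh) \
    .bt = { (minw), (maxw), (minh), (maxh) }

static const struct dv_timings_cap cap = {
    .type = 1,
    .reserved = { 0 },
    INIT_BT_TIMINGS(0, 1920, 0, 1200),
};

int main(void)
{
    printf("caps up to %ux%u\n", cap.bt.max_width, cap.bt.max_height);
    return 0;
}
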
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e12bbd8c3f0b..fb60da85bc2c 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
1455 1455
1456 /* stop video capture */ 1456 /* stop video capture */
1457 if (res_check(fh, RESOURCE_VIDEO)) { 1457 if (res_check(fh, RESOURCE_VIDEO)) {
1458 pm_qos_remove_request(&dev->qos_request);
1458 videobuf_streamoff(&fh->cap); 1459 videobuf_streamoff(&fh->cap);
1459 res_free(dev,fh,RESOURCE_VIDEO); 1460 res_free(dev,fh,RESOURCE_VIDEO);
1460 } 1461 }
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 15d23968d1de..9b88a4601007 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
1423 jpeg->vfd_decoder->release = video_device_release; 1423 jpeg->vfd_decoder->release = video_device_release;
1424 jpeg->vfd_decoder->lock = &jpeg->lock; 1424 jpeg->vfd_decoder->lock = &jpeg->lock;
1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; 1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
1426 jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
1426 1427
1427 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); 1428 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
1428 if (ret) { 1429 if (ret) {
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 7a9c5e9329f2..4f30341dc2ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1, 776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0); 777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
778 778
779 for (i = 0; ARRAY_SIZE(vou_fmt); i++) 779 for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
780 if (vou_fmt[i].pfmt == pix->pixelformat) 780 if (vou_fmt[i].pfmt == pix->pixelformat)
781 return 0; 781 return 0;
782 782
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 8f9f6211c52e..f975b7008692 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
267 struct idmac_video_param *video = &ichan->params.video; 267 struct idmac_video_param *video = &ichan->params.video;
268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt; 268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
269 unsigned long flags;
270 dma_cookie_t cookie; 269 dma_cookie_t cookie;
271 size_t new_size; 270 size_t new_size;
272 271
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
328 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); 327 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
329#endif 328#endif
330 329
331 spin_lock_irqsave(&mx3_cam->lock, flags); 330 spin_lock_irq(&mx3_cam->lock);
332 list_add_tail(&buf->queue, &mx3_cam->capture); 331 list_add_tail(&buf->queue, &mx3_cam->capture);
333 332
334 if (!mx3_cam->active) 333 if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
351 if (mx3_cam->active == buf) 350 if (mx3_cam->active == buf)
352 mx3_cam->active = NULL; 351 mx3_cam->active = NULL;
353 352
354 spin_unlock_irqrestore(&mx3_cam->lock, flags); 353 spin_unlock_irq(&mx3_cam->lock);
355error: 354error:
356 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 355 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
357} 356}
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index ad9309da4a91..6c96e4898777 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include "e4000_priv.h" 21#include "e4000_priv.h"
22#include <linux/math64.h>
22 23
23/* write multiple registers */ 24/* write multiple registers */
24static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) 25static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
233 * or more. 234 * or more.
234 */ 235 */
235 f_vco = c->frequency * e4000_pll_lut[i].mul; 236 f_vco = c->frequency * e4000_pll_lut[i].mul;
236 sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock; 237 sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
237 buf[0] = f_vco / priv->cfg->clock; 238 buf[0] = f_vco / priv->cfg->clock;
238 buf[1] = (sigma_delta >> 0) & 0xff; 239 buf[1] = (sigma_delta >> 0) & 0xff;
239 buf[2] = (sigma_delta >> 8) & 0xff; 240 buf[2] = (sigma_delta >> 8) & 0xff;
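
The e4000 hunk above widens the sigma-delta computation to 64 bits with div_u64(). The sketch below shows the kind of wraparound 0x10000 * (f_vco % clock) can hit when the multiply stays in 32-bit arithmetic; the frequencies are made-up example values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t refclk = 28800000;         /* 28.8 MHz reference clock */
    uint64_t f_vco = 3700000000ULL;     /* VCO frequency in Hz */
    uint32_t rem = (uint32_t)(f_vco % refclk);

    uint32_t wrapped = (uint32_t)(0x10000u * rem / refclk);    /* 32-bit math */
    uint32_t widened = (uint32_t)(0x10000ULL * rem / refclk);  /* 64-bit math */

    printf("sigma_delta: 32-bit %" PRIu32 " vs 64-bit %" PRIu32 "\n",
           wrapped, widened);
    return 0;
}
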
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c43c8d32be40..be77482c3070 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") 111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
112 } 112 }
113 }, 113 },
114 {
115 .ident = "T12Rg-H",
116 .matches = {
117 DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
118 DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
119 }
120 },
114 {} 121 {}
115}; 122};
116 123
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 81695d48c13e..c3bb2502225b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
2090 .bInterfaceSubClass = 1, 2090 .bInterfaceSubClass = 1,
2091 .bInterfaceProtocol = 0, 2091 .bInterfaceProtocol = 0,
2092 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 2092 .driver_info = UVC_QUIRK_PROBE_MINMAX },
2093 /* Microsoft Lifecam NX-3000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO,
2096 .idVendor = 0x045e,
2097 .idProduct = 0x0721,
2098 .bInterfaceClass = USB_CLASS_VIDEO,
2099 .bInterfaceSubClass = 1,
2100 .bInterfaceProtocol = 0,
2101 .driver_info = UVC_QUIRK_PROBE_DEF },
2093 /* Microsoft Lifecam VX-7000 */ 2102 /* Microsoft Lifecam VX-7000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2103 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO, 2104 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
2174 .bInterfaceSubClass = 1, 2183 .bInterfaceSubClass = 1,
2175 .bInterfaceProtocol = 0, 2184 .bInterfaceProtocol = 0,
2176 .driver_info = UVC_QUIRK_PROBE_DEF }, 2185 .driver_info = UVC_QUIRK_PROBE_DEF },
2186 /* Dell SP2008WFP Monitor */
2187 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2188 | USB_DEVICE_ID_MATCH_INT_INFO,
2189 .idVendor = 0x05a9,
2190 .idProduct = 0x2641,
2191 .bInterfaceClass = USB_CLASS_VIDEO,
2192 .bInterfaceSubClass = 1,
2193 .bInterfaceProtocol = 0,
2194 .driver_info = UVC_QUIRK_PROBE_DEF },
2177 /* Dell Alienware X51 */ 2195 /* Dell Alienware X51 */
2178 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2196 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2179 | USB_DEVICE_ID_MATCH_INT_INFO, 2197 | USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 594c75eab5a5..de0e87f0b2c3 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
353 353
354 if (b->m.planes[plane].bytesused > length) 354 if (b->m.planes[plane].bytesused > length)
355 return -EINVAL; 355 return -EINVAL;
356 if (b->m.planes[plane].data_offset >= 356
357 if (b->m.planes[plane].data_offset > 0 &&
358 b->m.planes[plane].data_offset >=
357 b->m.planes[plane].bytesused) 359 b->m.planes[plane].bytesused)
358 return -EINVAL; 360 return -EINVAL;
359 } 361 }
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fd56f2563201..646f08f4f504 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
424} 424}
425 425
426static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
427 struct vm_area_struct *vma, unsigned long *res)
428{
429 unsigned long pfn, start_pfn, prev_pfn;
430 unsigned int i;
431 int ret;
432
433 if (!vma_is_io(vma))
434 return -EFAULT;
435
436 ret = follow_pfn(vma, start, &pfn);
437 if (ret)
438 return ret;
439
440 start_pfn = pfn;
441 start += PAGE_SIZE;
442
443 for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
444 prev_pfn = pfn;
445 ret = follow_pfn(vma, start, &pfn);
446
447 if (ret) {
448 pr_err("no page for address %lu\n", start);
449 return ret;
450 }
451 if (pfn != prev_pfn + 1)
452 return -EINVAL;
453 }
454
455 *res = start_pfn;
456 return 0;
457}
458
426static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 459static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
427 int n_pages, struct vm_area_struct *vma, int write) 460 int n_pages, struct vm_area_struct *vma, int write)
428{ 461{
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
433 unsigned long pfn; 466 unsigned long pfn;
434 int ret = follow_pfn(vma, start, &pfn); 467 int ret = follow_pfn(vma, start, &pfn);
435 468
469 if (!pfn_valid(pfn))
470 return -EINVAL;
471
436 if (ret) { 472 if (ret) {
437 pr_err("no page for address %lu\n", start); 473 pr_err("no page for address %lu\n", start);
438 return ret; 474 return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
468 struct vb2_dc_buf *buf = buf_priv; 504 struct vb2_dc_buf *buf = buf_priv;
469 struct sg_table *sgt = buf->dma_sgt; 505 struct sg_table *sgt = buf->dma_sgt;
470 506
471 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 507 if (sgt) {
472 if (!vma_is_io(buf->vma)) 508 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
473 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 509 if (!vma_is_io(buf->vma))
510 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
474 511
475 sg_free_table(sgt); 512 sg_free_table(sgt);
476 kfree(sgt); 513 kfree(sgt);
514 }
477 vb2_put_vma(buf->vma); 515 vb2_put_vma(buf->vma);
478 kfree(buf); 516 kfree(buf);
479} 517}
480 518
519/*
520 * For some kind of reserved memory there might be no struct page available,
521 * so all that can be done to support such 'pages' is to try to convert
522 * pfn to dma address or at the last resort just assume that
523 * dma address == physical address (like it has been assumed in earlier version
524 * of videobuf2-dma-contig
525 */
526
527#ifdef __arch_pfn_to_dma
528static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
529{
530 return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
531}
532#elif defined(__pfn_to_bus)
533static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
534{
535 return (dma_addr_t)__pfn_to_bus(pfn);
536}
537#elif defined(__pfn_to_phys)
538static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
539{
540 return (dma_addr_t)__pfn_to_phys(pfn);
541}
542#else
543static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
544{
545 /* really, we cannot do anything better at this point */
546 return (dma_addr_t)(pfn) << PAGE_SHIFT;
547}
548#endif
549
481static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 550static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
482 unsigned long size, int write) 551 unsigned long size, int write)
483{ 552{
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
548 /* extract page list from userspace mapping */ 617 /* extract page list from userspace mapping */
549 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); 618 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
550 if (ret) { 619 if (ret) {
620 unsigned long pfn;
621 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
622 buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
623 buf->size = size;
624 kfree(pages);
625 return buf;
626 }
627
551 pr_err("failed to get user pages\n"); 628 pr_err("failed to get user pages\n");
552 goto fail_vma; 629 goto fail_vma;
553 } 630 }
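
vb2_dc_get_user_pfn() above walks a VM_PFNMAP mapping with follow_pfn() and only accepts it if every page frame follows its predecessor, so the buffer can be used as a single contiguous DMA region. User-space sketch of that contiguity check, with an array standing in for the repeated follow_pfn() lookups:

#include <stdio.h>

static int pfns_are_contiguous(const unsigned long *pfn, int n_pages,
                               unsigned long *first)
{
    for (int i = 1; i < n_pages; i++)
        if (pfn[i] != pfn[i - 1] + 1)
            return -1;                  /* hole: not usable as contig buffer */
    *first = pfn[0];
    return 0;
}

int main(void)
{
    unsigned long ok[]  = { 0x1200, 0x1201, 0x1202, 0x1203 };
    unsigned long bad[] = { 0x1200, 0x1201, 0x1300, 0x1301 };
    unsigned long first;

    printf("ok:  %d\n", pfns_are_contiguous(ok, 4, &first));
    printf("bad: %d\n", pfns_are_contiguous(bad, 4, &first));
    return 0;
}
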
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
57 dev->iamthif_ioctl = false; 57 dev->iamthif_ioctl = false;
58 dev->iamthif_state = MEI_IAMTHIF_IDLE; 58 dev->iamthif_state = MEI_IAMTHIF_IDLE;
59 dev->iamthif_timer = 0; 59 dev->iamthif_timer = 0;
60 dev->iamthif_stall_timer = 0;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
297 297
298 if (cl->reading_state != MEI_READ_COMPLETE && 298 if (cl->reading_state != MEI_READ_COMPLETE &&
299 !waitqueue_active(&cl->rx_wait)) { 299 !waitqueue_active(&cl->rx_wait)) {
300
300 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
301 302
302 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
303 (MEI_READ_COMPLETE == cl->reading_state))) { 304 cl->reading_state == MEI_READ_COMPLETE ||
305 mei_cl_is_transitioning(cl))) {
306
304 if (signal_pending(current)) 307 if (signal_pending(current))
305 return -EINTR; 308 return -EINTR;
306 return -ERESTARTSYS; 309 return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
90 cl->dev->dev_state == MEI_DEV_ENABLED && 90 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 91 cl->state == MEI_FILE_CONNECTED);
92} 92}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{
95 return (MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state);
98}
93 99
94bool mei_cl_is_other_connecting(struct mei_cl *cl); 100bool mei_cl_is_other_connecting(struct mei_cl *cl);
95int mei_cl_disconnect(struct mei_cl *cl); 101int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
37 37
38 dev->me_clients_num = 0;
39 dev->me_client_presentation_num = 0;
40 dev->me_client_index = 0;
41
38 /* count how many ME clients we have */ 42 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) 43 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++; 44 dev->me_clients_num++;
41 45
42 if (dev->me_clients_num <= 0) 46 if (dev->me_clients_num == 0)
43 return; 47 return;
44 48
45 kfree(dev->me_clients); 49 kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
221 struct hbm_props_request *prop_req; 225 struct hbm_props_request *prop_req;
222 const size_t len = sizeof(struct hbm_props_request); 226 const size_t len = sizeof(struct hbm_props_request);
223 unsigned long next_client_index; 227 unsigned long next_client_index;
224 u8 client_num; 228 unsigned long client_num;
225 229
226 230
227 client_num = dev->me_client_presentation_num; 231 client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
677 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 681 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
678 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 682 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
679 dev->init_clients_timer = 0; 683 dev->init_clients_timer = 0;
680 dev->me_client_presentation_num = 0;
681 dev->me_client_index = 0;
682 mei_hbm_me_cl_allocate(dev); 684 mei_hbm_me_cl_allocate(dev);
683 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 685 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
684 686
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
176 } 176 }
177 177
178 /* we're already in reset, cancel the init timer */
179 dev->init_clients_timer = 0;
180
178 dev->me_clients_num = 0; 181 dev->me_clients_num = 0;
179 dev->rd_msg_hdr = 0; 182 dev->rd_msg_hdr = 0;
180 dev->wd_pending = false; 183 dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
249 mutex_unlock(&dev->device_lock); 249 mutex_unlock(&dev->device_lock);
250 250
251 if (wait_event_interruptible(cl->rx_wait, 251 if (wait_event_interruptible(cl->rx_wait,
252 (MEI_READ_COMPLETE == cl->reading_state || 252 MEI_READ_COMPLETE == cl->reading_state ||
253 MEI_FILE_INITIALIZING == cl->state || 253 mei_cl_is_transitioning(cl))) {
254 MEI_FILE_DISCONNECTED == cl->state || 254
255 MEI_FILE_DISCONNECTING == cl->state))) {
256 if (signal_pending(current)) 255 if (signal_pending(current))
257 return -EINTR; 256 return -EINTR;
258 return -ERESTARTSYS; 257 return -ERESTARTSYS;
259 } 258 }
260 259
261 mutex_lock(&dev->device_lock); 260 mutex_lock(&dev->device_lock);
262 if (MEI_FILE_INITIALIZING == cl->state || 261 if (mei_cl_is_transitioning(cl)) {
263 MEI_FILE_DISCONNECTED == cl->state ||
264 MEI_FILE_DISCONNECTING == cl->state) {
265 rets = -EBUSY; 262 rets = -EBUSY;
266 goto out; 263 goto out;
267 } 264 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
399 u8 me_clients_num; 399 unsigned long me_clients_num;
400 u8 me_client_presentation_num; 400 unsigned long me_client_presentation_num;
401 u8 me_client_index; 401 unsigned long me_client_index;
402 402
403 struct mei_cl wd_cl; 403 struct mei_cl wd_cl;
404 enum mei_wd_states wd_state; 404 enum mei_wd_states wd_state;
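
The MEI hunks above zero the client counters before re-walking me_clients_map and widen them from u8 so a re-enumeration cannot truncate or carry over stale counts. A small stand-alone sketch of counting the set bits of a fixed-size bitmap follows (a 256-entry map is assumed, mirroring MEI_CLIENTS_MAX); it illustrates the counting idiom only, not the driver.

#include <stdio.h>

#define CLIENTS_MAX   256
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define MAP_LONGS     ((CLIENTS_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Count the clients advertised in the bitmap; callers reset the count
 * before each enumeration, as the hbm.c hunk above now does.
 */
static unsigned long count_clients(const unsigned long map[MAP_LONGS])
{
	unsigned long n = 0;
	int i, b;

	for (i = 0; i < MAP_LONGS; i++)
		for (b = 0; b < BITS_PER_LONG; b++)
			if (map[i] & (1UL << b))
				n++;
	return n;
}

int main(void)
{
	unsigned long map[MAP_LONGS] = { 0 };

	map[0] = 0x5;			/* clients 0 and 2 */
	map[MAP_LONGS - 1] |= 1;	/* one more near the top of the map */
	printf("%lu clients enumerated\n", count_clients(map));
	return 0;
}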
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 87ed3fb5149a..f344659dceac 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
113}; 113};
114 114
115static const struct of_device_id sh_mobile_sdhi_of_match[] = { 115static const struct of_device_id sh_mobile_sdhi_of_match[] = {
116 { .compatible = "renesas,shmobile-sdhi" }, 116 { .compatible = "renesas,sdhi-shmobile" },
117 { .compatible = "renesas,sh7372-sdhi" }, 117 { .compatible = "renesas,sdhi-sh7372" },
118 { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 118 { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
119 { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 119 { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
120 { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 120 { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
121 { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 121 { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
122 { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 122 { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
123 { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 123 { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
124 {}, 124 {},
125}; 125};
126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 26b14f9fcac6..6bc9618af094 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash)
168 */ 168 */
169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) 169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
170{ 170{
171 int status;
172 bool need_wren = false;
173
171 switch (JEDEC_MFR(jedec_id)) { 174 switch (JEDEC_MFR(jedec_id)) {
172 case CFI_MFR_MACRONIX:
173 case CFI_MFR_ST: /* Micron, actually */ 175 case CFI_MFR_ST: /* Micron, actually */
 176 /* Some Micron parts need the WREN command; all will accept it */
177 need_wren = true;
178 case CFI_MFR_MACRONIX:
174 case 0xEF /* winbond */: 179 case 0xEF /* winbond */:
180 if (need_wren)
181 write_enable(flash);
182
175 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 183 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
176 return spi_write(flash->spi, flash->command, 1); 184 status = spi_write(flash->spi, flash->command, 1);
185
186 if (need_wren)
187 write_disable(flash);
188
189 return status;
177 default: 190 default:
178 /* Spansion style */ 191 /* Spansion style */
179 flash->command[0] = OPCODE_BRWR; 192 flash->command[0] = OPCODE_BRWR;
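
The m25p80 hunk brackets the EN4B/EX4B opcode with WREN/WRDI for Micron-style parts. Here is a hedged, stand-alone sketch of that command sequence over a stubbed SPI transfer; the opcode values are the ones commonly documented for these flashes (assumed here), and spi_write_byte() is a stand-in, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* Opcodes as commonly documented for serial NOR flash (assumed here). */
#define OPCODE_WREN 0x06
#define OPCODE_WRDI 0x04
#define OPCODE_EN4B 0xb7
#define OPCODE_EX4B 0xe9

/* Stand-in for a one-byte SPI command transfer. */
static int spi_write_byte(unsigned char op)
{
	printf("spi: 0x%02x\n", op);
	return 0;
}

/* Enter or exit 4-byte addressing; some vendors require WREN first,
 * and all of them accept it, so bracket the opcode when needed.
 */
static int set_4byte(bool need_wren, bool enable)
{
	int status;

	if (need_wren)
		spi_write_byte(OPCODE_WREN);

	status = spi_write_byte(enable ? OPCODE_EN4B : OPCODE_EX4B);

	if (need_wren)
		spi_write_byte(OPCODE_WRDI);

	return status;
}

int main(void)
{
	return set_4byte(true, true);
}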
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 59ab0692f0b9..a9830ff8e3f3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
349 349
350int common_nfc_set_geometry(struct gpmi_nand_data *this) 350int common_nfc_set_geometry(struct gpmi_nand_data *this)
351{ 351{
352 return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); 352 return legacy_set_geometry(this);
353} 353}
354 354
355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7ed4841327f2..d340b2f198c6 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2869 2869
2870 len = le16_to_cpu(p->ext_param_page_length) * 16; 2870 len = le16_to_cpu(p->ext_param_page_length) * 16;
2871 ep = kmalloc(len, GFP_KERNEL); 2871 ep = kmalloc(len, GFP_KERNEL);
2872 if (!ep) { 2872 if (!ep)
2873 ret = -ENOMEM; 2873 return -ENOMEM;
2874 goto ext_out;
2875 }
2876 2874
2877 /* Send our own NAND_CMD_PARAM. */ 2875 /* Send our own NAND_CMD_PARAM. */
2878 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2876 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
@@ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2920 } 2918 }
2921 2919
2922 pr_info("ONFI extended param page detected.\n"); 2920 pr_info("ONFI extended param page detected.\n");
2923 return 0; 2921 ret = 0;
2924 2922
2925ext_out: 2923ext_out:
2926 kfree(ep); 2924 kfree(ep);
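
The nand_base.c hunk reworks the error paths so every exit taken after the allocation funnels through the ext_out label and frees the buffer exactly once. A minimal stand-alone sketch of that single-exit cleanup idiom follows (the parse_param() example is made up, not the NAND code).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Single-exit style: allocate, then route every later return through
 * the cleanup label so the buffer is freed exactly once.
 */
static int parse_param(const char *src, size_t len)
{
	char *buf;
	int ret = -1;

	if (!src)
		return -1;		/* nothing allocated yet: return directly */

	buf = malloc(len + 1);
	if (!buf)
		return -1;

	memcpy(buf, src, len);
	buf[len] = '\0';

	if (buf[0] != 'P') {		/* pretend validation failed */
		ret = -2;
		goto out;
	}

	printf("parsed: %s\n", buf);
	ret = 0;
out:
	free(buf);
	return ret;
}

int main(void)
{
	return parse_param("PARAM", 5) == 0 ? 0 : 1;
}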
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dd03dfdfb0d6..c28d4e29af1a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 for (cs = 0; cs < pdata->num_cs; cs++) { 1320 for (cs = 0; cs < pdata->num_cs; cs++) {
1321 struct mtd_info *mtd = info->host[cs]->mtd; 1321 struct mtd_info *mtd = info->host[cs]->mtd;
1322 1322
1323 mtd->name = pdev->name; 1323 /*
1324 * The mtd name matches the one used in 'mtdparts' kernel
 1325 * parameter. This name cannot be changed, or the
 1326 * user's mtd partition configuration would break.
1327 */
1328 mtd->name = "pxa3xx_nand-0";
1324 info->cs = cs; 1329 info->cs = cs;
1325 ret = pxa3xx_nand_scan(mtd); 1330 ret = pxa3xx_nand_scan(mtd);
1326 if (ret) { 1331 if (ret) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55bbb8b8200c..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1724 struct bonding *bond = netdev_priv(bond_dev); 1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent; 1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr; 1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1727 netdev_features_t old_features = bond_dev->features; 1728 netdev_features_t old_features = bond_dev->features;
1728 1729
1729 /* slave is not a slave or master is not master of this slave */ 1730 /* slave is not a slave or master is not master of this slave */
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev,
1855 * bond_change_active_slave(..., NULL) 1856 * bond_change_active_slave(..., NULL)
1856 */ 1857 */
1857 if (!USES_PRIMARY(bond->params.mode)) { 1858 if (!USES_PRIMARY(bond->params.mode)) {
1858 /* unset promiscuity level from slave */ 1859 /* unset promiscuity level from slave
1859 if (bond_dev->flags & IFF_PROMISC) 1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
 1863 * when this slave was attached, so we cache it at the start of the
 1864 * function and use it here. The same goes for ALLMULTI below.
1865 */
1866 if (old_flags & IFF_PROMISC)
1860 dev_set_promiscuity(slave_dev, -1); 1867 dev_set_promiscuity(slave_dev, -1);
1861 1868
1862 /* unset allmulti level from slave */ 1869 /* unset allmulti level from slave */
1863 if (bond_dev->flags & IFF_ALLMULTI) 1870 if (old_flags & IFF_ALLMULTI)
1864 dev_set_allmulti(slave_dev, -1); 1871 dev_set_allmulti(slave_dev, -1);
1865 1872
1866 bond_hw_addr_flush(bond_dev, slave_dev); 1873 bond_hw_addr_flush(bond_dev, slave_dev);
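
The bonding hunk caches bond_dev->flags at the top of __bond_release_one() because a notifier called in between may clear IFF_PROMISC/IFF_ALLMULTI before the teardown code inspects them. Below is a small self-contained sketch of the same idea: snapshot the flags before invoking a callback that can change them, then act on the snapshot. The names are illustrative only.

#include <stdio.h>

#define FLAG_PROMISC  (1u << 0)
#define FLAG_ALLMULTI (1u << 1)

struct dev { unsigned int flags; };

/* A notifier that may legitimately rewrite the flags underneath us. */
static void notify_change(struct dev *d)
{
	d->flags &= ~FLAG_PROMISC;
}

static void release_slave(struct dev *d)
{
	unsigned int old_flags = d->flags;	/* snapshot before the notifier */

	notify_change(d);

	/* Undo reference counts based on what was true when the slave was
	 * attached, not on the possibly-modified current flags.
	 */
	if (old_flags & FLAG_PROMISC)
		printf("dropping promiscuity reference\n");
	if (old_flags & FLAG_ALLMULTI)
		printf("dropping allmulti reference\n");
}

int main(void)
{
	struct dev d = { .flags = FLAG_PROMISC | FLAG_ALLMULTI };

	release_slave(&d);
	return 0;
}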
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3b1ff6148702..693d8ffe4653 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
1405 1405
1406static const struct platform_device_id at91_can_id_table[] = { 1406static const struct platform_device_id at91_can_id_table[] = {
1407 { 1407 {
1408 .name = "at91_can", 1408 .name = "at91sam9x5_can",
1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, 1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1410 }, { 1410 }, {
1411 .name = "at91sam9x5_can", 1411 .name = "at91_can",
1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, 1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1413 }, { 1413 }, {
1414 /* sentinel */ 1414 /* sentinel */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
705 size_t size; 705 size_t size;
706 706
707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ 707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
708 size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ 708 size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
710 size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ 710 size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
711 size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ 711 size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ 712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
713 size += sizeof(struct can_berr_counter); 713 size += nla_total_size(sizeof(struct can_berr_counter));
714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
715 size += sizeof(struct can_bittiming_const); 715 size += nla_total_size(sizeof(struct can_bittiming_const));
716 716
717 return size; 717 return size;
718} 718}
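
The can/dev.c hunk replaces bare sizeof() terms with nla_total_size() so each attribute's netlink header and padding are included in the size estimate. A user-space sketch of the arithmetic follows; the 4-byte attribute header and 4-byte alignment are assumptions mirroring the netlink uapi as I recall it, so treat the constants as illustrative.

#include <stdio.h>

/* Assumed to mirror the kernel's netlink attribute layout:
 * 4-byte header (len + type), payload padded to 4 bytes.
 */
#define NLA_ALIGNTO   4
#define NLA_ALIGN(x)  (((x) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    4

static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

/* Illustrative payload struct; not the real struct can_bittiming. */
struct can_bittiming_demo { unsigned int bitrate, sample_point, tq, prop_seg; };

int main(void)
{
	size_t payload = sizeof(struct can_bittiming_demo);

	printf("payload only : %zu bytes\n", payload);
	printf("as attribute : %zu bytes\n", nla_total_size(payload));
	return 0;
}

Summing raw sizeof() values, as the old code did, undercounts by one header plus padding per attribute, which is exactly what the hunk corrects.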
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..8f5ce747feb5 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -62,7 +62,7 @@
62#define FLEXCAN_MCR_BCC BIT(16) 62#define FLEXCAN_MCR_BCC BIT(16)
63#define FLEXCAN_MCR_LPRIO_EN BIT(13) 63#define FLEXCAN_MCR_LPRIO_EN BIT(13)
64#define FLEXCAN_MCR_AEN BIT(12) 64#define FLEXCAN_MCR_AEN BIT(12)
65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) 65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
66#define FLEXCAN_MCR_IDAM_A (0 << 8) 66#define FLEXCAN_MCR_IDAM_A (0 << 8)
67#define FLEXCAN_MCR_IDAM_B (1 << 8) 67#define FLEXCAN_MCR_IDAM_B (1 << 8)
68#define FLEXCAN_MCR_IDAM_C (2 << 8) 68#define FLEXCAN_MCR_IDAM_C (2 << 8)
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
702{ 702{
703 struct flexcan_priv *priv = netdev_priv(dev); 703 struct flexcan_priv *priv = netdev_priv(dev);
704 struct flexcan_regs __iomem *regs = priv->base; 704 struct flexcan_regs __iomem *regs = priv->base;
705 unsigned int i;
706 int err; 705 int err;
707 u32 reg_mcr, reg_ctrl; 706 u32 reg_mcr, reg_ctrl;
708 707
@@ -736,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
736 * 735 *
737 */ 736 */
738 reg_mcr = flexcan_read(&regs->mcr); 737 reg_mcr = flexcan_read(&regs->mcr);
738 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | 739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | 740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
741 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS; 741 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
742 FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
742 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 743 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
743 flexcan_write(reg_mcr, &regs->mcr); 744 flexcan_write(reg_mcr, &regs->mcr);
744 745
@@ -772,16 +773,9 @@ static int flexcan_chip_start(struct net_device *dev)
772 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 773 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
773 flexcan_write(reg_ctrl, &regs->ctrl); 774 flexcan_write(reg_ctrl, &regs->ctrl);
774 775
775 for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { 776 /* Abort any pending TX, mark Mailbox as INACTIVE */
776 flexcan_write(0, &regs->cantxfg[i].can_ctrl); 777 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
777 flexcan_write(0, &regs->cantxfg[i].can_id); 778 &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
778 flexcan_write(0, &regs->cantxfg[i].data[0]);
779 flexcan_write(0, &regs->cantxfg[i].data[1]);
780
781 /* put MB into rx queue */
782 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
783 &regs->cantxfg[i].can_ctrl);
784 }
785 779
786 /* acceptance mask/acceptance code (accept everything) */ 780 /* acceptance mask/acceptance code (accept everything) */
787 flexcan_write(0x0, &regs->rxgmask); 781 flexcan_write(0x0, &regs->rxgmask);
@@ -991,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
991} 985}
992 986
993static const struct of_device_id flexcan_of_match[] = { 987static const struct of_device_id flexcan_of_match[] = {
994 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
995 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
996 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 988 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
989 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
990 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
997 { /* sentinel */ }, 991 { /* sentinel */ },
998}; 992};
999MODULE_DEVICE_TABLE(of, flexcan_of_match); 993MODULE_DEVICE_TABLE(of, flexcan_of_match);
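
The flexcan hunk widens the MAXMB mask to 0x1f and, just as importantly, clears the field before OR-ing in the new value so stale bits from the reset default cannot survive. A tiny stand-alone sketch of that clear-then-set read-modify-write on a register image follows (the bit layout here is illustrative, not the FlexCAN register map).

#include <stdio.h>
#include <stdint.h>

#define MCR_MAXMB_MASK	0x1fu
#define MCR_MAXMB(x)	((x) & MCR_MAXMB_MASK)
#define MCR_FEN		(1u << 29)	/* illustrative enable bit */

int main(void)
{
	uint32_t reg_mcr = 0x0000001f;	/* pretend reset value: MAXMB = 31 */

	/* Wrong: OR-ing alone can never lower the field (0x1f | 8 is still 0x1f). */
	uint32_t wrong = reg_mcr | MCR_MAXMB(8);

	/* Right: clear the whole field, then set the wanted value. */
	uint32_t right = (reg_mcr & ~MCR_MAXMB_MASK) | MCR_FEN | MCR_MAXMB(8);

	printf("wrong MAXMB = %u, right MAXMB = %u\n",
	       (unsigned)(wrong & MCR_MAXMB_MASK),
	       (unsigned)(right & MCR_MAXMB_MASK));
	return 0;
}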
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76/* maximum rx buffer len: extended CAN frame with timestamp */ 76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) 77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78 78
79#define SLC_CMD_LEN 1
80#define SLC_SFF_ID_LEN 3
81#define SLC_EFF_ID_LEN 8
82
79struct slcan { 83struct slcan {
80 int magic; 84 int magic;
81 85
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
142{ 146{
143 struct sk_buff *skb; 147 struct sk_buff *skb;
144 struct can_frame cf; 148 struct can_frame cf;
145 int i, dlc_pos, tmp; 149 int i, tmp;
146 unsigned long ultmp; 150 u32 tmpid;
147 char cmd = sl->rbuff[0]; 151 char *cmd = sl->rbuff;
148 152
149 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) 153 cf.can_id = 0;
154
155 switch (*cmd) {
156 case 'r':
157 cf.can_id = CAN_RTR_FLAG;
158 /* fallthrough */
159 case 't':
160 /* store dlc ASCII value and terminate SFF CAN ID string */
161 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
162 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
163 /* point to payload data behind the dlc */
164 cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
165 break;
166 case 'R':
167 cf.can_id = CAN_RTR_FLAG;
168 /* fallthrough */
169 case 'T':
170 cf.can_id |= CAN_EFF_FLAG;
171 /* store dlc ASCII value and terminate EFF CAN ID string */
172 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
173 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
174 /* point to payload data behind the dlc */
175 cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
176 break;
177 default:
150 return; 178 return;
179 }
151 180
152 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ 181 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
153 dlc_pos = 4; /* dlc position tiiid */
154 else
155 dlc_pos = 9; /* dlc position Tiiiiiiiid */
156
157 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
158 return; 182 return;
159 183
160 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ 184 cf.can_id |= tmpid;
161 185
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 186 /* get can_dlc from sanitized ASCII value */
163 187 if (cf.can_dlc >= '0' && cf.can_dlc < '9')
164 if (kstrtoul(sl->rbuff+1, 16, &ultmp)) 188 cf.can_dlc -= '0';
189 else
165 return; 190 return;
166 191
167 cf.can_id = ultmp;
168
169 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
170 cf.can_id |= CAN_EFF_FLAG;
171
172 if ((cmd | 0x20) == 'r') /* RTR frame */
173 cf.can_id |= CAN_RTR_FLAG;
174
175 *(u64 *) (&cf.data) = 0; /* clear payload */ 192 *(u64 *) (&cf.data) = 0; /* clear payload */
176 193
177 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { 194 /* RTR frames may have a dlc > 0 but they never have any data bytes */
178 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 195 if (!(cf.can_id & CAN_RTR_FLAG)) {
179 if (tmp < 0) 196 for (i = 0; i < cf.can_dlc; i++) {
180 return; 197 tmp = hex_to_bin(*cmd++);
181 cf.data[i] = (tmp << 4); 198 if (tmp < 0)
182 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 199 return;
183 if (tmp < 0) 200 cf.data[i] = (tmp << 4);
184 return; 201 tmp = hex_to_bin(*cmd++);
185 cf.data[i] |= tmp; 202 if (tmp < 0)
203 return;
204 cf.data[i] |= tmp;
205 }
186 } 206 }
187 207
188 skb = dev_alloc_skb(sizeof(struct can_frame) + 208 skb = dev_alloc_skb(sizeof(struct can_frame) +
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
209/* parse tty input stream */ 229/* parse tty input stream */
210static void slcan_unesc(struct slcan *sl, unsigned char s) 230static void slcan_unesc(struct slcan *sl, unsigned char s)
211{ 231{
212
213 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ 232 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
214 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 233 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
215 (sl->rcount > 4)) { 234 (sl->rcount > 4)) {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
236/* Encapsulate one can_frame and stuff into a TTY queue. */ 255/* Encapsulate one can_frame and stuff into a TTY queue. */
237static void slc_encaps(struct slcan *sl, struct can_frame *cf) 256static void slc_encaps(struct slcan *sl, struct can_frame *cf)
238{ 257{
239 int actual, idx, i; 258 int actual, i;
240 char cmd; 259 unsigned char *pos;
260 unsigned char *endpos;
261 canid_t id = cf->can_id;
262
263 pos = sl->xbuff;
241 264
242 if (cf->can_id & CAN_RTR_FLAG) 265 if (cf->can_id & CAN_RTR_FLAG)
243 cmd = 'R'; /* becomes 'r' in standard frame format */ 266 *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
244 else 267 else
 245 cmd = 'T'; /* becomes 't' in standard frame format */ 268 *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
246 269
247 if (cf->can_id & CAN_EFF_FLAG) 270 /* determine number of chars for the CAN-identifier */
248 sprintf(sl->xbuff, "%c%08X%d", cmd, 271 if (cf->can_id & CAN_EFF_FLAG) {
249 cf->can_id & CAN_EFF_MASK, cf->can_dlc); 272 id &= CAN_EFF_MASK;
250 else 273 endpos = pos + SLC_EFF_ID_LEN;
251 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, 274 } else {
252 cf->can_id & CAN_SFF_MASK, cf->can_dlc); 275 *pos |= 0x20; /* convert R/T to lower case for SFF */
276 id &= CAN_SFF_MASK;
277 endpos = pos + SLC_SFF_ID_LEN;
278 }
253 279
254 idx = strlen(sl->xbuff); 280 /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
281 pos++;
282 while (endpos >= pos) {
283 *endpos-- = hex_asc_upper[id & 0xf];
284 id >>= 4;
285 }
286
287 pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
255 288
256 for (i = 0; i < cf->can_dlc; i++) 289 *pos++ = cf->can_dlc + '0';
257 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); 290
291 /* RTR frames may have a dlc > 0 but they never have any data bytes */
292 if (!(cf->can_id & CAN_RTR_FLAG)) {
293 for (i = 0; i < cf->can_dlc; i++)
294 pos = hex_byte_pack_upper(pos, cf->data[i]);
295 }
258 296
259 strcat(sl->xbuff, "\r"); /* add terminating character */ 297 *pos++ = '\r';
260 298
261 /* Order of next two lines is *very* important. 299 /* Order of next two lines is *very* important.
262 * When we are sending a little amount of data, 300 * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
267 * 14 Oct 1994 Dmitry Gorodchanin. 305 * 14 Oct 1994 Dmitry Gorodchanin.
268 */ 306 */
269 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); 307 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
270 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); 308 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
271 sl->xleft = strlen(sl->xbuff) - actual; 309 sl->xleft = (pos - sl->xbuff) - actual;
272 sl->xhead = sl->xbuff + actual; 310 sl->xhead = sl->xbuff + actual;
273 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
274} 312}
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
286 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 324 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
287 return; 325 return;
288 326
327 spin_lock(&sl->lock);
289 if (sl->xleft <= 0) { 328 if (sl->xleft <= 0) {
290 /* Now serial buffer is almost free & we can start 329 /* Now serial buffer is almost free & we can start
291 * transmission of another packet */ 330 * transmission of another packet */
292 sl->dev->stats.tx_packets++; 331 sl->dev->stats.tx_packets++;
293 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 332 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
333 spin_unlock(&sl->lock);
294 netif_wake_queue(sl->dev); 334 netif_wake_queue(sl->dev);
295 return; 335 return;
296 } 336 }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
298 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 338 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
299 sl->xleft -= actual; 339 sl->xleft -= actual;
300 sl->xhead += actual; 340 sl->xhead += actual;
341 spin_unlock(&sl->lock);
301} 342}
302 343
303/* Send a can_frame to a TTY queue. */ 344/* Send a can_frame to a TTY queue. */
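
The slcan rework above builds the ASCII frame by hand instead of sprintf/strlen: a one-character type ('t'/'T'/'r'/'R'), a 3- or 8-digit hex identifier, a single DLC digit, the hex data bytes, and a terminating '\r'. Below is a self-contained user-space sketch of that encoding; it uses plain snprintf for clarity rather than the driver's hex_asc tables, and the struct is a stand-in, not struct can_frame.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SFF_ID_LEN 3
#define EFF_ID_LEN 8

struct frame {
	uint32_t id;
	bool extended;
	bool rtr;
	uint8_t dlc;
	uint8_t data[8];
};

/* Encode one frame into the slcan ASCII form, e.g. "t1232DEAD\r". */
static size_t slcan_encode(const struct frame *f, char *buf, size_t len)
{
	size_t pos = 0;
	char type = f->rtr ? 'R' : 'T';
	int id_len = f->extended ? EFF_ID_LEN : SFF_ID_LEN;
	int i;

	if (!f->extended)
		type |= 0x20;			/* lower case for standard IDs */

	pos += snprintf(buf + pos, len - pos, "%c%0*X%u",
			type, id_len, (unsigned)f->id, (unsigned)f->dlc);

	/* RTR frames carry a DLC but never any data bytes. */
	if (!f->rtr)
		for (i = 0; i < f->dlc; i++)
			pos += snprintf(buf + pos, len - pos, "%02X", f->data[i]);

	pos += snprintf(buf + pos, len - pos, "\r");
	return pos;
}

int main(void)
{
	struct frame f = { .id = 0x123, .dlc = 2, .data = { 0xDE, 0xAD } };
	char buf[64];

	slcan_encode(&f, buf, sizeof(buf));
	printf("%s\n", buf);	/* "t1232DEAD" followed by '\r' */
	return 0;
}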
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
463 if (i < PCAN_USB_MAX_TX_URBS) { 463 if (i < PCAN_USB_MAX_TX_URBS) {
464 if (i == 0) { 464 if (i == 0) {
465 netdev_err(netdev, "couldn't setup any tx URB\n"); 465 netdev_err(netdev, "couldn't setup any tx URB\n");
466 return err; 466 goto err_tx;
467 } 467 }
468 468
469 netdev_warn(netdev, "tx performance may be slow\n"); 469 netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
472 if (dev->adapter->dev_start) { 472 if (dev->adapter->dev_start) {
473 err = dev->adapter->dev_start(dev); 473 err = dev->adapter->dev_start(dev);
474 if (err) 474 if (err)
475 goto failed; 475 goto err_adapter;
476 } 476 }
477 477
478 dev->state |= PCAN_USB_STATE_STARTED; 478 dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
481 if (dev->adapter->dev_set_bus) { 481 if (dev->adapter->dev_set_bus) {
482 err = dev->adapter->dev_set_bus(dev, 1); 482 err = dev->adapter->dev_set_bus(dev, 1);
483 if (err) 483 if (err)
484 goto failed; 484 goto err_adapter;
485 } 485 }
486 486
487 dev->can.state = CAN_STATE_ERROR_ACTIVE; 487 dev->can.state = CAN_STATE_ERROR_ACTIVE;
488 488
489 return 0; 489 return 0;
490 490
491failed: 491err_adapter:
492 if (err == -ENODEV) 492 if (err == -ENODEV)
493 netif_device_detach(dev->netdev); 493 netif_device_detach(dev->netdev);
494 494
495 netdev_warn(netdev, "couldn't submit control: %d\n", err); 495 netdev_warn(netdev, "couldn't submit control: %d\n", err);
496 496
497 for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
498 usb_free_urb(dev->tx_contexts[i].urb);
499 dev->tx_contexts[i].urb = NULL;
500 }
501err_tx:
502 usb_kill_anchored_urbs(&dev->rx_submitted);
503
497 return err; 504 return err;
498} 505}
499 506
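
The peak_usb hunk replaces a single catch-all "failed" label with ordered labels (err_adapter, err_tx) so each failure point only unwinds the resources that were actually set up. A generic stand-alone sketch of staged unwinding with multiple labels follows; the buffers are stand-ins, not the USB driver's URBs.

#include <stdio.h>
#include <stdlib.h>

struct dev { char *rx; char *tx; };

/* Staged setup: each failure label undoes only the steps that already
 * succeeded, in reverse order, mirroring the err_adapter/err_tx split.
 */
static int start_device(struct dev *d, int fail_at)
{
	d->rx = malloc(32);
	if (!d->rx)
		return -1;
	if (fail_at == 1)
		goto err_rx;		/* failure before tx exists: free rx only */

	d->tx = malloc(32);
	if (!d->tx)
		goto err_rx;
	if (fail_at == 2)
		goto err_tx;		/* failure after both: free tx, then rx */

	return 0;

err_tx:
	free(d->tx);
	/* fall through */
err_rx:
	free(d->rx);
	d->rx = d->tx = NULL;
	return -1;
}

static void stop_device(struct dev *d)
{
	free(d->tx);
	free(d->rx);
}

int main(void)
{
	struct dev d = { 0 };

	if (start_device(&d, 0) == 0) {
		printf("started\n");
		stop_device(&d);
	}
	return 0;
}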
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98bd..c5e375ddd6c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
1197/* TM (timers) host DB constants */ 1197/* TM (timers) host DB constants */
1198#define TM_ILT_PAGE_SZ_HW 0 1198#define TM_ILT_PAGE_SZ_HW 0
1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
1200/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ 1200#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
1201#define TM_CONN_NUM 1024 1201 BNX2X_VF_CIDS + \
1202 CNIC_ISCSI_CID_MAX)
1202#define TM_ILT_SZ (8 * TM_CONN_NUM) 1203#define TM_ILT_SZ (8 * TM_CONN_NUM)
1203#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) 1204#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
1204 1205
@@ -1527,7 +1528,6 @@ struct bnx2x {
1527#define PCI_32BIT_FLAG (1 << 1) 1528#define PCI_32BIT_FLAG (1 << 1)
1528#define ONE_PORT_FLAG (1 << 2) 1529#define ONE_PORT_FLAG (1 << 2)
1529#define NO_WOL_FLAG (1 << 3) 1530#define NO_WOL_FLAG (1 << 3)
1530#define USING_DAC_FLAG (1 << 4)
1531#define USING_MSIX_FLAG (1 << 5) 1531#define USING_MSIX_FLAG (1 << 5)
1532#define USING_MSI_FLAG (1 << 6) 1532#define USING_MSI_FLAG (1 << 6)
1533#define DISABLE_MSI_FLAG (1 << 7) 1533#define DISABLE_MSI_FLAG (1 << 7)
@@ -1621,7 +1621,7 @@ struct bnx2x {
1621 u16 rx_ticks_int; 1621 u16 rx_ticks_int;
1622 u16 rx_ticks; 1622 u16 rx_ticks;
1623/* Maximal coalescing timeout in us */ 1623/* Maximal coalescing timeout in us */
1624#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) 1624#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
1625 1625
1626 u32 lin_cnt; 1626 u32 lin_cnt;
1627 1627
@@ -2072,7 +2072,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
2072 2072
2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2074 u8 src_type, u8 dst_type); 2074 u8 src_type, u8 dst_type);
2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); 2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2076 u32 *comp);
2076 2077
2077/* FLR related routines */ 2078/* FLR related routines */
2078u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); 2079u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2498,4 +2499,8 @@ enum bnx2x_pci_bus_speed {
2498}; 2499};
2499 2500
2500void bnx2x_set_local_cmng(struct bnx2x *bp); 2501void bnx2x_set_local_cmng(struct bnx2x *bp);
2502
2503#define MCPR_SCRATCH_BASE(bp) \
2504 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2505
2501#endif /* bnx2x.h */ 2506#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 61726af1de6e..4ab4c89c60cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
681 } 681 }
682 } 682 }
683#endif 683#endif
684 skb_record_rx_queue(skb, fp->rx_queue);
684 napi_gro_receive(&fp->napi, skb); 685 napi_gro_receive(&fp->napi, skb);
685} 686}
686 687
@@ -2481,8 +2482,7 @@ load_error_cnic2:
2481load_error_cnic1: 2482load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp); 2483 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */ 2484 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0); 2485 if (bnx2x_set_real_num_queues(bp, 0))
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0: 2487load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n"); 2488 BNX2X_ERR("CNIC-related load failed\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 324de5f05332..e8efa1c93ffe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -891,17 +891,8 @@ static void bnx2x_get_regs(struct net_device *dev,
891 * will re-enable parity attentions right after the dump. 891 * will re-enable parity attentions right after the dump.
892 */ 892 */
893 893
894 /* Disable parity on path 0 */
895 bnx2x_pretend_func(bp, 0);
896 bnx2x_disable_blocks_parity(bp); 894 bnx2x_disable_blocks_parity(bp);
897 895
898 /* Disable parity on path 1 */
899 bnx2x_pretend_func(bp, 1);
900 bnx2x_disable_blocks_parity(bp);
901
902 /* Return to current function */
903 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
904
905 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 896 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
906 dump_hdr.preset = DUMP_ALL_PRESETS; 897 dump_hdr.preset = DUMP_ALL_PRESETS;
907 dump_hdr.version = BNX2X_DUMP_VERSION; 898 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +919,9 @@ static void bnx2x_get_regs(struct net_device *dev,
928 /* Actually read the registers */ 919 /* Actually read the registers */
929 __bnx2x_get_regs(bp, p); 920 __bnx2x_get_regs(bp, p);
930 921
931 /* Re-enable parity attentions on path 0 */ 922 /* Re-enable parity attentions */
932 bnx2x_pretend_func(bp, 0);
933 bnx2x_clear_blocks_parity(bp); 923 bnx2x_clear_blocks_parity(bp);
934 bnx2x_enable_blocks_parity(bp); 924 bnx2x_enable_blocks_parity(bp);
935
936 /* Re-enable parity attentions on path 1 */
937 bnx2x_pretend_func(bp, 1);
938 bnx2x_clear_blocks_parity(bp);
939 bnx2x_enable_blocks_parity(bp);
940
941 /* Return to current function */
942 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
943} 925}
944 926
945static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) 927static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +975,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
993 * will re-enable parity attentions right after the dump. 975 * will re-enable parity attentions right after the dump.
994 */ 976 */
995 977
996 /* Disable parity on path 0 */
997 bnx2x_pretend_func(bp, 0);
998 bnx2x_disable_blocks_parity(bp); 978 bnx2x_disable_blocks_parity(bp);
999 979
1000 /* Disable parity on path 1 */
1001 bnx2x_pretend_func(bp, 1);
1002 bnx2x_disable_blocks_parity(bp);
1003
1004 /* Return to current function */
1005 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1006
1007 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 980 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
1008 dump_hdr.preset = bp->dump_preset_idx; 981 dump_hdr.preset = bp->dump_preset_idx;
1009 dump_hdr.version = BNX2X_DUMP_VERSION; 982 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1005,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
1032 /* Actually read the registers */ 1005 /* Actually read the registers */
1033 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); 1006 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
1034 1007
1035 /* Re-enable parity attentions on path 0 */ 1008 /* Re-enable parity attentions */
1036 bnx2x_pretend_func(bp, 0);
1037 bnx2x_clear_blocks_parity(bp); 1009 bnx2x_clear_blocks_parity(bp);
1038 bnx2x_enable_blocks_parity(bp); 1010 bnx2x_enable_blocks_parity(bp);
1039 1011
1040 /* Re-enable parity attentions on path 1 */
1041 bnx2x_pretend_func(bp, 1);
1042 bnx2x_clear_blocks_parity(bp);
1043 bnx2x_enable_blocks_parity(bp);
1044
1045 /* Return to current function */
1046 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1047
1048 return 0; 1012 return 0;
1049} 1013}
1050 1014
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486a..c2dfea7968f4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -640,23 +640,35 @@ static const struct {
640 * [30] MCP Latched ump_tx_parity 640 * [30] MCP Latched ump_tx_parity
641 * [31] MCP Latched scpad_parity 641 * [31] MCP Latched scpad_parity
642 */ 642 */
643#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 643#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
647
648#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
649 (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
647 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 650 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
648 651
649/* Below registers control the MCP parity attention output. When 652/* Below registers control the MCP parity attention output. When
650 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are 653 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
651 * enabled, when cleared - disabled. 654 * enabled, when cleared - disabled.
652 */ 655 */
653static const u32 mcp_attn_ctl_regs[] = { 656static const struct {
654 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 657 u32 addr;
655 MISC_REG_AEU_ENABLE4_NIG_0, 658 u32 bits;
656 MISC_REG_AEU_ENABLE4_PXP_0, 659} mcp_attn_ctl_regs[] = {
657 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 660 { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
658 MISC_REG_AEU_ENABLE4_NIG_1, 661 MISC_AEU_ENABLE_MCP_PRTY_BITS },
659 MISC_REG_AEU_ENABLE4_PXP_1 662 { MISC_REG_AEU_ENABLE4_NIG_0,
663 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
664 { MISC_REG_AEU_ENABLE4_PXP_0,
665 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
666 { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
667 MISC_AEU_ENABLE_MCP_PRTY_BITS },
668 { MISC_REG_AEU_ENABLE4_NIG_1,
669 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
670 { MISC_REG_AEU_ENABLE4_PXP_1,
671 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
660}; 672};
661 673
662static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) 674static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
665 u32 reg_val; 677 u32 reg_val;
666 678
667 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { 679 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
668 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); 680 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
669 681
670 if (enable) 682 if (enable)
671 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; 683 reg_val |= mcp_attn_ctl_regs[i].bits;
672 else 684 else
673 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; 685 reg_val &= ~mcp_attn_ctl_regs[i].bits;
674 686
675 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); 687 REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
676 } 688 }
677} 689}
678 690
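
The bnx2x_init.h hunk turns a plain array of register addresses into an array of {addr, bits} pairs so each register gets its own enable mask instead of the full one. Here is a stand-alone sketch of driving read-modify-write updates from such a table; the register file and bit values are fake, purely for illustration.

#include <stdio.h>
#include <stdint.h>

#define REG_COUNT 4

static uint32_t regfile[REG_COUNT];		/* stand-in register space */

static uint32_t reg_rd(uint32_t addr)            { return regfile[addr]; }
static void     reg_wr(uint32_t addr, uint32_t v) { regfile[addr] = v; }

/* Each entry carries both the register and the exact bits to toggle,
 * so registers that must not see the full mask get a reduced one.
 */
static const struct {
	uint32_t addr;
	uint32_t bits;
} attn_ctl_regs[] = {
	{ 0, 0x0000000f },
	{ 1, 0x00000007 },	/* this one only gets the sub-mask */
	{ 2, 0x0000000f },
	{ 3, 0x00000007 },
};

static void set_parity(int enable)
{
	size_t i;

	for (i = 0; i < sizeof(attn_ctl_regs) / sizeof(attn_ctl_regs[0]); i++) {
		uint32_t val = reg_rd(attn_ctl_regs[i].addr);

		if (enable)
			val |= attn_ctl_regs[i].bits;
		else
			val &= ~attn_ctl_regs[i].bits;
		reg_wr(attn_ctl_regs[i].addr, val);
	}
}

int main(void)
{
	set_parity(1);
	printf("reg1 = 0x%08x (sub-mask only)\n", (unsigned)regfile[1]);
	set_parity(0);
	return 0;
}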
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
175#define EDC_MODE_LINEAR 0x0022 175#define EDC_MODE_LINEAR 0x0022
176#define EDC_MODE_LIMITING 0x0044 176#define EDC_MODE_LIMITING 0x0044
177#define EDC_MODE_PASSIVE_DAC 0x0055 177#define EDC_MODE_PASSIVE_DAC 0x0055
178#define EDC_MODE_ACTIVE_DAC 0x0066
178 179
179/* ETS defines*/ 180/* ETS defines*/
180#define DCBX_INVALID_COS (0xFF) 181#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 bnx2x_update_link_attr(params, vars->link_attr_sync);
3685} 3686}
3686 3687
3688static void bnx2x_disable_kr2(struct link_params *params,
3689 struct link_vars *vars,
3690 struct bnx2x_phy *phy)
3691{
3692 struct bnx2x *bp = params->bp;
3693 int i;
3694 static struct bnx2x_reg_set reg_set[] = {
3695 /* Step 1 - Program the TX/RX alignment markers */
3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
3705 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
3710 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
3711 };
3712 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
3713
3714 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3715 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3716 reg_set[i].val);
3717 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3718 bnx2x_update_link_attr(params, vars->link_attr_sync);
3719
3720 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3721}
3722
3687static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3688 struct link_params *params) 3724 struct link_params *params)
3689{ 3725{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3715 struct link_params *params, 3751 struct link_params *params,
3716 struct link_vars *vars) { 3752 struct link_vars *vars) {
3717 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 u16 lane, i, cl72_ctrl, an_adv = 0;
3718 u16 ucode_ver;
3719 struct bnx2x *bp = params->bp; 3754 struct bnx2x *bp = params->bp;
3720 static struct bnx2x_reg_set reg_set[] = { 3755 static struct bnx2x_reg_set reg_set[] = {
3721 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3806 3841
3807 /* Advertise pause */ 3842 /* Advertise pause */
3808 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 bnx2x_ext_phy_set_pause(params, phy, vars);
3809 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3810 */
3811 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3812 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3813 if (ucode_ver < 0xd108) {
3814 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3815 ucode_ver);
3816 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3817 }
3818 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3819 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3820 3847
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3838 bnx2x_set_aer_mmd(params, phy); 3865 bnx2x_set_aer_mmd(params, phy);
3839 3866
3840 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3868 } else {
3869 bnx2x_disable_kr2(params, vars, phy);
3841 } 3870 }
3842 3871
3843 /* Enable Autoneg: only on the main lane */ 3872 /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4347 struct bnx2x *bp = params->bp; 4376 struct bnx2x *bp = params->bp;
4348 u32 serdes_net_if; 4377 u32 serdes_net_if;
4349 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
4350 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4351 4379
4352 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
4353 4381
4354 if (!vars->turn_to_run_wc_rt) 4382 if (!vars->turn_to_run_wc_rt)
4355 return; 4383 return;
4356 4384
4357 /* Return if there is no link partner */
4358 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4359 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4360 return;
4361 }
4362
4363 if (vars->rx_tx_asic_rst) { 4385 if (vars->rx_tx_asic_rst) {
4386 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4364 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 serdes_net_if = (REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region, dev_info. 4388 offsetof(struct shmem_region, dev_info.
4366 port_hw_config[params->port].default_cfg)) & 4389 port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4375 /*10G KR*/ 4398 /*10G KR*/
4376 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
4377 4400
4378 DP(NETIF_MSG_LINK,
4379 "gp_status1 0x%x\n", gp_status1);
4380
4381 if (lnkup_kr || lnkup) { 4401 if (lnkup_kr || lnkup) {
4382 vars->rx_tx_asic_rst = 0; 4402 vars->rx_tx_asic_rst = 0;
4383 DP(NETIF_MSG_LINK,
4384 "link up, rx_tx_asic_rst 0x%x\n",
4385 vars->rx_tx_asic_rst);
4386 } else { 4403 } else {
4387 /* Reset the lane to see if link comes up.*/ 4404 /* Reset the lane to see if link comes up.*/
4388 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4507 * enabled transmitter to avoid current leakage in case 4524 * enabled transmitter to avoid current leakage in case
4508 * no module is connected 4525 * no module is connected
4509 */ 4526 */
4510 if (bnx2x_is_sfp_module_plugged(phy, params)) 4527 if ((params->loopback_mode == LOOPBACK_NONE) ||
4511 bnx2x_sfp_module_detection(phy, params); 4528 (params->loopback_mode == LOOPBACK_EXT)) {
4512 else 4529 if (bnx2x_is_sfp_module_plugged(phy, params))
4513 bnx2x_sfp_e3_set_transmitter(params, phy, 1); 4530 bnx2x_sfp_module_detection(phy, params);
4531 else
4532 bnx2x_sfp_e3_set_transmitter(params,
4533 phy, 1);
4534 }
4514 4535
4515 bnx2x_warpcore_config_sfi(phy, params); 4536 bnx2x_warpcore_config_sfi(phy, params);
4516 break; 4537 break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5757 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5758 duplex); 5779 duplex);
5759 5780
5781 /* In case of KR link down, start up the recovering procedure */
5782 if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
5783 (!(phy->flags & FLAGS_WC_DUAL_MODE)))
5784 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
5785
5760 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5786 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5761 vars->duplex, vars->flow_ctrl, vars->link_status); 5787 vars->duplex, vars->flow_ctrl, vars->link_status);
5762 return rc; 5788 return rc;
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
6507 params->phy[INT_PHY].config_init(phy, params, vars); 6533 params->phy[INT_PHY].config_init(phy, params, vars);
6508 } 6534 }
6509 6535
6536 /* Re-read this value in case it was changed inside config_init due to
6537 * limitations of optic module
6538 */
6539 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6540
6510 /* Init external phy*/ 6541 /* Init external phy*/
6511 if (non_ext_phy) { 6542 if (non_ext_phy) {
6512 if (params->phy[INT_PHY].supported & 6543 if (params->phy[INT_PHY].supported &
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8080 if (copper_module_type & 8111 if (copper_module_type &
8081 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8112 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
8082 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8113 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
8083 check_limiting_mode = 1; 8114 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8115 *edc_mode = EDC_MODE_ACTIVE_DAC;
8116 else
8117 check_limiting_mode = 1;
8084 } else if (copper_module_type & 8118 } else if (copper_module_type &
8085 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8119 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8086 DP(NETIF_MSG_LINK, 8120 DP(NETIF_MSG_LINK,
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8555 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8589 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8556 break; 8590 break;
8557 case EDC_MODE_PASSIVE_DAC: 8591 case EDC_MODE_PASSIVE_DAC:
8592 case EDC_MODE_ACTIVE_DAC:
8558 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8593 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
8559 break; 8594 break;
8560 default: 8595 default:
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9730 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9765 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9731 an_1000_val); 9766 an_1000_val);
9732 9767
9733 /* set 100 speed advertisement */ 9768 /* Set 10/100 speed advertisement */
9734 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 9769 if (phy->req_line_speed == SPEED_AUTO_NEG) {
9735 (phy->speed_cap_mask & 9770 if (phy->speed_cap_mask &
9736 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 9771 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
9737 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { 9772 /* Enable autoneg and restart autoneg for legacy speeds
9738 an_10_100_val |= (1<<7); 9773 */
9739 /* Enable autoneg and restart autoneg for legacy speeds */ 9774 autoneg_val |= (1<<9 | 1<<12);
9740 autoneg_val |= (1<<9 | 1<<12);
9741
9742 if (phy->req_duplex == DUPLEX_FULL)
9743 an_10_100_val |= (1<<8); 9775 an_10_100_val |= (1<<8);
9744 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 9776 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
9745 } 9777 }
9746 /* set 10 speed advertisement */ 9778
9747 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9779 if (phy->speed_cap_mask &
9748 (phy->speed_cap_mask & 9780 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
9749 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 9781 /* Enable autoneg and restart autoneg for legacy speeds
9750 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && 9782 */
9751 (phy->supported & 9783 autoneg_val |= (1<<9 | 1<<12);
9752 (SUPPORTED_10baseT_Half | 9784 an_10_100_val |= (1<<7);
9753 SUPPORTED_10baseT_Full)))) { 9785 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
9754 an_10_100_val |= (1<<5); 9786 }
9755 autoneg_val |= (1<<9 | 1<<12); 9787
9756 if (phy->req_duplex == DUPLEX_FULL) 9788 if ((phy->speed_cap_mask &
9789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
9790 (phy->supported & SUPPORTED_10baseT_Full)) {
9757 an_10_100_val |= (1<<6); 9791 an_10_100_val |= (1<<6);
9758 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 9792 autoneg_val |= (1<<9 | 1<<12);
9793 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
9794 }
9795
9796 if ((phy->speed_cap_mask &
9797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
9798 (phy->supported & SUPPORTED_10baseT_Half)) {
9799 an_10_100_val |= (1<<5);
9800 autoneg_val |= (1<<9 | 1<<12);
9801 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
9802 }
9759 } 9803 }
9760 9804
9761 /* Only 10/100 are allowed to work in FORCE mode */ 9805 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13432 } 13476 }
13433 } 13477 }
13434} 13478}
13435static void bnx2x_disable_kr2(struct link_params *params,
13436 struct link_vars *vars,
13437 struct bnx2x_phy *phy)
13438{
13439 struct bnx2x *bp = params->bp;
13440 int i;
13441 static struct bnx2x_reg_set reg_set[] = {
13442 /* Step 1 - Program the TX/RX alignment markers */
13443 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13444 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13445 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13446 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13447 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13448 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13449 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13450 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13451 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13452 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13453 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13454 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13455 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13456 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13457 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13458 };
13459 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13460
13461 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
13462 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13463 reg_set[i].val);
13464 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13465 bnx2x_update_link_attr(params, vars->link_attr_sync);
13466
13467 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13468 /* Restart AN on leading lane */
13469 bnx2x_warpcore_restart_AN_KR(phy, params);
13470}
13471
13472static void bnx2x_kr2_recovery(struct link_params *params, 13479static void bnx2x_kr2_recovery(struct link_params *params,
13473 struct link_vars *vars, 13480 struct link_vars *vars,
13474 struct bnx2x_phy *phy) 13481 struct bnx2x_phy *phy)
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13546 /* Disable KR2 on both lanes */ 13553 /* Disable KR2 on both lanes */
13547 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); 13554 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
13548 bnx2x_disable_kr2(params, vars, phy); 13555 bnx2x_disable_kr2(params, vars, phy);
13556 /* Restart AN on leading lane */
13557 bnx2x_warpcore_restart_AN_KR(phy, params);
13549 return; 13558 return;
13550 } 13559 }
13551} 13560}
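
Aside: the removed bnx2x_disable_kr2() above drives the PHY from a static reg_set[] table written out in a loop. Below is a minimal standalone sketch of that table-driven pattern; the struct layout, register numbers and mdio_write() helper are invented stand-ins, not the driver's API.

/* Illustrative sketch only: table-driven register writes in the style of
 * the reg_set[] array above. Everything here is a fabricated stand-in.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reg_set {
        uint8_t  devad;
        uint16_t reg;
        uint16_t val;
};

/* stand-in for an MDIO/CL45 write; a real driver would touch hardware */
static void mdio_write(uint8_t devad, uint16_t reg, uint16_t val)
{
        printf("devad %u reg 0x%04x <- 0x%04x\n",
               (unsigned)devad, (unsigned)reg, (unsigned)val);
}

int main(void)
{
        static const struct reg_set reg_set[] = {
                { 1, 0x8000, 0x7690 },  /* fabricated example entries */
                { 1, 0x8001, 0xe647 },
                { 1, 0x8002, 0xc4f0 },
        };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(reg_set); i++)
                mdio_write(reg_set[i].devad, reg_set[i].reg, reg_set[i].val);
        return 0;
}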
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a6704b555042..b42f89ce02ef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
503} 503}
504 504
505/* issue a dmae command over the init-channel and wait for completion */ 505/* issue a dmae command over the init-channel and wait for completion */
506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
507 u32 *comp)
507{ 508{
508 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
510 int rc = 0; 510 int rc = 0;
511 511
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
518 spin_lock_bh(&bp->dmae_lock); 518 spin_lock_bh(&bp->dmae_lock);
519 519
520 /* reset completion */ 520 /* reset completion */
521 *wb_comp = 0; 521 *comp = 0;
522 522
523 /* post the command on the channel used for initializations */ 523 /* post the command on the channel used for initializations */
524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
525 525
526 /* wait for completion */ 526 /* wait for completion */
527 udelay(5); 527 udelay(5);
528 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 528 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
529 529
530 if (!cnt || 530 if (!cnt ||
531 (bp->recovery_state != BNX2X_RECOVERY_DONE && 531 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
537 cnt--; 537 cnt--;
538 udelay(50); 538 udelay(50);
539 } 539 }
540 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 540 if (*comp & DMAE_PCI_ERR_FLAG) {
541 BNX2X_ERR("DMAE PCI error!\n"); 541 BNX2X_ERR("DMAE PCI error!\n");
542 rc = DMAE_PCI_ERROR; 542 rc = DMAE_PCI_ERROR;
543 } 543 }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
574 dmae.len = len32; 574 dmae.len = len32;
575 575
576 /* issue the command and wait for completion */ 576 /* issue the command and wait for completion */
577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
578 if (rc) { 578 if (rc) {
579 BNX2X_ERR("DMAE returned failure %d\n", rc); 579 BNX2X_ERR("DMAE returned failure %d\n", rc);
580 bnx2x_panic(); 580 bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
611 dmae.len = len32; 611 dmae.len = len32;
612 612
613 /* issue the command and wait for completion */ 613 /* issue the command and wait for completion */
614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
615 if (rc) { 615 if (rc) {
616 BNX2X_ERR("DMAE returned failure %d\n", rc); 616 BNX2X_ERR("DMAE returned failure %d\n", rc);
617 bnx2x_panic(); 617 bnx2x_panic();
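
Aside: the reworked bnx2x_issue_dmae_with_comp() above takes the completion word from the caller instead of always using the slow-path wb_comp area. A rough sketch of that poll-a-caller-supplied-completion-word pattern follows; the flag values, poll count and simulated hardware are invented for the example.

/* Illustrative sketch only: bounded polling on an externally supplied
 * completion word. The "hardware" is a countdown, not a real device.
 */
#include <stdint.h>
#include <stdio.h>

#define COMP_VAL 0xD0AE
#define ERR_FLAG 0x80000000u

static int hw_latency = 3;              /* simulated: done after 3 polls */

static void hw_poll(volatile uint32_t *comp)
{
        if (hw_latency && !--hw_latency)
                *comp = COMP_VAL;       /* "hardware" reports completion */
}

static int issue_with_comp(volatile uint32_t *comp, int max_polls)
{
        *comp = 0;                      /* reset completion before posting */
        /* ... post the command to the device here ... */

        while ((*comp & ~ERR_FLAG) != COMP_VAL) {
                if (!max_polls--)
                        return -1;      /* timed out */
                hw_poll(comp);          /* a driver would udelay() and re-read */
        }
        return (*comp & ERR_FLAG) ? -2 : 0;     /* bus error vs. success */
}

int main(void)
{
        uint32_t wb_comp;

        printf("rc = %d\n", issue_with_comp(&wb_comp, 4000));
        return 0;
}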
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
751 return rc; 751 return rc;
752} 752}
753 753
754#define MCPR_TRACE_BUFFER_SIZE (0x800)
755#define SCRATCH_BUFFER_SIZE(bp) \
756 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
757
754void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 758void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
755{ 759{
756 u32 addr, val; 760 u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
775 trace_shmem_base = bp->common.shmem_base; 779 trace_shmem_base = bp->common.shmem_base;
776 else 780 else
777 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 781 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
778 addr = trace_shmem_base - 0x800; 782
783 /* sanity */
784 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
785 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
786 SCRATCH_BUFFER_SIZE(bp)) {
787 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
788 trace_shmem_base);
789 return;
790 }
791
792 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
779 793
780 /* validate TRCB signature */ 794 /* validate TRCB signature */
781 mark = REG_RD(bp, addr); 795 mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
787 /* read cyclic buffer pointer */ 801 /* read cyclic buffer pointer */
788 addr += 4; 802 addr += 4;
789 mark = REG_RD(bp, addr); 803 mark = REG_RD(bp, addr);
790 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 804 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
791 + ((mark + 0x3) & ~0x3) - 0x08000000; 805 if (mark >= trace_shmem_base || mark < addr + 4) {
806 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
807 return;
808 }
792 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 809 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
793 810
794 printk("%s", lvl); 811 printk("%s", lvl);
795 812
796 /* dump buffer after the mark */ 813 /* dump buffer after the mark */
797 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { 814 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
798 for (word = 0; word < 8; word++) 815 for (word = 0; word < 8; word++)
799 data[word] = htonl(REG_RD(bp, offset + 4*word)); 816 data[word] = htonl(REG_RD(bp, offset + 4*word));
800 data[8] = 0x0; 817 data[8] = 0x0;
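
Aside: the checks added above refuse to dump when the firmware-reported addresses fall outside the expected scratch window. A small sketch of the same validate-before-walk idea; the base, sizes and sample addresses below are made up for the example.

/* Illustrative sketch only: sanity-check device-reported pointers before
 * iterating over a buffer derived from them.
 */
#include <stdint.h>
#include <stdio.h>

#define SCRATCH_BASE   0x08000000u      /* fabricated constants */
#define SCRATCH_SIZE   0x28000u
#define TRACE_BUF_SIZE 0x800u

static int dump_ok(uint32_t shmem_base, uint32_t mark)
{
        /* base must leave room for the trace buffer below it and still
         * sit inside the scratch area
         */
        if (shmem_base < SCRATCH_BASE + TRACE_BUF_SIZE ||
            shmem_base >= SCRATCH_BASE + SCRATCH_SIZE)
                return 0;

        /* the cyclic-buffer mark must fall inside the trace buffer */
        if (mark >= shmem_base || mark < shmem_base - TRACE_BUF_SIZE)
                return 0;

        return 1;
}

int main(void)
{
        printf("%d\n", dump_ok(0x08001000u, 0x08000900u));  /* 1: in range */
        printf("%d\n", dump_ok(0x08000400u, 0x08000100u));  /* 0: base too low */
        return 0;
}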
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
4280 pr_cont("%s%s", idx ? ", " : "", blk); 4297 pr_cont("%s%s", idx ? ", " : "", blk);
4281} 4298}
4282 4299
4283static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4300static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4284 int par_num, bool print) 4301 int *par_num, bool print)
4285{ 4302{
4286 int i = 0; 4303 u32 cur_bit;
4287 u32 cur_bit = 0; 4304 bool res;
4305 int i;
4306
4307 res = false;
4308
4288 for (i = 0; sig; i++) { 4309 for (i = 0; sig; i++) {
4289 cur_bit = ((u32)0x1 << i); 4310 cur_bit = (0x1UL << i);
4290 if (sig & cur_bit) { 4311 if (sig & cur_bit) {
4291 switch (cur_bit) { 4312 res |= true; /* Each bit is real error! */
4292 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4313
4293 if (print) { 4314 if (print) {
4294 _print_next_block(par_num++, "BRB"); 4315 switch (cur_bit) {
4316 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4317 _print_next_block((*par_num)++, "BRB");
4295 _print_parity(bp, 4318 _print_parity(bp,
4296 BRB1_REG_BRB1_PRTY_STS); 4319 BRB1_REG_BRB1_PRTY_STS);
4297 } 4320 break;
4298 break; 4321 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4299 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4322 _print_next_block((*par_num)++,
4300 if (print) { 4323 "PARSER");
4301 _print_next_block(par_num++, "PARSER");
4302 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4324 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4303 } 4325 break;
4304 break; 4326 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4305 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4327 _print_next_block((*par_num)++, "TSDM");
4306 if (print) {
4307 _print_next_block(par_num++, "TSDM");
4308 _print_parity(bp, 4328 _print_parity(bp,
4309 TSDM_REG_TSDM_PRTY_STS); 4329 TSDM_REG_TSDM_PRTY_STS);
4310 } 4330 break;
4311 break; 4331 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4312 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4332 _print_next_block((*par_num)++,
4313 if (print) {
4314 _print_next_block(par_num++,
4315 "SEARCHER"); 4333 "SEARCHER");
4316 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4334 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4317 } 4335 break;
4318 break; 4336 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4319 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4337 _print_next_block((*par_num)++, "TCM");
4320 if (print) { 4338 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4321 _print_next_block(par_num++, "TCM"); 4339 break;
4322 _print_parity(bp, 4340 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4323 TCM_REG_TCM_PRTY_STS); 4341 _print_next_block((*par_num)++,
4324 } 4342 "TSEMI");
4325 break;
4326 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4327 if (print) {
4328 _print_next_block(par_num++, "TSEMI");
4329 _print_parity(bp, 4343 _print_parity(bp,
4330 TSEM_REG_TSEM_PRTY_STS_0); 4344 TSEM_REG_TSEM_PRTY_STS_0);
4331 _print_parity(bp, 4345 _print_parity(bp,
4332 TSEM_REG_TSEM_PRTY_STS_1); 4346 TSEM_REG_TSEM_PRTY_STS_1);
4333 } 4347 break;
4334 break; 4348 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4335 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4349 _print_next_block((*par_num)++, "XPB");
4336 if (print) {
4337 _print_next_block(par_num++, "XPB");
4338 _print_parity(bp, GRCBASE_XPB + 4350 _print_parity(bp, GRCBASE_XPB +
4339 PB_REG_PB_PRTY_STS); 4351 PB_REG_PB_PRTY_STS);
4352 break;
4340 } 4353 }
4341 break;
4342 } 4354 }
4343 4355
4344 /* Clear the bit */ 4356 /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4346 } 4358 }
4347 } 4359 }
4348 4360
4349 return par_num; 4361 return res;
4350} 4362}
4351 4363
4352static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4364static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4353 int par_num, bool *global, 4365 int *par_num, bool *global,
4354 bool print) 4366 bool print)
4355{ 4367{
4356 int i = 0; 4368 u32 cur_bit;
4357 u32 cur_bit = 0; 4369 bool res;
4370 int i;
4371
4372 res = false;
4373
4358 for (i = 0; sig; i++) { 4374 for (i = 0; sig; i++) {
4359 cur_bit = ((u32)0x1 << i); 4375 cur_bit = (0x1UL << i);
4360 if (sig & cur_bit) { 4376 if (sig & cur_bit) {
4377 res |= true; /* Each bit is real error! */
4361 switch (cur_bit) { 4378 switch (cur_bit) {
4362 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4379 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4363 if (print) { 4380 if (print) {
4364 _print_next_block(par_num++, "PBF"); 4381 _print_next_block((*par_num)++, "PBF");
4365 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4382 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4366 } 4383 }
4367 break; 4384 break;
4368 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4385 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4369 if (print) { 4386 if (print) {
4370 _print_next_block(par_num++, "QM"); 4387 _print_next_block((*par_num)++, "QM");
4371 _print_parity(bp, QM_REG_QM_PRTY_STS); 4388 _print_parity(bp, QM_REG_QM_PRTY_STS);
4372 } 4389 }
4373 break; 4390 break;
4374 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4391 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4375 if (print) { 4392 if (print) {
4376 _print_next_block(par_num++, "TM"); 4393 _print_next_block((*par_num)++, "TM");
4377 _print_parity(bp, TM_REG_TM_PRTY_STS); 4394 _print_parity(bp, TM_REG_TM_PRTY_STS);
4378 } 4395 }
4379 break; 4396 break;
4380 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4397 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4381 if (print) { 4398 if (print) {
4382 _print_next_block(par_num++, "XSDM"); 4399 _print_next_block((*par_num)++, "XSDM");
4383 _print_parity(bp, 4400 _print_parity(bp,
4384 XSDM_REG_XSDM_PRTY_STS); 4401 XSDM_REG_XSDM_PRTY_STS);
4385 } 4402 }
4386 break; 4403 break;
4387 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4404 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4388 if (print) { 4405 if (print) {
4389 _print_next_block(par_num++, "XCM"); 4406 _print_next_block((*par_num)++, "XCM");
4390 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4407 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4391 } 4408 }
4392 break; 4409 break;
4393 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4410 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4394 if (print) { 4411 if (print) {
4395 _print_next_block(par_num++, "XSEMI"); 4412 _print_next_block((*par_num)++,
4413 "XSEMI");
4396 _print_parity(bp, 4414 _print_parity(bp,
4397 XSEM_REG_XSEM_PRTY_STS_0); 4415 XSEM_REG_XSEM_PRTY_STS_0);
4398 _print_parity(bp, 4416 _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4401 break; 4419 break;
4402 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4420 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4403 if (print) { 4421 if (print) {
4404 _print_next_block(par_num++, 4422 _print_next_block((*par_num)++,
4405 "DOORBELLQ"); 4423 "DOORBELLQ");
4406 _print_parity(bp, 4424 _print_parity(bp,
4407 DORQ_REG_DORQ_PRTY_STS); 4425 DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4409 break; 4427 break;
4410 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4428 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4411 if (print) { 4429 if (print) {
4412 _print_next_block(par_num++, "NIG"); 4430 _print_next_block((*par_num)++, "NIG");
4413 if (CHIP_IS_E1x(bp)) { 4431 if (CHIP_IS_E1x(bp)) {
4414 _print_parity(bp, 4432 _print_parity(bp,
4415 NIG_REG_NIG_PRTY_STS); 4433 NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4423 break; 4441 break;
4424 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4442 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4425 if (print) 4443 if (print)
4426 _print_next_block(par_num++, 4444 _print_next_block((*par_num)++,
4427 "VAUX PCI CORE"); 4445 "VAUX PCI CORE");
4428 *global = true; 4446 *global = true;
4429 break; 4447 break;
4430 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4448 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4431 if (print) { 4449 if (print) {
4432 _print_next_block(par_num++, "DEBUG"); 4450 _print_next_block((*par_num)++,
4451 "DEBUG");
4433 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4452 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4434 } 4453 }
4435 break; 4454 break;
4436 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4455 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4437 if (print) { 4456 if (print) {
4438 _print_next_block(par_num++, "USDM"); 4457 _print_next_block((*par_num)++, "USDM");
4439 _print_parity(bp, 4458 _print_parity(bp,
4440 USDM_REG_USDM_PRTY_STS); 4459 USDM_REG_USDM_PRTY_STS);
4441 } 4460 }
4442 break; 4461 break;
4443 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4462 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4444 if (print) { 4463 if (print) {
4445 _print_next_block(par_num++, "UCM"); 4464 _print_next_block((*par_num)++, "UCM");
4446 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4465 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4447 } 4466 }
4448 break; 4467 break;
4449 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4468 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4450 if (print) { 4469 if (print) {
4451 _print_next_block(par_num++, "USEMI"); 4470 _print_next_block((*par_num)++,
4471 "USEMI");
4452 _print_parity(bp, 4472 _print_parity(bp,
4453 USEM_REG_USEM_PRTY_STS_0); 4473 USEM_REG_USEM_PRTY_STS_0);
4454 _print_parity(bp, 4474 _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4457 break; 4477 break;
4458 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4478 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4459 if (print) { 4479 if (print) {
4460 _print_next_block(par_num++, "UPB"); 4480 _print_next_block((*par_num)++, "UPB");
4461 _print_parity(bp, GRCBASE_UPB + 4481 _print_parity(bp, GRCBASE_UPB +
4462 PB_REG_PB_PRTY_STS); 4482 PB_REG_PB_PRTY_STS);
4463 } 4483 }
4464 break; 4484 break;
4465 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4485 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4466 if (print) { 4486 if (print) {
4467 _print_next_block(par_num++, "CSDM"); 4487 _print_next_block((*par_num)++, "CSDM");
4468 _print_parity(bp, 4488 _print_parity(bp,
4469 CSDM_REG_CSDM_PRTY_STS); 4489 CSDM_REG_CSDM_PRTY_STS);
4470 } 4490 }
4471 break; 4491 break;
4472 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4492 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4473 if (print) { 4493 if (print) {
4474 _print_next_block(par_num++, "CCM"); 4494 _print_next_block((*par_num)++, "CCM");
4475 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4495 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4476 } 4496 }
4477 break; 4497 break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4482 } 4502 }
4483 } 4503 }
4484 4504
4485 return par_num; 4505 return res;
4486} 4506}
4487 4507
4488static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4508static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4489 int par_num, bool print) 4509 int *par_num, bool print)
4490{ 4510{
4491 int i = 0; 4511 u32 cur_bit;
4492 u32 cur_bit = 0; 4512 bool res;
4513 int i;
4514
4515 res = false;
4516
4493 for (i = 0; sig; i++) { 4517 for (i = 0; sig; i++) {
4494 cur_bit = ((u32)0x1 << i); 4518 cur_bit = (0x1UL << i);
4495 if (sig & cur_bit) { 4519 if (sig & cur_bit) {
4496 switch (cur_bit) { 4520 res |= true; /* Each bit is real error! */
4497 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4521 if (print) {
4498 if (print) { 4522 switch (cur_bit) {
4499 _print_next_block(par_num++, "CSEMI"); 4523 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4524 _print_next_block((*par_num)++,
4525 "CSEMI");
4500 _print_parity(bp, 4526 _print_parity(bp,
4501 CSEM_REG_CSEM_PRTY_STS_0); 4527 CSEM_REG_CSEM_PRTY_STS_0);
4502 _print_parity(bp, 4528 _print_parity(bp,
4503 CSEM_REG_CSEM_PRTY_STS_1); 4529 CSEM_REG_CSEM_PRTY_STS_1);
4504 } 4530 break;
4505 break; 4531 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4506 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4532 _print_next_block((*par_num)++, "PXP");
4507 if (print) {
4508 _print_next_block(par_num++, "PXP");
4509 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4533 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4510 _print_parity(bp, 4534 _print_parity(bp,
4511 PXP2_REG_PXP2_PRTY_STS_0); 4535 PXP2_REG_PXP2_PRTY_STS_0);
4512 _print_parity(bp, 4536 _print_parity(bp,
4513 PXP2_REG_PXP2_PRTY_STS_1); 4537 PXP2_REG_PXP2_PRTY_STS_1);
4514 } 4538 break;
4515 break; 4539 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4516 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4540 _print_next_block((*par_num)++,
4517 if (print) 4541 "PXPPCICLOCKCLIENT");
4518 _print_next_block(par_num++, 4542 break;
4519 "PXPPCICLOCKCLIENT"); 4543 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4520 break; 4544 _print_next_block((*par_num)++, "CFC");
4521 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4522 if (print) {
4523 _print_next_block(par_num++, "CFC");
4524 _print_parity(bp, 4545 _print_parity(bp,
4525 CFC_REG_CFC_PRTY_STS); 4546 CFC_REG_CFC_PRTY_STS);
4526 } 4547 break;
4527 break; 4548 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4528 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4549 _print_next_block((*par_num)++, "CDU");
4529 if (print) {
4530 _print_next_block(par_num++, "CDU");
4531 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4550 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4532 } 4551 break;
4533 break; 4552 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4534 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4553 _print_next_block((*par_num)++, "DMAE");
4535 if (print) {
4536 _print_next_block(par_num++, "DMAE");
4537 _print_parity(bp, 4554 _print_parity(bp,
4538 DMAE_REG_DMAE_PRTY_STS); 4555 DMAE_REG_DMAE_PRTY_STS);
4539 } 4556 break;
4540 break; 4557 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4541 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4558 _print_next_block((*par_num)++, "IGU");
4542 if (print) {
4543 _print_next_block(par_num++, "IGU");
4544 if (CHIP_IS_E1x(bp)) 4559 if (CHIP_IS_E1x(bp))
4545 _print_parity(bp, 4560 _print_parity(bp,
4546 HC_REG_HC_PRTY_STS); 4561 HC_REG_HC_PRTY_STS);
4547 else 4562 else
4548 _print_parity(bp, 4563 _print_parity(bp,
4549 IGU_REG_IGU_PRTY_STS); 4564 IGU_REG_IGU_PRTY_STS);
4550 } 4565 break;
4551 break; 4566 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4552 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4567 _print_next_block((*par_num)++, "MISC");
4553 if (print) {
4554 _print_next_block(par_num++, "MISC");
4555 _print_parity(bp, 4568 _print_parity(bp,
4556 MISC_REG_MISC_PRTY_STS); 4569 MISC_REG_MISC_PRTY_STS);
4570 break;
4557 } 4571 }
4558 break;
4559 } 4572 }
4560 4573
4561 /* Clear the bit */ 4574 /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4563 } 4576 }
4564 } 4577 }
4565 4578
4566 return par_num; 4579 return res;
4567} 4580}
4568 4581
4569static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4582static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4570 bool *global, bool print) 4583 int *par_num, bool *global,
4584 bool print)
4571{ 4585{
4572 int i = 0; 4586 bool res = false;
4573 u32 cur_bit = 0; 4587 u32 cur_bit;
4588 int i;
4589
4574 for (i = 0; sig; i++) { 4590 for (i = 0; sig; i++) {
4575 cur_bit = ((u32)0x1 << i); 4591 cur_bit = (0x1UL << i);
4576 if (sig & cur_bit) { 4592 if (sig & cur_bit) {
4577 switch (cur_bit) { 4593 switch (cur_bit) {
4578 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4594 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4579 if (print) 4595 if (print)
4580 _print_next_block(par_num++, "MCP ROM"); 4596 _print_next_block((*par_num)++,
4597 "MCP ROM");
4581 *global = true; 4598 *global = true;
4599 res |= true;
4582 break; 4600 break;
4583 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4601 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4584 if (print) 4602 if (print)
4585 _print_next_block(par_num++, 4603 _print_next_block((*par_num)++,
4586 "MCP UMP RX"); 4604 "MCP UMP RX");
4587 *global = true; 4605 *global = true;
4606 res |= true;
4588 break; 4607 break;
4589 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4608 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4590 if (print) 4609 if (print)
4591 _print_next_block(par_num++, 4610 _print_next_block((*par_num)++,
4592 "MCP UMP TX"); 4611 "MCP UMP TX");
4593 *global = true; 4612 *global = true;
4613 res |= true;
4594 break; 4614 break;
4595 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4615 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4596 if (print) 4616 if (print)
4597 _print_next_block(par_num++, 4617 _print_next_block((*par_num)++,
4598 "MCP SCPAD"); 4618 "MCP SCPAD");
4599 *global = true; 4619 /* clear latched SCPAD PARITY from MCP */
4620 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4621 1UL << 10);
4600 break; 4622 break;
4601 } 4623 }
4602 4624
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4605 } 4627 }
4606 } 4628 }
4607 4629
4608 return par_num; 4630 return res;
4609} 4631}
4610 4632
4611static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4633static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4612 int par_num, bool print) 4634 int *par_num, bool print)
4613{ 4635{
4614 int i = 0; 4636 u32 cur_bit;
4615 u32 cur_bit = 0; 4637 bool res;
4638 int i;
4639
4640 res = false;
4641
4616 for (i = 0; sig; i++) { 4642 for (i = 0; sig; i++) {
4617 cur_bit = ((u32)0x1 << i); 4643 cur_bit = (0x1UL << i);
4618 if (sig & cur_bit) { 4644 if (sig & cur_bit) {
4619 switch (cur_bit) { 4645 res |= true; /* Each bit is real error! */
4620 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4646 if (print) {
4621 if (print) { 4647 switch (cur_bit) {
4622 _print_next_block(par_num++, "PGLUE_B"); 4648 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4649 _print_next_block((*par_num)++,
4650 "PGLUE_B");
4623 _print_parity(bp, 4651 _print_parity(bp,
4624 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4652 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4625 } 4653 break;
4626 break; 4654 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4627 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4655 _print_next_block((*par_num)++, "ATC");
4628 if (print) {
4629 _print_next_block(par_num++, "ATC");
4630 _print_parity(bp, 4656 _print_parity(bp,
4631 ATC_REG_ATC_PRTY_STS); 4657 ATC_REG_ATC_PRTY_STS);
4658 break;
4632 } 4659 }
4633 break;
4634 } 4660 }
4635
4636 /* Clear the bit */ 4661 /* Clear the bit */
4637 sig &= ~cur_bit; 4662 sig &= ~cur_bit;
4638 } 4663 }
4639 } 4664 }
4640 4665
4641 return par_num; 4666 return res;
4642} 4667}
4643 4668
4644static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4669static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4645 u32 *sig) 4670 u32 *sig)
4646{ 4671{
4672 bool res = false;
4673
4647 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4674 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4648 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4675 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4649 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4676 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4660 if (print) 4687 if (print)
4661 netdev_err(bp->dev, 4688 netdev_err(bp->dev,
4662 "Parity errors detected in blocks: "); 4689 "Parity errors detected in blocks: ");
4663 par_num = bnx2x_check_blocks_with_parity0(bp, 4690 res |= bnx2x_check_blocks_with_parity0(bp,
4664 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4691 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4665 par_num = bnx2x_check_blocks_with_parity1(bp, 4692 res |= bnx2x_check_blocks_with_parity1(bp,
4666 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4693 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4667 par_num = bnx2x_check_blocks_with_parity2(bp, 4694 res |= bnx2x_check_blocks_with_parity2(bp,
4668 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4695 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4669 par_num = bnx2x_check_blocks_with_parity3( 4696 res |= bnx2x_check_blocks_with_parity3(bp,
4670 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4697 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4671 par_num = bnx2x_check_blocks_with_parity4(bp, 4698 res |= bnx2x_check_blocks_with_parity4(bp,
4672 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4699 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4673 4700
4674 if (print) 4701 if (print)
4675 pr_cont("\n"); 4702 pr_cont("\n");
4703 }
4676 4704
4677 return true; 4705 return res;
4678 } else
4679 return false;
4680} 4706}
4681 4707
4682/** 4708/**
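
Aside: the parity helpers above switch from returning a running counter to returning a bool ("at least one real error seen") with the counter moved to an out-parameter. A compact sketch of that refactor shape, with invented names and bit meanings:

/* Illustrative sketch only: counter as out-parameter, bool as "any error"
 * return, scanning and clearing set bits the way the loops above do.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool check_blocks(uint32_t sig, int *par_num, bool print)
{
        bool res = false;
        int i;

        for (i = 0; sig; i++) {
                uint32_t cur_bit = 1UL << i;

                if (sig & cur_bit) {
                        res = true;             /* each set bit is an error */
                        if (print)
                                printf("block %d: #%d\n", i, (*par_num)++);
                        sig &= ~cur_bit;        /* clear and keep scanning */
                }
        }
        return res;
}

int main(void)
{
        int par_num = 0;
        bool any = check_blocks(0x0005, &par_num, true);

        printf("errors: %s, blocks counted: %d\n",
               any ? "yes" : "no", par_num);
        return 0;
}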
@@ -4703,6 +4729,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4703 attn.sig[3] = REG_RD(bp, 4729 attn.sig[3] = REG_RD(bp,
4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4730 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705 port*4); 4731 port*4);
4732 /* Since MCP attentions can't be disabled inside the block, we need to
4733 * read AEU registers to see whether they're currently disabled
4734 */
4735 attn.sig[3] &= ((REG_RD(bp,
4736 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4737 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4738 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4739 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4706 4740
4707 if (!CHIP_IS_E1x(bp)) 4741 if (!CHIP_IS_E1x(bp))
4708 attn.sig[4] = REG_RD(bp, 4742 attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5481,24 @@ static void bnx2x_timer(unsigned long data)
5447 if (IS_PF(bp) && 5481 if (IS_PF(bp) &&
5448 !BP_NOMCP(bp)) { 5482 !BP_NOMCP(bp)) {
5449 int mb_idx = BP_FW_MB_IDX(bp); 5483 int mb_idx = BP_FW_MB_IDX(bp);
5450 u32 drv_pulse; 5484 u16 drv_pulse;
5451 u32 mcp_pulse; 5485 u16 mcp_pulse;
5452 5486
5453 ++bp->fw_drv_pulse_wr_seq; 5487 ++bp->fw_drv_pulse_wr_seq;
5454 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5488 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5455 /* TBD - add SYSTEM_TIME */
5456 drv_pulse = bp->fw_drv_pulse_wr_seq; 5489 drv_pulse = bp->fw_drv_pulse_wr_seq;
5457 bnx2x_drv_pulse(bp); 5490 bnx2x_drv_pulse(bp);
5458 5491
5459 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5492 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5460 MCP_PULSE_SEQ_MASK); 5493 MCP_PULSE_SEQ_MASK);
5461 /* The delta between driver pulse and mcp response 5494 /* The delta between driver pulse and mcp response
5462 * should be 1 (before mcp response) or 0 (after mcp response) 5495 * should not get too big. If the MFW is more than 5 pulses
5496 * behind, we should worry about it enough to generate an error
5497 * log.
5463 */ 5498 */
5464 if ((drv_pulse != mcp_pulse) && 5499 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5465 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5500 BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5466 /* someone lost a heartbeat... */
5467 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5468 drv_pulse, mcp_pulse); 5501 drv_pulse, mcp_pulse);
5469 }
5470 } 5502 }
5471 5503
5472 if (bp->state == BNX2X_STATE_OPEN) 5504 if (bp->state == BNX2X_STATE_OPEN)
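
Aside: the new pulse check above compares the driver and MCP sequence numbers through a mask so the comparison survives counter wraparound. A tiny sketch of that masked-delta idea; the mask width here is an assumption for the demo, not the driver's actual constant.

/* Illustrative sketch only: wraparound-safe distance between two masked
 * sequence counters.
 */
#include <stdint.h>
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff           /* assumed width for the example */

static unsigned int pulse_delta(uint16_t drv, uint16_t mcp)
{
        return (drv - mcp) & PULSE_SEQ_MASK;
}

int main(void)
{
        /* still correct when the counter has rolled over */
        printf("%u\n", pulse_delta(0x0002, 0x7ffe));    /* prints 4 */
        return 0;
}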
@@ -7120,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7120 int port = BP_PORT(bp); 7152 int port = BP_PORT(bp);
7121 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7153 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7122 u32 low, high; 7154 u32 low, high;
7123 u32 val; 7155 u32 val, reg;
7124 7156
7125 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7157 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7126 7158
@@ -7265,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7265 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7297 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7266 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7298 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7267 7299
7300 /* SCPAD_PARITY should NOT trigger close the gates */
7301 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7302 REG_WR(bp, reg,
7303 REG_RD(bp, reg) &
7304 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7305
7306 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7307 REG_WR(bp, reg,
7308 REG_RD(bp, reg) &
7309 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7310
7268 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7311 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7269 7312
7270 if (!CHIP_IS_E1x(bp)) { 7313 if (!CHIP_IS_E1x(bp)) {
@@ -11679,9 +11722,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11679static int bnx2x_open(struct net_device *dev) 11722static int bnx2x_open(struct net_device *dev)
11680{ 11723{
11681 struct bnx2x *bp = netdev_priv(dev); 11724 struct bnx2x *bp = netdev_priv(dev);
11682 bool global = false;
11683 int other_engine = BP_PATH(bp) ? 0 : 1;
11684 bool other_load_status, load_status;
11685 int rc; 11725 int rc;
11686 11726
11687 bp->stats_init = true; 11727 bp->stats_init = true;
@@ -11697,6 +11737,10 @@ static int bnx2x_open(struct net_device *dev)
11697 * Parity recovery is only relevant for PF driver. 11737 * Parity recovery is only relevant for PF driver.
11698 */ 11738 */
11699 if (IS_PF(bp)) { 11739 if (IS_PF(bp)) {
11740 int other_engine = BP_PATH(bp) ? 0 : 1;
11741 bool other_load_status, load_status;
11742 bool global = false;
11743
11700 other_load_status = bnx2x_get_load_status(bp, other_engine); 11744 other_load_status = bnx2x_get_load_status(bp, other_engine);
11701 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 11745 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11702 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 11746 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -12074,7 +12118,6 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12074 struct device *dev = &bp->pdev->dev; 12118 struct device *dev = &bp->pdev->dev;
12075 12119
12076 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 12120 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
12077 bp->flags |= USING_DAC_FLAG;
12078 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 12121 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
12079 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 12122 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
12080 return -EIO; 12123 return -EIO;
@@ -12242,8 +12285,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12242 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12285 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12243 12286
12244 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 12287 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12245 if (bp->flags & USING_DAC_FLAG) 12288 dev->features |= NETIF_F_HIGHDMA;
12246 dev->features |= NETIF_F_HIGHDMA;
12247 12289
12248 /* Add Loopback capability to the device */ 12290 /* Add Loopback capability to the device */
12249 dev->hw_features |= NETIF_F_LOOPBACK; 12291 dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12606,24 +12648,24 @@ static int set_max_cos_est(int chip_id)
12606 return BNX2X_MULTI_TX_COS_E1X; 12648 return BNX2X_MULTI_TX_COS_E1X;
12607 case BCM57712: 12649 case BCM57712:
12608 case BCM57712_MF: 12650 case BCM57712_MF:
12609 case BCM57712_VF:
12610 return BNX2X_MULTI_TX_COS_E2_E3A0; 12651 return BNX2X_MULTI_TX_COS_E2_E3A0;
12611 case BCM57800: 12652 case BCM57800:
12612 case BCM57800_MF: 12653 case BCM57800_MF:
12613 case BCM57800_VF:
12614 case BCM57810: 12654 case BCM57810:
12615 case BCM57810_MF: 12655 case BCM57810_MF:
12616 case BCM57840_4_10: 12656 case BCM57840_4_10:
12617 case BCM57840_2_20: 12657 case BCM57840_2_20:
12618 case BCM57840_O: 12658 case BCM57840_O:
12619 case BCM57840_MFO: 12659 case BCM57840_MFO:
12620 case BCM57810_VF:
12621 case BCM57840_MF: 12660 case BCM57840_MF:
12622 case BCM57840_VF:
12623 case BCM57811: 12661 case BCM57811:
12624 case BCM57811_MF: 12662 case BCM57811_MF:
12625 case BCM57811_VF:
12626 return BNX2X_MULTI_TX_COS_E3B0; 12663 return BNX2X_MULTI_TX_COS_E3B0;
12664 case BCM57712_VF:
12665 case BCM57800_VF:
12666 case BCM57810_VF:
12667 case BCM57840_VF:
12668 case BCM57811_VF:
12627 return 1; 12669 return 1;
12628 default: 12670 default:
12629 pr_err("Unknown board_type (%d), aborting\n", chip_id); 12671 pr_err("Unknown board_type (%d), aborting\n", chip_id);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..bf08ad68b405 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
470 bnx2x_vfop_qdtor, cmd->done); 470 bnx2x_vfop_qdtor, cmd->done);
471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
472 cmd->block); 472 cmd->block);
473 } else {
474 BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
475 return -ENOMEM;
473 } 476 }
474 DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
475 vf->abs_vfid, vfop->rc);
476 return -ENOMEM;
477} 477}
478 478
479static void 479static void
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1820 if (fid & IGU_FID_ENCODE_IS_PF) 1820 if (fid & IGU_FID_ENCODE_IS_PF)
1821 current_pf = fid & IGU_FID_PF_NUM_MASK; 1821 current_pf = fid & IGU_FID_PF_NUM_MASK;
1822 else if (current_pf == BP_ABS_FUNC(bp)) 1822 else if (current_pf == BP_FUNC(bp))
1823 bnx2x_vf_set_igu_info(bp, sb_id, 1823 bnx2x_vf_set_igu_info(bp, sb_id,
1824 (fid & IGU_FID_VF_NUM_MASK)); 1824 (fid & IGU_FID_VF_NUM_MASK));
1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3180 /* set local queue arrays */ 3180 /* set local queue arrays */
3181 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3181 vf->vfqs = &bp->vfdb->vfqs[qcount];
3182 qcount += vf_sb_count(vf); 3182 qcount += vf_sb_count(vf);
3183 bnx2x_iov_static_resc(bp, vf);
3183 } 3184 }
3184 3185
3185 /* prepare msix vectors in VF configuration space */ 3186 /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3187 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3188 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3188 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3189 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3189 num_vf_queues); 3190 num_vf_queues);
3191 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3192 vf_idx, num_vf_queues);
3190 } 3193 }
3191 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3192 3195
@@ -3387,14 +3390,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3387 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3390 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3388 if (rc) { 3391 if (rc) {
3389 BNX2X_ERR("failed to delete eth macs\n"); 3392 BNX2X_ERR("failed to delete eth macs\n");
3390 return -EINVAL; 3393 rc = -EINVAL;
3394 goto out;
3391 } 3395 }
3392 3396
3393 /* remove existing uc list macs */ 3397 /* remove existing uc list macs */
3394 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3398 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3395 if (rc) { 3399 if (rc) {
3396 BNX2X_ERR("failed to delete uc_list macs\n"); 3400 BNX2X_ERR("failed to delete uc_list macs\n");
3397 return -EINVAL; 3401 rc = -EINVAL;
3402 goto out;
3398 } 3403 }
3399 3404
3400 /* configure the new mac to device */ 3405 /* configure the new mac to device */
@@ -3402,6 +3407,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3402 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3407 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3403 BNX2X_ETH_MAC, &ramrod_flags); 3408 BNX2X_ETH_MAC, &ramrod_flags);
3404 3409
3410out:
3405 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3411 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3406 } 3412 }
3407 3413
@@ -3464,7 +3470,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3464 &ramrod_flags); 3470 &ramrod_flags);
3465 if (rc) { 3471 if (rc) {
3466 BNX2X_ERR("failed to delete vlans\n"); 3472 BNX2X_ERR("failed to delete vlans\n");
3467 return -EINVAL; 3473 rc = -EINVAL;
3474 goto out;
3468 } 3475 }
3469 3476
3470 /* send queue update ramrod to configure default vlan and silent 3477 /* send queue update ramrod to configure default vlan and silent
@@ -3498,7 +3505,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3498 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3505 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3499 if (rc) { 3506 if (rc) {
3500 BNX2X_ERR("failed to configure vlan\n"); 3507 BNX2X_ERR("failed to configure vlan\n");
3501 return -EINVAL; 3508 rc = -EINVAL;
3509 goto out;
3502 } 3510 }
3503 3511
3504 /* configure default vlan to vf queue and set silent 3512 /* configure default vlan to vf queue and set silent
@@ -3516,18 +3524,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3516 rc = bnx2x_queue_state_change(bp, &q_params); 3524 rc = bnx2x_queue_state_change(bp, &q_params);
3517 if (rc) { 3525 if (rc) {
3518 BNX2X_ERR("Failed to configure default VLAN\n"); 3526 BNX2X_ERR("Failed to configure default VLAN\n");
3519 return rc; 3527 goto out;
3520 } 3528 }
3521 3529
3522 /* clear the flag indicating that this VF needs its vlan 3530 /* clear the flag indicating that this VF needs its vlan
3523 * (will only be set if the HV configured th Vlan before vf was 3531 * (will only be set if the HV configured the Vlan before vf was
3524 * and we were called because the VF came up later 3532 * up and we were called because the VF came up later
3525 */ 3533 */
3534out:
3526 vf->cfg_flags &= ~VF_CFG_VLAN; 3535 vf->cfg_flags &= ~VF_CFG_VLAN;
3527
3528 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3536 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3529 } 3537 }
3530 return 0; 3538 return rc;
3531} 3539}
3532 3540
3533/* crc is the first field in the bulletin board. Compute the crc over the 3541/* crc is the first field in the bulletin board. Compute the crc over the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 86436c77af03..3b75070411aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
196 196
197 } else if (bp->func_stx) { 197 } else if (bp->func_stx) {
198 *stats_comp = 0; 198 *stats_comp = 0;
199 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 199 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
200 } 200 }
201} 201}
202 202
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..28757dfacf0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -980,7 +980,7 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
980 dmae.len = len32; 980 dmae.len = len32;
981 981
982 /* issue the command and wait for completion */ 982 /* issue the command and wait for completion */
983 return bnx2x_issue_dmae_with_comp(bp, &dmae); 983 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
984} 984}
985 985
986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) 986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1765 switch (mbx->first_tlv.tl.type) { 1765 switch (mbx->first_tlv.tl.type) {
1766 case CHANNEL_TLV_ACQUIRE: 1766 case CHANNEL_TLV_ACQUIRE:
1767 bnx2x_vf_mbx_acquire(bp, vf, mbx); 1767 bnx2x_vf_mbx_acquire(bp, vf, mbx);
1768 break; 1768 return;
1769 case CHANNEL_TLV_INIT: 1769 case CHANNEL_TLV_INIT:
1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx); 1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
1771 break; 1771 return;
1772 case CHANNEL_TLV_SETUP_Q: 1772 case CHANNEL_TLV_SETUP_Q:
1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx); 1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
1774 break; 1774 return;
1775 case CHANNEL_TLV_SET_Q_FILTERS: 1775 case CHANNEL_TLV_SET_Q_FILTERS:
1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); 1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
1777 break; 1777 return;
1778 case CHANNEL_TLV_TEARDOWN_Q: 1778 case CHANNEL_TLV_TEARDOWN_Q:
1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); 1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
1780 break; 1780 return;
1781 case CHANNEL_TLV_CLOSE: 1781 case CHANNEL_TLV_CLOSE:
1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx); 1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
1783 break; 1783 return;
1784 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1786 break; 1786 return;
1787 case CHANNEL_TLV_UPDATE_RSS: 1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break; 1789 return;
1790 } 1790 }
1791 1791
1792 } else { 1792 } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1802 for (i = 0; i < 20; i++) 1802 for (i = 0; i < 20; i++)
1803 DP_CONT(BNX2X_MSG_IOV, "%x ", 1803 DP_CONT(BNX2X_MSG_IOV, "%x ",
1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
1805 }
1805 1806
1806 /* test whether we can respond to the VF (do we have an address 1807 /* can we respond to VF (do we have an address for it?) */
1807 * for it?) 1808 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1808 */ 1809 /* mbx_resp uses the op_rc of the VF */
1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1810 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1810 /* mbx_resp uses the op_rc of the VF */
1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1812 1811
1813 /* notify the VF that we do not support this request */ 1812 /* notify the VF that we do not support this request */
1814 bnx2x_vf_mbx_resp(bp, vf); 1813 bnx2x_vf_mbx_resp(bp, vf);
1815 } else { 1814 } else {
1816 /* can't send a response since this VF is unknown to us 1815 /* can't send a response since this VF is unknown to us
1817 * just ack the FW to release the mailbox and unlock 1816 * just ack the FW to release the mailbox and unlock
1818 * the channel. 1817 * the channel.
1819 */ 1818 */
1820 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1819 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1821 mmiowb(); 1820 /* Firmware ack should be written before unlocking channel */
1822 bnx2x_unlock_vf_pf_channel(bp, vf, 1821 mmiowb();
1823 mbx->first_tlv.tl.type); 1822 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1824 }
1825 } 1823 }
1826} 1824}
1827 1825
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e1..48f52882a22b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ 106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
107 107
108#define XGMAC_ADDR_AE 0x80000000 108#define XGMAC_ADDR_AE 0x80000000
109#define XGMAC_MAX_FILTER_ADDR 31
110 109
111/* PMT Control and Status */ 110/* PMT Control and Status */
112#define XGMAC_PMT_POINTER_RESET 0x80000000 111#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
384 struct device *device; 383 struct device *device;
385 struct napi_struct napi; 384 struct napi_struct napi;
386 385
386 int max_macs;
387 struct xgmac_extra_stats xstats; 387 struct xgmac_extra_stats xstats;
388 388
389 spinlock_t stats_lock; 389 spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", 1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1292 netdev_mc_count(dev), netdev_uc_count(dev)); 1292 netdev_mc_count(dev), netdev_uc_count(dev));
1293 1293
1294 if (dev->flags & IFF_PROMISC) { 1294 if (dev->flags & IFF_PROMISC)
1295 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); 1295 value |= XGMAC_FRAME_FILTER_PR;
1296 return;
1297 }
1298 1296
1299 memset(hash_filter, 0, sizeof(hash_filter)); 1297 memset(hash_filter, 0, sizeof(hash_filter));
1300 1298
1301 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { 1299 if (netdev_uc_count(dev) > priv->max_macs) {
1302 use_hash = true; 1300 use_hash = true;
1303 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; 1301 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1304 } 1302 }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1321 goto out; 1319 goto out;
1322 } 1320 }
1323 1321
1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1322 if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
1325 use_hash = true; 1323 use_hash = true;
1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1324 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else { 1325 } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1342 } 1340 }
1343 1341
1344out: 1342out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) 1343 for (i = reg; i <= priv->max_macs; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, reg); 1344 xgmac_set_mac_addr(ioaddr, NULL, i);
1347 for (i = 0; i < XGMAC_NUM_HASH; i++) 1345 for (i = 0; i < XGMAC_NUM_HASH; i++)
1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1346 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1349 1347
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
1761 uid = readl(priv->base + XGMAC_VERSION); 1759 uid = readl(priv->base + XGMAC_VERSION);
1762 netdev_info(ndev, "h/w version is 0x%x\n", uid); 1760 netdev_info(ndev, "h/w version is 0x%x\n", uid);
1763 1761
1762 /* Figure out how many valid mac address filter registers we have */
1763 writel(1, priv->base + XGMAC_ADDR_HIGH(31));
1764 if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
1765 priv->max_macs = 31;
1766 else
1767 priv->max_macs = 7;
1768
1764 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1769 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1765 ndev->irq = platform_get_irq(pdev, 0); 1770 ndev->irq = platform_get_irq(pdev, 0);
1766 if (ndev->irq == -ENXIO) { 1771 if (ndev->irq == -ENXIO) {
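
Aside: the xgmac probe above discovers how many MAC filter slots exist by writing a known value to the highest candidate register and reading it back. A toy sketch of that write/readback capability probe, with the "registers" replaced by plain memory and the slot count chosen arbitrarily for the demo:

/* Illustrative sketch only: capability probing by write-then-readback. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[32];               /* pretend MMIO space */
static const int implemented = 8;       /* assumption for the demo */

static void reg_write(int idx, uint32_t val)
{
        if (idx < implemented)          /* unimplemented slots ignore writes */
                regs[idx] = val;
}

static uint32_t reg_read(int idx)
{
        return idx < implemented ? regs[idx] : 0;
}

int main(void)
{
        int max_macs;

        reg_write(31, 1);
        max_macs = (reg_read(31) == 1) ? 31 : 7;
        printf("max_macs = %d\n", max_macs);    /* prints 7 here */
        return 0;
}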
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5f5896e522d2..a7a941b1a655 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
158 158
159/* DM9000 network board routine ---------------------------- */ 159/* DM9000 network board routine ---------------------------- */
160 160
161static void
162dm9000_reset(board_info_t * db)
163{
164 dev_dbg(db->dev, "resetting device\n");
165
166 /* RESET device */
167 writeb(DM9000_NCR, db->io_addr);
168 udelay(200);
169 writeb(NCR_RST, db->io_data);
170 udelay(200);
171}
172
173/* 161/*
174 * Read a byte from I/O port 162 * Read a byte from I/O port
175 */ 163 */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
191 writeb(value, db->io_data); 179 writeb(value, db->io_data);
192} 180}
193 181
182static void
183dm9000_reset(board_info_t *db)
184{
185 dev_dbg(db->dev, "resetting device\n");
186
187 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
188 * The essential point is that we have to do a double reset, and the
189 * instruction is to set LBK into MAC internal loopback mode.
190 */
191 iow(db, DM9000_NCR, 0x03);
192 udelay(100); /* Application note says at least 20 us */
193 if (ior(db, DM9000_NCR) & 1)
194 dev_err(db->dev, "dm9000 did not respond to first reset\n");
195
196 iow(db, DM9000_NCR, 0);
197 iow(db, DM9000_NCR, 0x03);
198 udelay(100);
199 if (ior(db, DM9000_NCR) & 1)
200 dev_err(db->dev, "dm9000 did not respond to second reset\n");
201}
202
194/* routines for sending block to chip */ 203/* routines for sending block to chip */
195 204
196static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) 205static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
744static void dm9000_show_carrier(board_info_t *db, 753static void dm9000_show_carrier(board_info_t *db,
745 unsigned carrier, unsigned nsr) 754 unsigned carrier, unsigned nsr)
746{ 755{
756 int lpa;
747 struct net_device *ndev = db->ndev; 757 struct net_device *ndev = db->ndev;
758 struct mii_if_info *mii = &db->mii;
748 unsigned ncr = dm9000_read_locked(db, DM9000_NCR); 759 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
749 760
750 if (carrier) 761 if (carrier) {
751 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", 762 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
763 dev_info(db->dev,
764 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
752 ndev->name, (nsr & NSR_SPEED) ? 10 : 100, 765 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
753 (ncr & NCR_FDX) ? "full" : "half"); 766 (ncr & NCR_FDX) ? "full" : "half", lpa);
754 else 767 } else {
755 dev_info(db->dev, "%s: link down\n", ndev->name); 768 dev_info(db->dev, "%s: link down\n", ndev->name);
769 }
756} 770}
757 771
758static void 772static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
890 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 904 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
891 905
892 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 906 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
907 iow(db, DM9000_GPR, 0);
893 908
894 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 909 /* If we are dealing with DM9000B, some extra steps are required: a
895 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ 910 * manual phy reset, and setting init params.
911 */
912 if (db->type == TYPE_DM9000B) {
913 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
914 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
915 }
896 916
897 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 917 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
898 918
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
91#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 96u
92#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
93 94
@@ -333,6 +334,7 @@ enum vf_state {
333 334
334#define BE_FLAGS_LINK_STATUS_INIT 1 335#define BE_FLAGS_LINK_STATUS_INIT 1
335#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 336#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
337#define BE_FLAGS_VLAN_PROMISC (1 << 4)
336#define BE_FLAGS_NAPI_ENABLED (1 << 9) 338#define BE_FLAGS_NAPI_ENABLED (1 << 9)
337#define BE_UC_PMAC_COUNT 30 339#define BE_UC_PMAC_COUNT 30
338#define BE_VF_UC_PMAC_COUNT 2 340#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..c08fd32bb8e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
180 dev_err(&adapter->pdev->dev, 180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n", 181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status); 182 opcode, subsystem, compl_status, extd_status);
183
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
185 return extd_status;
183 } 186 }
184 } 187 }
185done: 188done:
@@ -1195,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1195 1198
1196 if (lancer_chip(adapter)) { 1199 if (lancer_chip(adapter)) {
1197 req->hdr.version = 1; 1200 req->hdr.version = 1;
1198 req->if_id = cpu_to_le16(adapter->if_handle);
1199 } else if (BEx_chip(adapter)) { 1201 } else if (BEx_chip(adapter)) {
1200 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1202 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1201 req->hdr.version = 2; 1203 req->hdr.version = 2;
@@ -1203,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1203 req->hdr.version = 2; 1205 req->hdr.version = 2;
1204 } 1206 }
1205 1207
1208 if (req->hdr.version > 0)
1209 req->if_id = cpu_to_le16(adapter->if_handle);
1206 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1210 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1207 req->ulp_num = BE_ULP1_NUM; 1211 req->ulp_num = BE_ULP1_NUM;
1208 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1212 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1812,6 +1816,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1812 } else if (flags & IFF_ALLMULTI) { 1816 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags = 1817 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1818 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1819 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1820 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1821
1822 if (value == ON)
1823 req->if_flags =
1824 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1815 } else { 1825 } else {
1816 struct netdev_hw_addr *ha; 1826 struct netdev_hw_addr *ha;
1817 int i = 0; 1827 int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88c..108ca8abf0af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
64
63#define CQE_STATUS_COMPL_MASK 0xFFFF 65#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF 67#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
1791 u8 acpi_params; 1793 u8 acpi_params;
1792 u8 wol_param; 1794 u8 wol_param;
1793 u16 rsvd7; 1795 u16 rsvd7;
1794 u32 rsvd8[3]; 1796 u32 rsvd8[7];
1795} __packed; 1797} __packed;
1796 1798
1797struct be_cmd_req_get_func_config { 1799struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 100b528b9bd0..2c38cc402119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
855 unsigned int eth_hdr_len; 855 unsigned int eth_hdr_len;
856 struct iphdr *ip; 856 struct iphdr *ip;
857 857
858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less 858 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to 859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length. 860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */ 861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { 862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36)) 863 if (skb_padto(skb, 36))
864 goto tx_drop; 864 goto tx_drop;
865 skb->len = 36; 865 skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0); 1014 vids, num, 1, 0);
1015 1015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) { 1016 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); 1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); 1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1020 goto set_vlan_promisc; 1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1021 } 1035 }
1022 1036
1023 return status; 1037 return status;
1024 1038
1025set_vlan_promisc: 1039set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1027 NULL, 0, 1, 1); 1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1028 return status; 1050 return status;
1029} 1051}
1030 1052
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 struct be_adapter *adapter = netdev_priv(netdev); 1055 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0; 1056 int status = 0;
1035 1057
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040 1058
1041 /* Packets with VID 0 are always received by Lancer by default */ 1059 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0) 1060 if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1059 struct be_adapter *adapter = netdev_priv(netdev); 1077 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status = 0; 1078 int status = 0;
1061 1079
1062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063 status = -EINVAL;
1064 goto ret;
1065 }
1066
1067 /* Packets with VID 0 are always received by Lancer by default */ 1080 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0) 1081 if (lancer_chip(adapter) && vid == 0)
1069 goto ret; 1082 goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1188 1201
1189 vi->vf = vf; 1202 vi->vf = vf;
1190 vi->tx_rate = vf_cfg->tx_rate; 1203 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag; 1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1192 vi->qos = 0; 1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1194 1207
1195 return 0; 1208 return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
1199 int vf, u16 vlan, u8 qos) 1212 int vf, u16 vlan, u8 qos)
1200{ 1213{
1201 struct be_adapter *adapter = netdev_priv(netdev); 1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1202 int status = 0; 1216 int status = 0;
1203 1217
1204 if (!sriov_enabled(adapter)) 1218 if (!sriov_enabled(adapter))
1205 return -EPERM; 1219 return -EPERM;
1206 1220
1207 if (vf >= adapter->num_vfs || vlan > 4095) 1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1208 return -EINVAL; 1222 return -EINVAL;
1209 1223
1210 if (vlan) { 1224 if (vlan || qos) {
1211 if (adapter->vf_cfg[vf].vlan_tag != vlan) { 1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1212 /* If this is new value, program it. Else skip. */ 1227 /* If this is new value, program it. Else skip. */
1213 adapter->vf_cfg[vf].vlan_tag = vlan; 1228 vf_cfg->vlan_tag = vlan;
1214 1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1215 status = be_cmd_set_hsw_config(adapter, vlan, 1230 vf_cfg->if_handle, 0);
1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1217 } 1231 }
1218 } else { 1232 } else {
1219 /* Reset Transparent Vlan Tagging. */ 1233 /* Reset Transparent Vlan Tagging. */
1220 adapter->vf_cfg[vf].vlan_tag = 0; 1234 vf_cfg->vlan_tag = 0;
1221 vlan = adapter->vf_cfg[vf].def_vid; 1235 vlan = vf_cfg->def_vid;
1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1223 adapter->vf_cfg[vf].if_handle, 0); 1237 vf_cfg->if_handle, 0);
1224 } 1238 }
1225 1239
1226 1240
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
2963 2977
2964 if (adapter->function_mode & FLEX10_MODE) 2978 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2966 else 2982 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC; 2984 res->max_mcast_mac = BE_MAX_MC;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4eaadeb572f..9fbe4dda7a0e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,6 +88,7 @@
88 88
89#include <asm/io.h> 89#include <asm/io.h>
90#include <asm/reg.h> 90#include <asm/reg.h>
91#include <asm/mpc85xx.h>
91#include <asm/irq.h> 92#include <asm/irq.h>
92#include <asm/uaccess.h> 93#include <asm/uaccess.h>
93#include <linux/module.h> 94#include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
939 } 940 }
940} 941}
941 942
942static void gfar_detect_errata(struct gfar_private *priv) 943static void __gfar_detect_errata_83xx(struct gfar_private *priv)
943{ 944{
944 struct device *dev = &priv->ofdev->dev;
945 unsigned int pvr = mfspr(SPRN_PVR); 945 unsigned int pvr = mfspr(SPRN_PVR);
946 unsigned int svr = mfspr(SPRN_SVR); 946 unsigned int svr = mfspr(SPRN_SVR);
947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ 947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
958 priv->errata |= GFAR_ERRATA_76; 958 priv->errata |= GFAR_ERRATA_76;
959 959
960 /* MPC8313 and MPC837x all rev */ 960 /* MPC8313 Rev < 2.0 */
961 if ((pvr == 0x80850010 && mod == 0x80b0) || 961 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
962 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 962 priv->errata |= GFAR_ERRATA_12;
963 priv->errata |= GFAR_ERRATA_A002; 963}
964 964
965 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 965static void __gfar_detect_errata_85xx(struct gfar_private *priv)
966 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 966{
967 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 967 unsigned int svr = mfspr(SPRN_SVR);
968
969 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
968 priv->errata |= GFAR_ERRATA_12; 970 priv->errata |= GFAR_ERRATA_12;
971 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
972 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
973 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
974}
975
976static void gfar_detect_errata(struct gfar_private *priv)
977{
978 struct device *dev = &priv->ofdev->dev;
979
980 /* no plans to fix */
981 priv->errata |= GFAR_ERRATA_A002;
982
983 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
984 __gfar_detect_errata_85xx(priv);
985 else /* non-mpc85xx parts, i.e. e300 core based */
986 __gfar_detect_errata_83xx(priv);
969 987
970 if (priv->errata) 988 if (priv->errata)
971 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 989 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1599 /* Normaly TSEC should not hang on GRS commands, so we should 1617 /* Normaly TSEC should not hang on GRS commands, so we should
1600 * actually wait for IEVENT_GRSC flag. 1618 * actually wait for IEVENT_GRSC flag.
1601 */ 1619 */
1602 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1620 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1603 return 0; 1621 return 0;
1604 1622
1605 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are 1623 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
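The gianfar.c hunk splits errata detection so that SVR-based checks for mpc85xx parts run separately from the PVR-based checks used on e300/83xx parts, with GFAR_ERRATA_A002 now applied unconditionally. Below is a hedged, standalone approximation of that dispatch shape; the SVR encoding, revision cut-offs and flag values are simplified stand-ins, not the kernel's definitions.

/* Userspace sketch of dispatch-by-core: decide which family the part
 * belongs to, then run only the errata checks that apply to it. */
#include <stdio.h>

#define ERRATA_A002 0x1
#define ERRATA_12   0x2

static unsigned detect_85xx(unsigned svr)
{
	unsigned errata = 0;

	if ((svr >> 8) == 0x8548 && (svr & 0xff) == 0x20)
		errata |= ERRATA_12;           /* e.g. MPC8548 rev 2.0 */
	return errata;
}

static unsigned detect_83xx(unsigned svr)
{
	unsigned errata = 0;

	if ((svr >> 8) == 0x8313 && (svr & 0xff) < 0x20)
		errata |= ERRATA_12;           /* e.g. MPC8313 rev < 2.0 */
	return errata;
}

static unsigned detect_errata(int is_e500, unsigned svr)
{
	unsigned errata = ERRATA_A002;         /* applies everywhere, no fix planned */

	errata |= is_e500 ? detect_85xx(svr) : detect_83xx(svr);
	return errata;
}

int main(void)
{
	printf("e500 SVR 0x854820 -> errata 0x%x\n", detect_errata(1, 0x854820));
	printf("e300 SVR 0x831310 -> errata 0x%x\n", detect_errata(0, 0x831310));
	return 0;
}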
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908ae..e006a09ba899 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
452 err = -ENODEV; 452 err = -ENODEV;
453 453
454 etsects->caps = ptp_gianfar_caps; 454 etsects->caps = ptp_gianfar_caps;
455 etsects->cksel = DEFAULT_CKSEL; 455
456 if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
457 etsects->cksel = DEFAULT_CKSEL;
456 458
457 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || 459 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
458 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || 460 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f811..cfef7fc32cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
701 701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) { 703 if (cmd_details) {
704 memcpy(details, cmd_details, 704 *details = *cmd_details;
705 sizeof(struct i40e_asq_cmd_details));
706 705
707 /* If the cmd_details are defined copy the cookie. The 706 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored 707 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 759 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761 760
762 /* if the desc is available copy the temp desc to the right place */ 761 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); 762 *desc_on_ring = *desc;
764 763
765 /* if buff is not NULL assume indirect command */ 764 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) { 765 if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
807 806
808 /* if ready, copy the desc back to temp */ 807 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) { 808 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); 809 *desc = *desc_on_ring;
811 if (buff != NULL) 810 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size); 811 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval); 812 retval = le16_to_cpu(desc->retval);
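The i40e_adminq.c hunks replace memcpy() of whole descriptors with plain struct assignment, which copies the same bytes but lets the compiler check that source and destination really are the same type and size. A trivial standalone illustration, using a made-up descriptor type rather than the real i40e_aq_desc:

/* Struct assignment copies every member, with type checking that a raw
 * memcpy() of sizeof(...) bytes would not give. */
#include <stdio.h>

struct aq_desc {
	unsigned short flags;
	unsigned short opcode;
	unsigned int   cookie;
};

int main(void)
{
	struct aq_desc tmp = { .flags = 0x1, .opcode = 0x701, .cookie = 42 };
	struct aq_desc on_ring;

	on_ring = tmp;   /* same effect as memcpy(&on_ring, &tmp, sizeof(tmp)) */
	printf("opcode 0x%x cookie %u\n", on_ring.opcode, on_ring.cookie);
	return 0;
}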
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1d..1e4ea134975a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
507 507
508 /* save link status information */ 508 /* save link status information */
509 if (link) 509 if (link)
510 memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); 510 *link = *hw_link_info;
511 511
512 /* flag cleared so helper functions don't call AQ again */ 512 /* flag cleared so helper functions don't call AQ again */
513 hw->phy.get_link_info = false; 513 hw->phy.get_link_info = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694ea..221aa4795017 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 mem->size = ALIGN(size, alignment); 101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL); 103 &mem->pa, GFP_KERNEL);
104 if (mem->va) 104 if (!mem->va)
105 return 0; 105 return -ENOMEM;
106 106
107 return -ENOMEM; 107 return 0;
108} 108}
109 109
110/** 110/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
136 mem->size = size; 136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL); 137 mem->va = kzalloc(size, GFP_KERNEL);
138 138
139 if (mem->va) 139 if (!mem->va)
140 return 0; 140 return -ENOMEM;
141 141
142 return -ENOMEM; 142 return 0;
143} 143}
144 144
145/** 145/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id) 174 u16 needed, u16 id)
175{ 175{
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 int i = 0; 177 int i, j;
178 int j = 0;
179 178
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { 179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev, 180 dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 185
187 /* start the linear search with an imperfect hint */ 186 /* start the linear search with an imperfect hint */
188 i = pile->search_hint; 187 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) { 188 while (i < pile->num_entries) {
190 /* skip already allocated entries */ 189 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) { 190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++; 191 i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT; 204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i; 205 ret = i;
207 pile->search_hint = i + j; 206 pile->search_hint = i + j;
207 break;
208 } else { 208 } else {
209 /* not enough, so skip over it and continue looking */ 209 /* not enough, so skip over it and continue looking */
210 i += j; 210 i += j;
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1388 bool add_happened = false; 1388 bool add_happened = false;
1389 int filter_list_len = 0; 1389 int filter_list_len = 0;
1390 u32 changed_flags = 0; 1390 u32 changed_flags = 0;
1391 i40e_status ret = 0; 1391 i40e_status aq_ret = 0;
1392 struct i40e_pf *pf; 1392 struct i40e_pf *pf;
1393 int num_add = 0; 1393 int num_add = 0;
1394 int num_del = 0; 1394 int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1449 1449
1450 /* flush a full buffer */ 1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) { 1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw, 1452 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del, 1453 vsi->seid, del_list, num_del,
1454 NULL); 1454 NULL);
1455 num_del = 0; 1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list)); 1456 memset(del_list, 0, sizeof(*del_list));
1457 1457
1458 if (ret) 1458 if (aq_ret)
1459 dev_info(&pf->pdev->dev, 1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret, 1461 aq_ret,
1462 pf->hw.aq.asq_last_status); 1462 pf->hw.aq.asq_last_status);
1463 } 1463 }
1464 } 1464 }
1465 if (num_del) { 1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1466 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL); 1467 del_list, num_del, NULL);
1468 num_del = 0; 1468 num_del = 0;
1469 1469
1470 if (ret) 1470 if (aq_ret)
1471 dev_info(&pf->pdev->dev, 1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n", 1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status); 1473 aq_ret, pf->hw.aq.asq_last_status);
1474 } 1474 }
1475 1475
1476 kfree(del_list); 1476 kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1515 1515
1516 /* flush a full buffer */ 1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) { 1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw, 1518 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1519 vsi->seid, 1519 add_list, num_add,
1520 add_list, 1520 NULL);
1521 num_add,
1522 NULL);
1523 num_add = 0; 1521 num_add = 0;
1524 1522
1525 if (ret) 1523 if (aq_ret)
1526 break; 1524 break;
1527 memset(add_list, 0, sizeof(*add_list)); 1525 memset(add_list, 0, sizeof(*add_list));
1528 } 1526 }
1529 } 1527 }
1530 if (num_add) { 1528 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1529 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL); 1530 add_list, num_add, NULL);
1533 num_add = 0; 1531 num_add = 0;
1534 } 1532 }
1535 kfree(add_list); 1533 kfree(add_list);
1536 add_list = NULL; 1534 add_list = NULL;
1537 1535
1538 if (add_happened && (!ret)) { 1536 if (add_happened && (!aq_ret)) {
1539 /* do nothing */; 1537 /* do nothing */;
1540 } else if (add_happened && (ret)) { 1538 } else if (add_happened && (aq_ret)) {
1541 dev_info(&pf->pdev->dev, 1539 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n", 1540 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status); 1541 aq_ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1542 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1543 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) { 1544 &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1556 if (changed_flags & IFF_ALLMULTI) { 1554 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc; 1555 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1556 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1557 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid, 1558 vsi->seid,
1561 cur_multipromisc, 1559 cur_multipromisc,
1562 NULL); 1560 NULL);
1563 if (ret) 1561 if (aq_ret)
1564 dev_info(&pf->pdev->dev, 1562 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n", 1563 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status); 1564 aq_ret, pf->hw.aq.asq_last_status);
1567 } 1565 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1566 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc; 1567 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1568 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1569 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state)); 1570 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, 1571 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid, 1572 vsi->seid,
1575 cur_promisc, 1573 cur_promisc, NULL);
1576 NULL); 1574 if (aq_ret)
1577 if (ret)
1578 dev_info(&pf->pdev->dev, 1575 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n", 1576 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status); 1577 aq_ret, pf->hw.aq.asq_last_status);
1581 } 1578 }
1582 1579
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1580 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured 1788 * @vsi: the vsi being configured
1792 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1789 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1793 **/ 1792 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{ 1794{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted 1863 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added 1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1866 **/ 1867 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid) 1869 __always_unused __be16 proto, u16 vid)
1869{ 1870{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev); 1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi; 1872 struct i40e_vsi *vsi = np->vsi;
1872 int ret; 1873 int ret = 0;
1873 1874
1874 if (vid > 4095) 1875 if (vid > 4095)
1875 return 0; 1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1876 1879
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should 1880 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive 1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged) 1882 * any traffic (i.e. with any vlan tag, or untagged)
1882 */ 1883 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884 1885
1885 if (!ret) { 1886 if (!ret && (vid < VLAN_N_VID))
1886 if (vid < VLAN_N_VID) 1887 set_bit(vid, vsi->active_vlans);
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889 1888
1890 return 0; 1889 return ret;
1891} 1890}
1892 1891
1893/** 1892/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted 1894 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed 1895 * @vid: vlan id to be removed
1896 *
1897 * net_device_ops implementation for adding vlan ids
1897 **/ 1898 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid) 1900 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1901 struct i40e_netdev_priv *np = netdev_priv(netdev); 1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi; 1903 struct i40e_vsi *vsi = np->vsi;
1903 1904
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n", 1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1905 netdev->dev_addr, vid); 1906
1906 /* return code is ignored as there is nothing a user 1907 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was 1908 * can do about failure to remove and a log message was
1908 * already printed from another function 1909 * already printed from the other function
1909 */ 1910 */
1910 i40e_vsi_kill_vlan(vsi, vid); 1911 i40e_vsi_kill_vlan(vsi, vid);
1911 1912
1912 clear_bit(vid, vsi->active_vlans); 1913 clear_bit(vid, vsi->active_vlans);
1914
1913 return 0; 1915 return 0;
1914} 1916}
1915 1917
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
1936 * @vsi: the vsi being adjusted 1938 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID 1939 * @vid: the vlan id to set as a PVID
1938 **/ 1940 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 1941int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{ 1942{
1941 struct i40e_vsi_context ctxt; 1943 struct i40e_vsi_context ctxt;
1942 i40e_status ret; 1944 i40e_status aq_ret;
1943 1945
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid); 1947 vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1948 1950
1949 ctxt.seid = vsi->seid; 1951 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) { 1954 if (aq_ret) {
1953 dev_info(&vsi->back->pdev->dev, 1955 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n", 1956 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status); 1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1956 } 1959 }
1957 1960
1958 return ret; 1961 return 0;
1959} 1962}
1960 1963
1961/** 1964/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3326 **/ 3329 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{ 3331{
3329 int num_tc = 0, i; 3332 u8 num_tc = 0;
3333 int i;
3330 3334
3331 /* Scan the ETS Config Priority Table to find 3335 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority 3336 * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3341 /* Traffic class index starts from zero so 3345 /* Traffic class index starts from zero so
3342 * increment to return the actual count 3346 * increment to return the actual count
3343 */ 3347 */
3344 num_tc++; 3348 return num_tc + 1;
3345
3346 return num_tc;
3347} 3349}
3348 3350
3349/** 3351/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back; 3454 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw; 3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3454 u32 tc_bw_max; 3457 u32 tc_bw_max;
3455 int ret;
3456 int i; 3458 int i;
3457 3459
3458 /* Get the VSI level BW configuration */ 3460 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) { 3462 if (aq_ret) {
3461 dev_info(&pf->pdev->dev, 3463 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status); 3465 aq_ret, pf->hw.aq.asq_last_status);
3464 return ret; 3466 return -EINVAL;
3465 } 3467 }
3466 3468
3467 /* Get the VSI level BW configuration per TC */ 3469 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, 3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3469 &bw_ets_config, 3471 NULL);
3470 NULL); 3472 if (aq_ret) {
3471 if (ret) {
3472 dev_info(&pf->pdev->dev, 3473 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status); 3475 aq_ret, pf->hw.aq.asq_last_status);
3475 return ret; 3476 return -EINVAL;
3476 } 3477 }
3477 3478
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3494 /* 3 bits out of 4 for each TC */ 3495 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 } 3497 }
3497 return ret; 3498
3499 return 0;
3498} 3500}
3499 3501
3500/** 3502/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3505 * 3507 *
3506 * Returns 0 on success, negative value on failure 3508 * Returns 0 on success, negative value on failure
3507 **/ 3509 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, 3510static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3509 u8 enabled_tc,
3510 u8 *bw_share) 3511 u8 *bw_share)
3511{ 3512{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0; 3514 i40e_status aq_ret;
3515 int i;
3514 3516
3515 bw_data.tc_valid_bits = enabled_tc; 3517 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i]; 3519 bw_data.tc_bw_credits[i] = bw_share[i];
3518 3520
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, 3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3520 &bw_data, NULL); 3522 NULL);
3521 if (ret) { 3523 if (aq_ret) {
3522 dev_info(&vsi->back->pdev->dev, 3524 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status); 3526 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret; 3527 return -EINVAL;
3526 } 3528 }
3527 3529
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530 3532
3531 return ret; 3533 return 0;
3532} 3534}
3533 3535
3534/** 3536/**
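The i40e_get_lump() change above adds the missing break once a run of `needed` free entries has been claimed, so the search stops instead of continuing past a successful allocation. The sketch below reproduces that allocator shape in isolation, assuming a small table and an invented VALID bit; it illustrates the loop structure, not the driver's code.

/* "Pile" allocator sketch: scan from a search hint for `needed`
 * consecutive free entries, mark them, and stop. */
#include <stdio.h>

#define PILE_SIZE  16
#define VALID_BIT  0x8000

static unsigned short pile[PILE_SIZE];
static int search_hint;

static int get_lump(int needed, unsigned short id)
{
	int i = search_hint;

	while (i < PILE_SIZE) {
		int j;

		if (pile[i] & VALID_BIT) {     /* slot already taken */
			i++;
			continue;
		}
		for (j = 0; (i + j) < PILE_SIZE && j < needed; j++)
			if (pile[i + j] & VALID_BIT)
				break;
		if (j == needed) {             /* enough room: claim it */
			for (j = 0; j < needed; j++)
				pile[i + j] = id | VALID_BIT;
			search_hint = i + needed;
			return i;              /* the break added by the patch */
		}
		i += j;                        /* skip over the short run */
	}
	return -1;
}

int main(void)
{
	printf("first lump at %d\n", get_lump(4, 1));
	printf("second lump at %d\n", get_lump(4, 2));
	printf("too big: %d\n", get_lump(16, 3));
	return 0;
}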
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b051..151e00cad113 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); 1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1609 } 1609 }
1610 } else if (hw->phy.type == e1000_phy_82580) {
1611 /* enable MII loopback */
1612 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1610 } 1613 }
1611 1614
1612 /* add small delay to avoid loopback test failure */ 1615 /* add small delay to avoid loopback test failure */
@@ -2652,6 +2655,8 @@ static int igb_set_eee(struct net_device *netdev,
2652 (hw->phy.media_type != e1000_media_type_copper)) 2655 (hw->phy.media_type != e1000_media_type_copper))
2653 return -EOPNOTSUPP; 2656 return -EOPNOTSUPP;
2654 2657
2658 memset(&eee_curr, 0, sizeof(struct ethtool_eee));
2659
2655 ret_val = igb_get_eee(netdev, &eee_curr); 2660 ret_val = igb_get_eee(netdev, &eee_curr);
2656 if (ret_val) 2661 if (ret_val)
2657 return ret_val; 2662 return ret_val;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 7fb5677451f9..2c210ec35d59 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1133 spin_unlock_bh(&mp->mib_counters_lock); 1133 spin_unlock_bh(&mp->mib_counters_lock);
1134
1135 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1136} 1134}
1137 1135
1138static void mib_counters_timer_wrapper(unsigned long _mp) 1136static void mib_counters_timer_wrapper(unsigned long _mp)
1139{ 1137{
1140 struct mv643xx_eth_private *mp = (void *)_mp; 1138 struct mv643xx_eth_private *mp = (void *)_mp;
1141
1142 mib_counters_update(mp); 1139 mib_counters_update(mp);
1140 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1143} 1141}
1144 1142
1145 1143
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2237 mp->int_mask |= INT_TX_END_0 << i; 2235 mp->int_mask |= INT_TX_END_0 << i;
2238 } 2236 }
2239 2237
2238 add_timer(&mp->mib_counters_timer);
2240 port_start(mp); 2239 port_start(mp);
2241 2240
2242 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2241 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2534 if (!ppdev) 2533 if (!ppdev)
2535 return -ENOMEM; 2534 return -ENOMEM;
2536 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 2535 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2536 ppdev->dev.of_node = pnp;
2537 2537
2538 ret = platform_device_add_resources(ppdev, &res, 1); 2538 ret = platform_device_add_resources(ppdev, &res, 1);
2539 if (ret) 2539 if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2916 mp->mib_counters_timer.data = (unsigned long)mp; 2916 mp->mib_counters_timer.data = (unsigned long)mp;
2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2919 add_timer(&mp->mib_counters_timer);
2920 2919
2921 spin_lock_init(&mp->mib_counters_lock); 2920 spin_lock_init(&mp->mib_counters_lock);
2922 2921
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1a9c4f6269ea..ecc7f7b696b8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 PCI_DMA_FROMDEVICE); 3086 PCI_DMA_FROMDEVICE);
3087 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3088 } else { 3088 } else {
3089 struct skge_element ee;
3089 struct sk_buff *nskb; 3090 struct sk_buff *nskb;
3090 3091
3091 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3092 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3092 if (!nskb) 3093 if (!nskb)
3093 goto resubmit; 3094 goto resubmit;
3094 3095
3095 skb = e->skb; 3096 ee = *e;
3097
3098 skb = ee.skb;
3096 prefetch(skb->data); 3099 prefetch(skb->data);
3097 3100
3098 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3101 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3101 } 3104 }
3102 3105
3103 pci_unmap_single(skge->hw->pdev, 3106 pci_unmap_single(skge->hw->pdev,
3104 dma_unmap_addr(e, mapaddr), 3107 dma_unmap_addr(&ee, mapaddr),
3105 dma_unmap_len(e, maplen), 3108 dma_unmap_len(&ee, maplen),
3106 PCI_DMA_FROMDEVICE); 3109 PCI_DMA_FROMDEVICE);
3107 } 3110 }
3108 3111
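The skge_rx_get() fix copies the ring element into a local before the slot is reused for the replacement buffer, so the later unmap works on the saved mapping rather than on values the setup path has already overwritten. A minimal standalone rendering of that copy-before-reuse idea, with a simplified element layout:

/* Save the descriptor state before the slot is recycled, then tear the
 * old buffer down from the saved copy. */
#include <stdio.h>

struct elem {
	void         *skb;
	unsigned long mapaddr;
	unsigned int  maplen;
};

static void reuse_slot(struct elem *e, void *new_skb, unsigned long new_map)
{
	e->skb = new_skb;              /* slot now describes the new buffer */
	e->mapaddr = new_map;
	e->maplen = 1500;
}

int main(void)
{
	char old_buf[64], new_buf[64];
	struct elem slot = { old_buf, 0x1000, 1500 };
	struct elem ee = slot;         /* copy before the slot is recycled */

	reuse_slot(&slot, new_buf, 0x2000);

	/* unmap/free uses the saved copy, not the recycled slot */
	printf("unmapping old buffer at 0x%lx (len %u)\n", ee.mapaddr, ee.maplen);
	return 0;
}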
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
70 put_page(page); 70 put_page(page);
71 return -ENOMEM; 71 return -ENOMEM;
72 } 72 }
73 page_alloc->size = PAGE_SIZE << order; 73 page_alloc->page_size = PAGE_SIZE << order;
74 page_alloc->page = page; 74 page_alloc->page = page;
75 page_alloc->dma = dma; 75 page_alloc->dma = dma;
76 page_alloc->offset = frag_info->frag_align; 76 page_alloc->page_offset = frag_info->frag_align;
77 /* Not doing get_page() for each frag is a big win 77 /* Not doing get_page() for each frag is a big win
78 * on asymetric workloads. 78 * on asymetric workloads.
79 */ 79 */
80 atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); 80 atomic_set(&page->_count,
81 page_alloc->page_size / frag_info->frag_stride);
81 return 0; 82 return 0;
82} 83}
83 84
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
96 for (i = 0; i < priv->num_frags; i++) { 97 for (i = 0; i < priv->num_frags; i++) {
97 frag_info = &priv->frag_info[i]; 98 frag_info = &priv->frag_info[i];
98 page_alloc[i] = ring_alloc[i]; 99 page_alloc[i] = ring_alloc[i];
99 page_alloc[i].offset += frag_info->frag_stride; 100 page_alloc[i].page_offset += frag_info->frag_stride;
100 if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) 101
102 if (page_alloc[i].page_offset + frag_info->frag_stride <=
103 ring_alloc[i].page_size)
101 continue; 104 continue;
105
102 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) 106 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
103 goto out; 107 goto out;
104 } 108 }
105 109
106 for (i = 0; i < priv->num_frags; i++) { 110 for (i = 0; i < priv->num_frags; i++) {
107 frags[i] = ring_alloc[i]; 111 frags[i] = ring_alloc[i];
108 dma = ring_alloc[i].dma + ring_alloc[i].offset; 112 dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
109 ring_alloc[i] = page_alloc[i]; 113 ring_alloc[i] = page_alloc[i];
110 rx_desc->data[i].addr = cpu_to_be64(dma); 114 rx_desc->data[i].addr = cpu_to_be64(dma);
111 } 115 }
@@ -117,7 +121,7 @@ out:
117 frag_info = &priv->frag_info[i]; 121 frag_info = &priv->frag_info[i];
118 if (page_alloc[i].page != ring_alloc[i].page) { 122 if (page_alloc[i].page != ring_alloc[i].page) {
119 dma_unmap_page(priv->ddev, page_alloc[i].dma, 123 dma_unmap_page(priv->ddev, page_alloc[i].dma,
120 page_alloc[i].size, PCI_DMA_FROMDEVICE); 124 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
121 page = page_alloc[i].page; 125 page = page_alloc[i].page;
122 atomic_set(&page->_count, 1); 126 atomic_set(&page->_count, 1);
123 put_page(page); 127 put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
131 int i) 135 int i)
132{ 136{
133 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 137 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
138 u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
139
134 140
135 if (frags[i].offset + frag_info->frag_stride > frags[i].size) 141 if (next_frag_end > frags[i].page_size)
136 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, 142 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
137 PCI_DMA_FROMDEVICE); 143 PCI_DMA_FROMDEVICE);
138 144
139 if (frags[i].page) 145 if (frags[i].page)
140 put_page(frags[i].page); 146 put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
161 167
162 page_alloc = &ring->page_alloc[i]; 168 page_alloc = &ring->page_alloc[i];
163 dma_unmap_page(priv->ddev, page_alloc->dma, 169 dma_unmap_page(priv->ddev, page_alloc->dma,
164 page_alloc->size, PCI_DMA_FROMDEVICE); 170 page_alloc->page_size, PCI_DMA_FROMDEVICE);
165 page = page_alloc->page; 171 page = page_alloc->page;
166 atomic_set(&page->_count, 1); 172 atomic_set(&page->_count, 1);
167 put_page(page); 173 put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
184 i, page_count(page_alloc->page)); 190 i, page_count(page_alloc->page));
185 191
186 dma_unmap_page(priv->ddev, page_alloc->dma, 192 dma_unmap_page(priv->ddev, page_alloc->dma,
187 page_alloc->size, PCI_DMA_FROMDEVICE); 193 page_alloc->page_size, PCI_DMA_FROMDEVICE);
188 while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { 194 while (page_alloc->page_offset + frag_info->frag_stride <
195 page_alloc->page_size) {
189 put_page(page_alloc->page); 196 put_page(page_alloc->page);
190 page_alloc->offset += frag_info->frag_stride; 197 page_alloc->page_offset += frag_info->frag_stride;
191 } 198 }
192 page_alloc->page = NULL; 199 page_alloc->page = NULL;
193 } 200 }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
478 /* Save page reference in skb */ 485 /* Save page reference in skb */
479 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); 486 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
480 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); 487 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
481 skb_frags_rx[nr].page_offset = frags[nr].offset; 488 skb_frags_rx[nr].page_offset = frags[nr].page_offset;
482 skb->truesize += frag_info->frag_stride; 489 skb->truesize += frag_info->frag_stride;
483 frags[nr].page = NULL; 490 frags[nr].page = NULL;
484 } 491 }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
517 524
518 /* Get pointer to first fragment so we could copy the headers into the 525 /* Get pointer to first fragment so we could copy the headers into the
519 * (linear part of the) skb */ 526 * (linear part of the) skb */
520 va = page_address(frags[0].page) + frags[0].offset; 527 va = page_address(frags[0].page) + frags[0].page_offset;
521 528
522 if (length <= SMALL_PACKET_SIZE) { 529 if (length <= SMALL_PACKET_SIZE) {
523 /* We are copying all relevant data to the skb - temporarily 530 /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
645 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), 652 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
646 DMA_FROM_DEVICE); 653 DMA_FROM_DEVICE);
647 ethh = (struct ethhdr *)(page_address(frags[0].page) + 654 ethh = (struct ethhdr *)(page_address(frags[0].page) +
648 frags[0].offset); 655 frags[0].page_offset);
649 656
650 if (is_multicast_ether_addr(ethh->h_dest)) { 657 if (is_multicast_ether_addr(ethh->h_dest)) {
651 struct mlx4_mac_entry *entry; 658 struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
237struct mlx4_en_rx_alloc { 237struct mlx4_en_rx_alloc {
238 struct page *page; 238 struct page *page;
239 dma_addr_t dma; 239 dma_addr_t dma;
240 u32 offset; 240 u32 page_offset;
241 u32 size; 241 u32 page_size;
242}; 242};
243 243
244struct mlx4_en_tx_ring { 244struct mlx4_en_tx_ring {
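In the mlx4 hunks, mlx4_en_free_frag() now unmaps the page only when page_offset plus two strides would run past page_size, i.e. when the fragment being freed is the last user of that page. The arithmetic is easiest to see with concrete numbers; the sketch below uses a made-up 16 KB page and 4 KB stride purely for illustration.

/* Only the final stride-sized chunk of the page triggers the unmap. */
#include <stdio.h>

int main(void)
{
	unsigned page_size = 16384, stride = 4096;
	unsigned offsets[] = { 0, 4096, 8192, 12288 };

	for (int i = 0; i < 4; i++) {
		unsigned next_frag_end = offsets[i] + 2 * stride;

		printf("offset %5u -> %s\n", offsets[i],
		       next_frag_end > page_size ? "last frag, unmap page"
						 : "more frags follow");
	}
	return 0;
}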
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5472cbd34028..6ca30739625f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
180 return 0; 180 return 0;
181} 181}
182 182
183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) 183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
184 int csum)
184{ 185{
185 block->token = token; 186 block->token = token;
186 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); 187 if (csum) {
187 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 188 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
189 sizeof(block->data) - 2);
190 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
191 }
188} 192}
189 193
190static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) 194static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
191{ 195{
192 struct mlx5_cmd_mailbox *next = msg->next; 196 struct mlx5_cmd_mailbox *next = msg->next;
193 197
194 while (next) { 198 while (next) {
195 calc_block_sig(next->buf, token); 199 calc_block_sig(next->buf, token, csum);
196 next = next->next; 200 next = next->next;
197 } 201 }
198} 202}
199 203
200static void set_signature(struct mlx5_cmd_work_ent *ent) 204static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
201{ 205{
202 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 206 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
203 calc_chain_sig(ent->in, ent->token); 207 calc_chain_sig(ent->in, ent->token, csum);
204 calc_chain_sig(ent->out, ent->token); 208 calc_chain_sig(ent->out, ent->token, csum);
205} 209}
206 210
207static void poll_timeout(struct mlx5_cmd_work_ent *ent) 211static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
539 lay->type = MLX5_PCI_CMD_XPORT; 543 lay->type = MLX5_PCI_CMD_XPORT;
540 lay->token = ent->token; 544 lay->token = ent->token;
541 lay->status_own = CMD_OWNER_HW; 545 lay->status_own = CMD_OWNER_HW;
542 if (!cmd->checksum_disabled) 546 set_signature(ent, !cmd->checksum_disabled);
543 set_signature(ent);
544 dump_command(dev, ent, 1); 547 dump_command(dev, ent, 1);
545 ktime_get_ts(&ent->ts1); 548 ktime_get_ts(&ent->ts1);
546 549
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
773 776
774 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 777 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
775 block = next->buf; 778 block = next->buf;
776 if (xor8_buf(block, sizeof(*block)) != 0xff)
777 return -EINVAL;
778 779
779 memcpy(to, block->data, copy); 780 memcpy(to, block->data, copy);
780 to += copy; 781 to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1361 goto err_map; 1362 goto err_map;
1362 } 1363 }
1363 1364
1365 cmd->checksum_disabled = 1;
1364 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1366 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1365 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1367 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1366 1368
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1510 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1512 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1511 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1513 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1512 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1514 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
1513 case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; 1515 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
1514 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1516 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1515 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1517 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1516 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1518 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 443cc4d7b024..2231d93cc7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
366 goto err_in; 366 goto err_in;
367 } 367 }
368 368
369 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
370 name, pci_name(dev->pdev));
369 eq->eqn = out.eq_number; 371 eq->eqn = out.eq_number;
370 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 372 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
371 name, eq); 373 eq->name, eq);
372 if (err) 374 if (err)
373 goto err_eq; 375 goto err_eq;
374 376
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b47739b0b5f6..bc0f5fb66e24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; 165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; 166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
167 struct mlx5_cmd_set_hca_cap_mbox_out set_out; 167 struct mlx5_cmd_set_hca_cap_mbox_out set_out;
168 struct mlx5_profile *prof = dev->profile;
169 u64 flags; 168 u64 flags;
170 int csum = 1;
171 int err; 169 int err;
172 170
173 memset(&query_ctx, 0, sizeof(query_ctx)); 171 memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
197 memcpy(&set_ctx->hca_cap, &query_out->hca_cap, 195 memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
198 sizeof(set_ctx->hca_cap)); 196 sizeof(set_ctx->hca_cap));
199 197
200 if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
201 csum = !!prof->cmdif_csum;
202 flags = be64_to_cpu(set_ctx->hca_cap.flags);
203 if (csum)
204 flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
205 else
206 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
207
208 set_ctx->hca_cap.flags = cpu_to_be64(flags);
209 }
210
211 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 198 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
212 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; 199 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
213 200
201 flags = be64_to_cpu(query_out->hca_cap.flags);
202 /* disable checksum */
203 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
204
205 set_ctx->hca_cap.flags = cpu_to_be64(flags);
214 memset(&set_out, 0, sizeof(set_out)); 206 memset(&set_out, 0, sizeof(set_out));
215 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); 207 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
216 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); 208 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
225 if (err) 217 if (err)
226 goto query_ex; 218 goto query_ex;
227 219
228 if (!csum)
229 dev->cmd.checksum_disabled = 1;
230
231query_ex: 220query_ex:
232 kfree(query_out); 221 kfree(query_out);
233 kfree(set_ctx); 222 kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 3a2408d44820..7b12acf210f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
90 __be64 pas[0]; 90 __be64 pas[0];
91}; 91};
92 92
93enum {
94 MAX_RECLAIM_TIME_MSECS = 5000,
95};
96
93static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) 97static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
94{ 98{
95 struct rb_root *root = &dev->priv.page_root; 99 struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
279 int err; 283 int err;
280 int i; 284 int i;
281 285
286 if (nclaimed)
287 *nclaimed = 0;
288
282 memset(&in, 0, sizeof(in)); 289 memset(&in, 0, sizeof(in));
283 outlen = sizeof(*out) + npages * sizeof(out->pas[0]); 290 outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
284 out = mlx5_vzalloc(outlen); 291 out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
388 395
389int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) 396int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
390{ 397{
391 unsigned long end = jiffies + msecs_to_jiffies(5000); 398 unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
392 struct fw_page *fwp; 399 struct fw_page *fwp;
393 struct rb_node *p; 400 struct rb_node *p;
401 int nclaimed = 0;
394 int err; 402 int err;
395 403
396 do { 404 do {
397 p = rb_first(&dev->priv.page_root); 405 p = rb_first(&dev->priv.page_root);
398 if (p) { 406 if (p) {
399 fwp = rb_entry(p, struct fw_page, rb_node); 407 fwp = rb_entry(p, struct fw_page, rb_node);
400 err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); 408 err = reclaim_pages(dev, fwp->func_id,
409 optimal_reclaimed_pages(),
410 &nclaimed);
401 if (err) { 411 if (err) {
402 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 412 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
403 return err; 413 return err;
404 } 414 }
415 if (nclaimed)
416 end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
405 } 417 }
406 if (time_after(jiffies, end)) { 418 if (time_after(jiffies, end)) {
407 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); 419 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
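The pagealloc.c change resets the give-up deadline whenever a reclaim batch actually returns pages, so the driver only bails out after a full MAX_RECLAIM_TIME_MSECS with no progress at all. A rough userspace model of that loop, with a simulated reclaim function and the CPU-time clock standing in for jiffies:

/* Deadline-extended-on-progress retry loop. */
#include <stdio.h>
#include <time.h>

#define MAX_RECLAIM_TIME_MSECS 5000

static int pages_left = 10;

static int reclaim_batch(int *nclaimed)
{
	*nclaimed = pages_left > 0 ? 1 : 0;   /* pretend one page per call */
	pages_left -= *nclaimed;
	return 0;
}

int main(void)
{
	clock_t budget = (CLOCKS_PER_SEC / 1000) * MAX_RECLAIM_TIME_MSECS;
	clock_t end = clock() + budget;

	while (pages_left > 0) {
		int nclaimed = 0;

		if (reclaim_batch(&nclaimed))
			return 1;
		if (nclaimed)                  /* progress: push the deadline out */
			end = clock() + budget;
		if (clock() > end) {
			printf("giving up, %d pages not returned\n", pages_left);
			return 1;
		}
	}
	printf("all pages reclaimed\n");
	return 0;
}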
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c23..ea54d95e5b9f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
448 irq = irq_of_parse_and_map(node, 0); 448 irq = irq_of_parse_and_map(node, 0);
449 if (irq <= 0) { 449 if (irq <= 0) {
450 netdev_err(ndev, "irq_of_parse_and_map failed\n"); 450 netdev_err(ndev, "irq_of_parse_and_map failed\n");
451 return -EINVAL; 451 ret = -EINVAL;
452 goto irq_map_fail;
452 } 453 }
453 454
454 priv = netdev_priv(ndev); 455 priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * 473 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
473 TX_DESC_NUM, &priv->tx_base, 474 TX_DESC_NUM, &priv->tx_base,
474 GFP_DMA | GFP_KERNEL); 475 GFP_DMA | GFP_KERNEL);
475 if (priv->tx_desc_base == NULL) 476 if (priv->tx_desc_base == NULL) {
477 ret = -ENOMEM;
476 goto init_fail; 478 goto init_fail;
479 }
477 480
478 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * 481 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
479 RX_DESC_NUM, &priv->rx_base, 482 RX_DESC_NUM, &priv->rx_base,
480 GFP_DMA | GFP_KERNEL); 483 GFP_DMA | GFP_KERNEL);
481 if (priv->rx_desc_base == NULL) 484 if (priv->rx_desc_base == NULL) {
485 ret = -ENOMEM;
482 goto init_fail; 486 goto init_fail;
487 }
483 488
484 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, 489 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
485 GFP_ATOMIC); 490 GFP_ATOMIC);
486 if (!priv->tx_buf_base) 491 if (!priv->tx_buf_base) {
492 ret = -ENOMEM;
487 goto init_fail; 493 goto init_fail;
494 }
488 495
489 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, 496 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
490 GFP_ATOMIC); 497 GFP_ATOMIC);
491 if (!priv->rx_buf_base) 498 if (!priv->rx_buf_base) {
499 ret = -ENOMEM;
492 goto init_fail; 500 goto init_fail;
501 }
493 502
494 platform_set_drvdata(pdev, ndev); 503 platform_set_drvdata(pdev, ndev);
495 504
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
522init_fail: 531init_fail:
523 netdev_err(ndev, "init failed\n"); 532 netdev_err(ndev, "init failed\n");
524 moxart_mac_free_memory(ndev); 533 moxart_mac_free_memory(ndev);
525 534irq_map_fail:
535 free_netdev(ndev);
526 return ret; 536 return ret;
527} 537}
528 538
@@ -543,7 +553,7 @@ static const struct of_device_id moxart_mac_match[] = {
543 { } 553 { }
544}; 554};
545 555
546struct __initdata platform_driver moxart_mac_driver = { 556static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe, 557 .probe = moxart_mac_probe,
548 .remove = moxart_remove, 558 .remove = moxart_remove,
549 .driver = { 559 .driver = {
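Editorial aside: the moxart probe fix above routes every allocation failure through ret = -ENOMEM and a goto label, so the returned error code and the unwind path stay in sync, and the new irq_map_fail label frees the netdev on the earliest failure. A reduced userspace sketch of that goto-unwind idiom, with hypothetical names:

/* Illustrative only: error-code plus goto-unwind pattern, userspace C. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fake_probe(void)
{
	char *a, *b;
	int ret;

	a = malloc(64);
	if (!a) {
		ret = -ENOMEM;          /* record the cause ...            */
		goto out_none;          /* ... and unwind only what exists */
	}

	b = malloc(64);
	if (!b) {
		ret = -ENOMEM;
		goto out_free_a;
	}

	printf("probe ok\n");
	free(b);
	free(a);
	return 0;

out_free_a:
	free(a);
out_none:
	fprintf(stderr, "probe failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return fake_probe() ? 1 : 0;
}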
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1c..ff83a9fcd4c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -665,7 +665,7 @@ static int qlcnic_set_channels(struct net_device *dev,
665 return err; 665 return err;
666 } 666 }
667 667
668 if (channel->tx_count) { 668 if (qlcnic_82xx_check(adapter) && channel->tx_count) {
669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); 669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
670 if (err) 670 if (err)
671 return err; 671 return err;
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1794 .set_msglevel = qlcnic_set_msglevel, 1794 .set_msglevel = qlcnic_set_msglevel,
1795 .get_msglevel = qlcnic_get_msglevel, 1795 .get_msglevel = qlcnic_get_msglevel,
1796}; 1796};
1797
1798const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1799 .get_settings = qlcnic_get_settings,
1800 .get_drvinfo = qlcnic_get_drvinfo,
1801 .set_msglevel = qlcnic_set_msglevel,
1802 .get_msglevel = qlcnic_get_msglevel,
1803 .set_dump = qlcnic_set_dump,
1804};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fdf..9e61eb867452 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
432 usleep_range(10000, 11000); 432 usleep_range(10000, 11000);
433 433
434 if (!adapter->fw_work.work.func)
435 return;
436
434 cancel_delayed_work_sync(&adapter->fw_work); 437 cancel_delayed_work_sync(&adapter->fw_work);
435} 438}
436 439
@@ -2254,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2254 2257
2255 err = qlcnic_alloc_adapter_resources(adapter); 2258 err = qlcnic_alloc_adapter_resources(adapter);
2256 if (err) 2259 if (err)
2257 goto err_out_free_netdev; 2260 goto err_out_free_wq;
2258 2261
2259 adapter->dev_rst_time = jiffies; 2262 adapter->dev_rst_time = jiffies;
2260 adapter->ahw->revision_id = pdev->revision; 2263 adapter->ahw->revision_id = pdev->revision;
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 adapter->portnum = adapter->ahw->pci_func; 2278 adapter->portnum = adapter->ahw->pci_func;
2276 err = qlcnic_start_firmware(adapter); 2279 err = qlcnic_start_firmware(adapter);
2277 if (err) { 2280 if (err) {
2278 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 2281 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
2279 goto err_out_free_hw; 2282 "\t\tIf reboot doesn't help, try flashing the card\n");
2283 goto err_out_maintenance_mode;
2280 } 2284 }
2281 2285
2282 qlcnic_get_multiq_capability(adapter); 2286 qlcnic_get_multiq_capability(adapter);
@@ -2392,6 +2396,9 @@ err_out_disable_msi:
2392err_out_free_hw: 2396err_out_free_hw:
2393 qlcnic_free_adapter_resources(adapter); 2397 qlcnic_free_adapter_resources(adapter);
2394 2398
2399err_out_free_wq:
2400 destroy_workqueue(adapter->qlcnic_wq);
2401
2395err_out_free_netdev: 2402err_out_free_netdev:
2396 free_netdev(netdev); 2403 free_netdev(netdev);
2397 2404
@@ -2408,6 +2415,22 @@ err_out_disable_pdev:
2408 pci_set_drvdata(pdev, NULL); 2415 pci_set_drvdata(pdev, NULL);
2409 pci_disable_device(pdev); 2416 pci_disable_device(pdev);
2410 return err; 2417 return err;
2418
2419err_out_maintenance_mode:
2420 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2421 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
2422 err = register_netdev(netdev);
2423
2424 if (err) {
2425 dev_err(&pdev->dev, "Failed to register net device\n");
2426 qlcnic_clr_all_drv_state(adapter, 0);
2427 goto err_out_free_hw;
2428 }
2429
2430 pci_set_drvdata(pdev, adapter);
2431 qlcnic_add_sysfs(adapter);
2432
2433 return 0;
2411} 2434}
2412 2435
2413static void qlcnic_remove(struct pci_dev *pdev) 2436static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2541,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
2518static int qlcnic_open(struct net_device *netdev) 2541static int qlcnic_open(struct net_device *netdev)
2519{ 2542{
2520 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2543 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2544 u32 state;
2521 int err; 2545 int err;
2522 2546
2547 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
2548 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
2549 netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
2550
2551 return -EIO;
2552 }
2553
2523 netif_carrier_off(netdev); 2554 netif_carrier_off(netdev);
2524 2555
2525 err = qlcnic_attach(adapter); 2556 err = qlcnic_attach(adapter);
@@ -3228,6 +3259,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
3228 return; 3259 return;
3229 3260
3230 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 3261 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
3262 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
3263 netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
3264 __func__);
3265 qlcnic_api_unlock(adapter);
3266
3267 return;
3268 }
3231 3269
3232 if (state == QLCNIC_DEV_READY) { 3270 if (state == QLCNIC_DEV_READY) {
3233 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, 3271 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
@@ -3613,11 +3651,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
3613 u8 max_hw = QLCNIC_MAX_TX_RINGS; 3651 u8 max_hw = QLCNIC_MAX_TX_RINGS;
3614 u32 max_allowed; 3652 u32 max_allowed;
3615 3653
3616 if (!qlcnic_82xx_check(adapter)) {
3617 netdev_err(netdev, "No Multi TX-Q support\n");
3618 return -EINVAL;
3619 }
3620
3621 if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3654 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3622 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); 3655 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
3623 return -EINVAL; 3656 return -EINVAL;
@@ -3657,8 +3690,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
3657 u8 max_hw = adapter->ahw->max_rx_ques; 3690 u8 max_hw = adapter->ahw->max_rx_ques;
3658 u32 max_allowed; 3691 u32 max_allowed;
3659 3692
3660 if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && 3693 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3661 !qlcnic_use_msi) {
3662 netdev_err(netdev, "No RSS support in INT-x mode\n"); 3694 netdev_err(netdev, "No RSS support in INT-x mode\n");
3663 return -EINVAL; 3695 return -EINVAL;
3664 } 3696 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774ad..686f460b1502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
397{ 397{
398 struct net_device *netdev = adapter->netdev; 398 struct net_device *netdev = adapter->netdev;
399 399
400 rtnl_lock();
400 if (netif_running(netdev)) 401 if (netif_running(netdev))
401 __qlcnic_down(adapter, netdev); 402 __qlcnic_down(adapter, netdev);
402 403
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
407 /* After disabling SRIOV re-init the driver in default mode 408 /* After disabling SRIOV re-init the driver in default mode
408 configure opmode based on op_mode of function 409 configure opmode based on op_mode of function
409 */ 410 */
410 if (qlcnic_83xx_configure_opmode(adapter)) 411 if (qlcnic_83xx_configure_opmode(adapter)) {
412 rtnl_unlock();
411 return -EIO; 413 return -EIO;
414 }
412 415
413 if (netif_running(netdev)) 416 if (netif_running(netdev))
414 __qlcnic_up(adapter, netdev); 417 __qlcnic_up(adapter, netdev);
415 418
419 rtnl_unlock();
416 return 0; 420 return 0;
417} 421}
418 422
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
533 return -EIO; 537 return -EIO;
534 } 538 }
535 539
540 rtnl_lock();
536 if (netif_running(netdev)) 541 if (netif_running(netdev))
537 __qlcnic_down(adapter, netdev); 542 __qlcnic_down(adapter, netdev);
538 543
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
555 __qlcnic_up(adapter, netdev); 560 __qlcnic_up(adapter, netdev);
556 561
557error: 562error:
563 rtnl_unlock();
558 return err; 564 return err;
559} 565}
560 566
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc13..019f4377307f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1273{ 1273{
1274 struct device *dev = &adapter->pdev->dev; 1274 struct device *dev = &adapter->pdev->dev;
1275 u32 state;
1275 1276
1276 if (device_create_bin_file(dev, &bin_attr_port_stats)) 1277 if (device_create_bin_file(dev, &bin_attr_port_stats))
1277 dev_info(dev, "failed to create port stats sysfs entry"); 1278 dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1285 if (device_create_bin_file(dev, &bin_attr_mem)) 1286 if (device_create_bin_file(dev, &bin_attr_mem))
1286 dev_info(dev, "failed to create mem sysfs entry\n"); 1287 dev_info(dev, "failed to create mem sysfs entry\n");
1287 1288
1289 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1290 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1291 return;
1292
1288 if (device_create_bin_file(dev, &bin_attr_pci_config)) 1293 if (device_create_bin_file(dev, &bin_attr_pci_config))
1289 dev_info(dev, "failed to create pci config sysfs entry"); 1294 dev_info(dev, "failed to create pci config sysfs entry");
1295
1290 if (device_create_file(dev, &dev_attr_beacon)) 1296 if (device_create_file(dev, &dev_attr_beacon))
1291 dev_info(dev, "failed to create beacon sysfs entry"); 1297 dev_info(dev, "failed to create beacon sysfs entry");
1292 1298
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1307void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 1313void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1308{ 1314{
1309 struct device *dev = &adapter->pdev->dev; 1315 struct device *dev = &adapter->pdev->dev;
1316 u32 state;
1310 1317
1311 device_remove_bin_file(dev, &bin_attr_port_stats); 1318 device_remove_bin_file(dev, &bin_attr_port_stats);
1312 1319
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1315 device_remove_file(dev, &dev_attr_diag_mode); 1322 device_remove_file(dev, &dev_attr_diag_mode);
1316 device_remove_bin_file(dev, &bin_attr_crb); 1323 device_remove_bin_file(dev, &bin_attr_crb);
1317 device_remove_bin_file(dev, &bin_attr_mem); 1324 device_remove_bin_file(dev, &bin_attr_mem);
1325
1326 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1327 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1328 return;
1329
1318 device_remove_bin_file(dev, &bin_attr_pci_config); 1330 device_remove_bin_file(dev, &bin_attr_pci_config);
1319 device_remove_file(dev, &dev_attr_beacon); 1331 device_remove_file(dev, &dev_attr_beacon);
1320 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1332 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0f..6bc5db703920 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
740 int i; 740 int i;
741 741
742 if (!mpi_coredump) { 742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
744 return -ENOMEM; 744 return -EINVAL;
745 } 745 }
746 746
747 /* Try to get the spinlock, but dont worry if 747 /* Try to get the spinlock, but dont worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e247..7ad146080c36 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
1274 return; 1274 return;
1275 } 1275 }
1276 1276
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) { 1277 if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1; 1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue, 1280 queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa83..b57c278d3b46 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
690 EESR_TDE | EESR_ECI, 690 EESR_TDE | EESR_ECI,
691 .fdr_value = 0x0000070f,
692 .rmcr_value = 0x00000001,
691 693
692 .apr = 1, 694 .apr = 1,
693 .mpr = 1, 695 .mpr = 1,
694 .tpauser = 1, 696 .tpauser = 1,
695 .bculr = 1, 697 .bculr = 1,
696 .hw_swap = 1, 698 .hw_swap = 1,
699 .rpadir = 1,
700 .rpadir_value = 2 << 16,
697 .no_trimd = 1, 701 .no_trimd = 1,
698 .no_ade = 1, 702 .no_ade = 1,
699 .tsu = 1, 703 .tsu = 1,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 9f18ae984f9e..21f9ad6392e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), 444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), 445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), 446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
447 EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
448 EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
449 EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
450 EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
451 EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
452 EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
453 EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
454 EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
455 EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
456 EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
457 EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
458 EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
447}; 459};
448 460
449#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ 461#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -498,44 +510,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
498#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ 510#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
499 (1ULL << EF10_STAT_rx_length_error)) 511 (1ULL << EF10_STAT_rx_length_error))
500 512
501#if BITS_PER_LONG == 64 513/* These statistics are only provided if the firmware supports the
502#define STAT_MASK_BITMAP(bits) (bits) 514 * capability PM_AND_RXDP_COUNTERS.
503#else 515 */
504#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 516#define HUNT_PM_AND_RXDP_STAT_MASK ( \
505#endif 517 (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
506 518 (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
507static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) 519 (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
508{ 520 (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
509 static const unsigned long hunt_40g_stat_mask[] = { 521 (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
510 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 522 (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
511 HUNT_40G_EXTRA_STAT_MASK) 523 (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
512 }; 524 (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
513 static const unsigned long hunt_10g_only_stat_mask[] = { 525 (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
514 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 526 (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
515 HUNT_10G_ONLY_STAT_MASK) 527 (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
516 }; 528 (1ULL << EF10_STAT_rx_dp_emerg_wait))
529
530static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
531{
532 u64 raw_mask = HUNT_COMMON_STAT_MASK;
517 u32 port_caps = efx_mcdi_phy_get_caps(efx); 533 u32 port_caps = efx_mcdi_phy_get_caps(efx);
534 struct efx_ef10_nic_data *nic_data = efx->nic_data;
518 535
519 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 536 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
520 return hunt_40g_stat_mask; 537 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
521 else 538 else
522 return hunt_10g_only_stat_mask; 539 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
540
541 if (nic_data->datapath_caps &
542 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
543 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
544
545 return raw_mask;
546}
547
548static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
549{
550 u64 raw_mask = efx_ef10_raw_stat_mask(efx);
551
552#if BITS_PER_LONG == 64
553 mask[0] = raw_mask;
554#else
555 mask[0] = raw_mask & 0xffffffff;
556 mask[1] = raw_mask >> 32;
557#endif
523} 558}
524 559
525static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 560static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
526{ 561{
562 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
563
564 efx_ef10_get_stat_mask(efx, mask);
527 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 565 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
528 efx_ef10_stat_mask(efx), names); 566 mask, names);
529} 567}
530 568
531static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) 569static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
532{ 570{
533 struct efx_ef10_nic_data *nic_data = efx->nic_data; 571 struct efx_ef10_nic_data *nic_data = efx->nic_data;
534 const unsigned long *stats_mask = efx_ef10_stat_mask(efx); 572 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
535 __le64 generation_start, generation_end; 573 __le64 generation_start, generation_end;
536 u64 *stats = nic_data->stats; 574 u64 *stats = nic_data->stats;
537 __le64 *dma_stats; 575 __le64 *dma_stats;
538 576
577 efx_ef10_get_stat_mask(efx, mask);
578
539 dma_stats = efx->stats_buffer.addr; 579 dma_stats = efx->stats_buffer.addr;
540 nic_data = efx->nic_data; 580 nic_data = efx->nic_data;
541 581
@@ -543,8 +583,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
543 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 583 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
544 return 0; 584 return 0;
545 rmb(); 585 rmb();
546 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, 586 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
547 stats, efx->stats_buffer.addr, false); 587 stats, efx->stats_buffer.addr, false);
588 rmb();
548 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 589 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
549 if (generation_end != generation_start) 590 if (generation_end != generation_start)
550 return -EAGAIN; 591 return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
563static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, 604static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
564 struct rtnl_link_stats64 *core_stats) 605 struct rtnl_link_stats64 *core_stats)
565{ 606{
566 const unsigned long *mask = efx_ef10_stat_mask(efx); 607 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
567 struct efx_ef10_nic_data *nic_data = efx->nic_data; 608 struct efx_ef10_nic_data *nic_data = efx->nic_data;
568 u64 *stats = nic_data->stats; 609 u64 *stats = nic_data->stats;
569 size_t stats_count = 0, index; 610 size_t stats_count = 0, index;
570 int retry; 611 int retry;
571 612
613 efx_ef10_get_stat_mask(efx, mask);
614
572 /* If we're unlucky enough to read statistics during the DMA, wait 615 /* If we're unlucky enough to read statistics during the DMA, wait
573 * up to 10ms for it to finish (typically takes <500us) 616 * up to 10ms for it to finish (typically takes <500us)
574 */ 617 */
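Editorial aside: in the ef10.c changes above, the statistics mask becomes a u64 built from capability bits and is then unpacked into an unsigned long bitmap, split into two words when BITS_PER_LONG is 32. A standalone sketch of that unpacking and of testing a high bit afterwards; the bit position is a made-up stand-in, not the driver's enum value.

/* Illustrative only: unpack a 64-bit mask into unsigned long words. */
#include <stdint.h>
#include <stdio.h>

#define STAT_RX_PM_TRUNC_QBB 37   /* hypothetical bit position */

static void mask_to_words(uint64_t raw, unsigned long *words, int bits_per_long)
{
	if (bits_per_long == 64) {
		words[0] = (unsigned long)raw;
	} else {
		words[0] = (unsigned long)(raw & 0xffffffffUL);
		words[1] = (unsigned long)(raw >> 32);
	}
}

static int mask_test_bit(const unsigned long *words, int bit, int bits_per_long)
{
	return (words[bit / bits_per_long] >> (bit % bits_per_long)) & 1UL;
}

int main(void)
{
	uint64_t raw = (1ULL << 3) | (1ULL << STAT_RX_PM_TRUNC_QBB);
	unsigned long words[2] = { 0, 0 };

	mask_to_words(raw, words, 32);    /* pretend this is a 32-bit build */
	printf("bit 3:  %d\n", mask_test_bit(words, 3, 32));
	printf("bit 37: %d\n", mask_test_bit(words, STAT_RX_PM_TRUNC_QBB, 32));
	return 0;
}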
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb2..366c8e3e3784 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
27 27
28/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
29 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
30 * via these mechanisms then wait 20ms for the status word to be set. 30 * via these mechanisms then wait 250ms for the status word to be set.
31 */ 31 */
32#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
33#define MCDI_STATUS_DELAY_COUNT 200 33#define MCDI_STATUS_DELAY_COUNT 2500
34#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
36 36
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
800 } else { 800 } else {
801 int count; 801 int count;
802 802
803 /* Nobody was waiting for an MCDI request, so trigger a reset */
804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
805
806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */
807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
808 if (efx_mcdi_poll_reboot(efx)) 805 if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
810 udelay(MCDI_STATUS_DELAY_US); 807 udelay(MCDI_STATUS_DELAY_US);
811 } 808 }
812 mcdi->new_epoch = true; 809 mcdi->new_epoch = true;
810
811 /* Nobody was waiting for an MCDI request, so trigger a reset */
812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
813 } 813 }
814 814
815 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
963 bool *was_attached) 963 bool *was_attached)
964{ 964{
965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
967 size_t outlen; 967 size_t outlen;
968 int rc; 968 int rc;
969 969
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
981 goto fail; 981 goto fail;
982 } 982 }
983 983
984 /* We currently assume we have control of the external link
985 * and are completely trusted by firmware. Abort probing
986 * if that's not true for this function.
987 */
988 if (driver_operating &&
989 outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
990 (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
991 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
992 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
993 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
994 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
995 netif_err(efx, probe, efx->net_dev,
996 "This driver version only supports one function per port\n");
997 return -ENODEV;
998 }
999
984 if (was_attached != NULL) 1000 if (was_attached != NULL)
985 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 1001 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
986 return 0; 1002 return 0;
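Editorial aside: the new DRV_ATTACH check above requires both the LINKCTRL and TRUSTED flag bits and rejects anything less. A standalone sketch of that "all required bits present" test, using hypothetical flag values rather than the MCDI definitions:

/* Illustrative only: require a full set of flag bits, not just any of them. */
#include <stdio.h>

#define FLAG_LINKCTRL (1u << 0)
#define FLAG_TRUSTED  (1u << 1)

static int supported_function(unsigned int func_flags)
{
	const unsigned int required = FLAG_LINKCTRL | FLAG_TRUSTED;

	/* Both bits must be set; any subset is rejected. */
	return (func_flags & required) == required;
}

int main(void)
{
	printf("%d\n", supported_function(FLAG_LINKCTRL | FLAG_TRUSTED)); /* 1 */
	printf("%d\n", supported_function(FLAG_LINKCTRL));                /* 0 */
	return 0;
}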
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8e..e0a63ddb7a6c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ 2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ 2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ 2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
2577#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ 2577/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2578#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ 2578 * capability only.
2579 */
2580#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
2581/* enum: PM discard_bb_overflow counter. Valid for EF10 with
2582 * PM_AND_RXDP_COUNTERS capability only.
2583 */
2584#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
2585/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2586 * capability only.
2587 */
2588#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
2589/* enum: PM discard_vfifo_full counter. Valid for EF10 with
2590 * PM_AND_RXDP_COUNTERS capability only.
2591 */
2592#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
2593/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2594 * capability only.
2595 */
2596#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
2597/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2598 * capability only.
2599 */
2600#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
2601/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2602 * capability only.
2603 */
2604#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
2605/* enum: RXDP counter: Number of packets dropped due to the queue being
2606 * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2607 */
2608#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
2609/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
2610 * with PM_AND_RXDP_COUNTERS capability only.
2611 */
2612#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
2613/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
2614 * PM_AND_RXDP_COUNTERS capability only.
2615 */
2616#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
2617/* enum: RXDP counter: Number of times an emergency descriptor fetch was
2618 * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2619 */
2620#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
2621/* enum: RXDP counter: Number of times the DPCPU waited for an existing
2622 * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2623 */
2624#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
2625/* enum: Start of GMAC stats buffer space, for Siena only. */
2626#define MC_CMD_GMAC_DMABUF_START 0x40
2627/* enum: End of GMAC stats buffer space, for Siena only. */
2628#define MC_CMD_GMAC_DMABUF_END 0x5f
2579#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ 2629#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
2580#define MC_CMD_MAC_NSTATS 0x61 /* enum */ 2630#define MC_CMD_MAC_NSTATS 0x61 /* enum */
2581 2631
@@ -5065,6 +5115,8 @@
5065#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 5115#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
5066#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 5116#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
5067#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 5117#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
5118#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
5119#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
5068/* RxDPCPU firmware id. */ 5120/* RxDPCPU firmware id. */
5069#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 5121#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
5070#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 5122#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index e7dbd2dd202e..9826594c8a48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -469,8 +469,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
469 * @count: Length of the @desc array 469 * @count: Length of the @desc array
470 * @mask: Bitmask of which elements of @desc are enabled 470 * @mask: Bitmask of which elements of @desc are enabled
471 * @stats: Buffer to update with the converted statistics. The length 471 * @stats: Buffer to update with the converted statistics. The length
472 * of this array must be at least the number of set bits in the 472 * of this array must be at least @count.
473 * first @count bits of @mask.
474 * @dma_buf: DMA buffer containing hardware statistics 473 * @dma_buf: DMA buffer containing hardware statistics
475 * @accumulate: If set, the converted values will be added rather than 474 * @accumulate: If set, the converted values will be added rather than
476 * directly stored to the corresponding elements of @stats 475 * directly stored to the corresponding elements of @stats
@@ -503,11 +502,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
503 } 502 }
504 503
505 if (accumulate) 504 if (accumulate)
506 *stats += val; 505 stats[index] += val;
507 else 506 else
508 *stats = val; 507 stats[index] = val;
509 } 508 }
510
511 ++stats;
512 } 509 }
513} 510}
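Editorial aside: the nic.c hunk above stops advancing the output pointer for every descriptor and instead stores each enabled statistic at stats[index], so the output array is indexed by descriptor position and disabled entries keep their slots. A standalone sketch of that indexed store, with made-up data:

/* Illustrative only: store enabled stats by descriptor index. */
#include <stdio.h>

#define N_STATS 6

int main(void)
{
	/* Pretend DMA buffer of raw values and a mask of enabled stats. */
	unsigned long long dma_buf[N_STATS] = { 10, 20, 30, 40, 50, 60 };
	unsigned int mask = (1u << 1) | (1u << 4);       /* only stats 1 and 4 enabled */
	unsigned long long stats[N_STATS] = { 0 };

	for (int index = 0; index < N_STATS; index++) {
		if (!(mask & (1u << index)))
			continue;                        /* disabled stats keep their slot */
		stats[index] = dma_buf[index];           /* indexed store, not *stats++ */
	}

	for (int index = 0; index < N_STATS; index++)
		printf("stat %d = %llu\n", index, stats[index]);
	return 0;
}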
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index fda29d39032f..890bbbe8320e 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -386,6 +386,18 @@ enum {
386 EF10_STAT_rx_align_error, 386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error, 387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops, 388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_rx_pm_trunc_bb_overflow,
390 EF10_STAT_rx_pm_discard_bb_overflow,
391 EF10_STAT_rx_pm_trunc_vfifo_full,
392 EF10_STAT_rx_pm_discard_vfifo_full,
393 EF10_STAT_rx_pm_trunc_qbb,
394 EF10_STAT_rx_pm_discard_qbb,
395 EF10_STAT_rx_pm_discard_mapping,
396 EF10_STAT_rx_dp_q_disabled_packets,
397 EF10_STAT_rx_dp_di_dropped_packets,
398 EF10_STAT_rx_dp_streaming_packets,
399 EF10_STAT_rx_dp_emerg_fetch,
400 EF10_STAT_rx_dp_emerg_wait,
389 EF10_STAT_COUNT 401 EF10_STAT_COUNT
390}; 402};
391 403
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5730fe2445a6..98eedb90cdc3 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] = {
1124 void __iomem *__ioaddr = ioaddr; \ 1124 void __iomem *__ioaddr = ioaddr; \
1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1126 __len -= 2; \ 1126 __len -= 2; \
1127 SMC_outw(*(u16 *)__ptr, ioaddr, \ 1127 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1128 DATA_REG(lp)); \
1129 __ptr += 2; \ 1128 __ptr += 2; \
1130 } \ 1129 } \
1131 if (SMC_CAN_USE_DATACS && lp->datacs) \ 1130 if (SMC_CAN_USE_DATACS && lp->datacs) \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] = {
1133 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ 1132 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
1134 if (__len & 2) { \ 1133 if (__len & 2) { \
1135 __ptr += (__len & ~3); \ 1134 __ptr += (__len & ~3); \
1136 SMC_outw(*((u16 *)__ptr), ioaddr, \ 1135 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1137 DATA_REG(lp)); \
1138 } \ 1136 } \
1139 } else if (SMC_16BIT(lp)) \ 1137 } else if (SMC_16BIT(lp)) \
1140 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ 1138 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187a..cc3ce557e4aa 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -639,13 +639,6 @@ void cpsw_rx_handler(void *token, int len, int status)
639static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 639static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
640{ 640{
641 struct cpsw_priv *priv = dev_id; 641 struct cpsw_priv *priv = dev_id;
642 u32 rx, tx, rx_thresh;
643
644 rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
645 rx = __raw_readl(&priv->wr_regs->rx_stat);
646 tx = __raw_readl(&priv->wr_regs->tx_stat);
647 if (!rx_thresh && !rx && !tx)
648 return IRQ_NONE;
649 642
650 cpsw_intr_disable(priv); 643 cpsw_intr_disable(priv);
651 if (priv->irq_enabled == true) { 644 if (priv->irq_enabled == true) {
@@ -1169,9 +1162,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
1169 } 1162 }
1170 } 1163 }
1171 1164
1165 napi_enable(&priv->napi);
1172 cpdma_ctlr_start(priv->dma); 1166 cpdma_ctlr_start(priv->dma);
1173 cpsw_intr_enable(priv); 1167 cpsw_intr_enable(priv);
1174 napi_enable(&priv->napi);
1175 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1168 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1176 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1169 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1177 1170
@@ -1771,8 +1764,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1771 } 1764 }
1772 data->mac_control = prop; 1765 data->mac_control = prop;
1773 1766
1774 if (!of_property_read_u32(node, "dual_emac", &prop)) 1767 if (of_property_read_bool(node, "dual_emac"))
1775 data->dual_emac = prop; 1768 data->dual_emac = 1;
1776 1769
1777 /* 1770 /*
1778 * Populate all the child nodes here... 1771 * Populate all the child nodes here...
@@ -1782,7 +1775,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1782 if (ret) 1775 if (ret)
1783 pr_warn("Doesn't have any child node\n"); 1776 pr_warn("Doesn't have any child node\n");
1784 1777
1785 for_each_node_by_name(slave_node, "slave") { 1778 for_each_child_of_node(node, slave_node) {
1786 struct cpsw_slave_data *slave_data = data->slave_data + i; 1779 struct cpsw_slave_data *slave_data = data->slave_data + i;
1787 const void *mac_addr = NULL; 1780 const void *mac_addr = NULL;
1788 u32 phyid; 1781 u32 phyid;
@@ -1791,6 +1784,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1791 struct device_node *mdio_node; 1784 struct device_node *mdio_node;
1792 struct platform_device *mdio; 1785 struct platform_device *mdio;
1793 1786
1787 /* This is no slave child node, continue */
1788 if (strcmp(slave_node->name, "slave"))
1789 continue;
1790
1794 parp = of_get_property(slave_node, "phy_id", &lenp); 1791 parp = of_get_property(slave_node, "phy_id", &lenp);
1795 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1792 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1796 pr_err("Missing slave[%d] phy_id property\n", i); 1793 pr_err("Missing slave[%d] phy_id property\n", i);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 67df09ea9d04..6a32ef9d63ae 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { 876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
879 } 879 } else if (!netdev_mc_empty(ndev)) {
880 if (!netdev_mc_empty(ndev)) {
881 struct netdev_hw_addr *ha; 880 struct netdev_hw_addr *ha;
882 881
883 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 882 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fdf..bdf697b184ae 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 33
34#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
35#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.1"
36#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
37 37
38#include <linux/types.h> 38#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 1705
1706 if (unlikely(vlan_tx_tag_present(skb))) { 1706 if (unlikely(vlan_tx_tag_present(skb))) {
1707 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1707 u16 vid_pcp = vlan_tx_tag_get(skb);
1708
1709 /* drop CFI/DEI bit, register needs VID and PCP */
1710 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1711 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1712 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1708 /* request tagging */ 1713 /* request tagging */
1709 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1714 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 } 1715 }
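Editorial aside: the via-rhine hunk above strips the DEI/CFI bit out of the 16-bit VLAN TCI before writing the TX descriptor, keeping the VID in bits 0-11 and shifting the PCP field down by one so the two become contiguous. A standalone sketch of that transformation, using the standard 802.1Q field masks:

/* Illustrative only: drop the DEI/CFI bit from an 802.1Q TCI. */
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK  0x0fff   /* VID: bits 0-11 of the TCI */
#define VLAN_PRIO_MASK 0xe000   /* PCP: bits 13-15 of the TCI */

static uint16_t drop_dei_bit(uint16_t tci)
{
	/* Keep VID in place, move PCP down by one bit to close the DEI gap. */
	return (tci & VLAN_VID_MASK) | ((tci & VLAN_PRIO_MASK) >> 1);
}

int main(void)
{
	uint16_t tci = (5u << 13) | (1u << 12) | 100u;   /* PCP=5, DEI=1, VID=100 */
	uint16_t out = drop_dei_bit(tci);

	printf("tci=0x%04x -> 0x%04x (VID=%u, PCP=%u)\n",
	       tci, out, out & VLAN_VID_MASK, out >> 12);
	return 0;
}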
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240ca..0029148077a9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
299 299
300 /* Init descriptor indexes */
301 lp->tx_bd_ci = 0;
302 lp->tx_bd_next = 0;
303 lp->tx_bd_tail = 0;
304 lp->rx_bd_ci = 0;
305
300 return 0; 306 return 0;
301 307
302out: 308out:
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 0721e72f9299..5af1c3e5032a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
975 return -EINVAL; /* Cannot change this parameter when up */ 975 return -EINVAL; /* Cannot change this parameter when up */
976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) 976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
977 return -ENOBUFS; 977 return -ENOBUFS;
978 ym->bitrate = 9600;
979 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { 978 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
980 kfree(ym); 979 kfree(ym);
981 return -EFAULT; 980 return -EFAULT;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
82 82
83 struct mutex buffer_mutex; /* only used to protect buf */ 83 struct mutex buffer_mutex; /* only used to protect buf */
84 struct completion tx_complete; 84 struct completion tx_complete;
85 struct work_struct irqwork;
86 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ 85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
87}; 86};
88 87
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
344 if (ret) 343 if (ret)
345 goto err; 344 goto err;
346 345
346 INIT_COMPLETION(devrec->tx_complete);
347
347 /* Set TXNTRIG bit of TXNCON to send packet */ 348 /* Set TXNTRIG bit of TXNCON to send packet */
348 ret = read_short_reg(devrec, REG_TXNCON, &val); 349 ret = read_short_reg(devrec, REG_TXNCON, &val);
349 if (ret) 350 if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
354 val |= 0x4; 355 val |= 0x4;
355 write_short_reg(devrec, REG_TXNCON, val); 356 write_short_reg(devrec, REG_TXNCON, val);
356 357
357 INIT_COMPLETION(devrec->tx_complete);
358
359 /* Wait for the device to send the TX complete interrupt. */ 358 /* Wait for the device to send the TX complete interrupt. */
360 ret = wait_for_completion_interruptible_timeout( 359 ret = wait_for_completion_interruptible_timeout(
361 &devrec->tx_complete, 360 &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
590static irqreturn_t mrf24j40_isr(int irq, void *data) 589static irqreturn_t mrf24j40_isr(int irq, void *data)
591{ 590{
592 struct mrf24j40 *devrec = data; 591 struct mrf24j40 *devrec = data;
593
594 disable_irq_nosync(irq);
595
596 schedule_work(&devrec->irqwork);
597
598 return IRQ_HANDLED;
599}
600
601static void mrf24j40_isrwork(struct work_struct *work)
602{
603 struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
604 u8 intstat; 592 u8 intstat;
605 int ret; 593 int ret;
606 594
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
618 mrf24j40_handle_rx(devrec); 606 mrf24j40_handle_rx(devrec);
619 607
620out: 608out:
621 enable_irq(devrec->spi->irq); 609 return IRQ_HANDLED;
622} 610}
623 611
624static int mrf24j40_probe(struct spi_device *spi) 612static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
642 630
643 mutex_init(&devrec->buffer_mutex); 631 mutex_init(&devrec->buffer_mutex);
644 init_completion(&devrec->tx_complete); 632 init_completion(&devrec->tx_complete);
645 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
646 devrec->spi = spi; 633 devrec->spi = spi;
647 spi_set_drvdata(spi, devrec); 634 spi_set_drvdata(spi, devrec);
648 635
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
688 val &= ~0x3; /* Clear RX mode (normal) */ 675 val &= ~0x3; /* Clear RX mode (normal) */
689 write_short_reg(devrec, REG_RXMCR, val); 676 write_short_reg(devrec, REG_RXMCR, val);
690 677
691 ret = request_irq(spi->irq, 678 ret = request_threaded_irq(spi->irq,
692 mrf24j40_isr, 679 NULL,
693 IRQF_TRIGGER_FALLING, 680 mrf24j40_isr,
694 dev_name(&spi->dev), 681 IRQF_TRIGGER_LOW|IRQF_ONESHOT,
695 devrec); 682 dev_name(&spi->dev),
683 devrec);
696 684
697 if (ret) { 685 if (ret) {
698 dev_err(printdev(devrec), "Unable to get IRQ"); 686 dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
721 dev_dbg(printdev(devrec), "remove\n"); 709 dev_dbg(printdev(devrec), "remove\n");
722 710
723 free_irq(spi->irq, devrec); 711 free_irq(spi->irq, devrec);
724 flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
725 ieee802154_unregister_device(devrec->dev); 712 ieee802154_unregister_device(devrec->dev);
726 ieee802154_free_device(devrec->dev); 713 ieee802154_free_device(devrec->dev);
727 /* TODO: Will ieee802154_free_device() wait until ->xmit() is 714 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a34d6bf5e43b..cc70ecfc7062 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock);
432 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
433 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
434 * transmission of another packet */ 435 * transmission of another packet */
435 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
436 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock);
437 sl_unlock(sl); 439 sl_unlock(sl);
438 return; 440 return;
439 } 441 }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
441 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442 sl->xleft -= actual; 444 sl->xleft -= actual;
443 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock);
444} 447}
445 448
446static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1293 if (unlikely(!noblock)) 1293 if (unlikely(!noblock))
1294 add_wait_queue(&tfile->wq.wait, &wait); 1294 add_wait_queue(&tfile->wq.wait, &wait);
1295 while (len) { 1295 while (len) {
1296 current->state = TASK_INTERRUPTIBLE; 1296 if (unlikely(!noblock))
1297 current->state = TASK_INTERRUPTIBLE;
1297 1298
1298 /* Read frames from the queue */ 1299 /* Read frames from the queue */
1299 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { 1300 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1320 break; 1321 break;
1321 } 1322 }
1322 1323
1323 current->state = TASK_RUNNING; 1324 if (unlikely(!noblock)) {
1324 if (unlikely(!noblock)) 1325 current->state = TASK_RUNNING;
1325 remove_wait_queue(&tfile->wq.wait, &wait); 1326 remove_wait_queue(&tfile->wq.wait, &wait);
1327 }
1326 1328
1327 return ret; 1329 return ret;
1328} 1330}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 3569293df872..846cc19c04f2 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -36,8 +36,8 @@
36#define AX_RXHDR_L4_TYPE_TCP 16 36#define AX_RXHDR_L4_TYPE_TCP 16
37#define AX_RXHDR_L3CSUM_ERR 2 37#define AX_RXHDR_L3CSUM_ERR 2
38#define AX_RXHDR_L4CSUM_ERR 1 38#define AX_RXHDR_L4CSUM_ERR 1
39#define AX_RXHDR_CRC_ERR ((u32)BIT(31)) 39#define AX_RXHDR_CRC_ERR ((u32)BIT(29))
40#define AX_RXHDR_DROP_ERR ((u32)BIT(30)) 40#define AX_RXHDR_DROP_ERR ((u32)BIT(31))
41#define AX_ACCESS_MAC 0x01 41#define AX_ACCESS_MAC 0x01
42#define AX_ACCESS_PHY 0x02 42#define AX_ACCESS_PHY 0x02
43#define AX_ACCESS_EEPROM 0x04 43#define AX_ACCESS_EEPROM 0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
1406 .tx_fixup = ax88179_tx_fixup, 1406 .tx_fixup = ax88179_tx_fixup,
1407}; 1407};
1408 1408
1409static const struct driver_info samsung_info = {
1410 .description = "Samsung USB Ethernet Adapter",
1411 .bind = ax88179_bind,
1412 .unbind = ax88179_unbind,
1413 .status = ax88179_status,
1414 .link_reset = ax88179_link_reset,
1415 .reset = ax88179_reset,
1416 .stop = ax88179_stop,
1417 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1418 .rx_fixup = ax88179_rx_fixup,
1419 .tx_fixup = ax88179_tx_fixup,
1420};
1421
1409static const struct usb_device_id products[] = { 1422static const struct usb_device_id products[] = {
1410{ 1423{
1411 /* ASIX AX88179 10/100/1000 */ 1424 /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
1418}, { 1431}, {
1419 /* Sitecom USB 3.0 to Gigabit Adapter */ 1432 /* Sitecom USB 3.0 to Gigabit Adapter */
1420 USB_DEVICE(0x0df6, 0x0072), 1433 USB_DEVICE(0x0df6, 0x0072),
1421 .driver_info = (unsigned long) &sitecom_info, 1434 .driver_info = (unsigned long)&sitecom_info,
1435}, {
1436 /* Samsung USB Ethernet Adapter */
1437 USB_DEVICE(0x04e8, 0xa100),
1438 .driver_info = (unsigned long)&samsung_info,
1422}, 1439},
1423 { }, 1440 { },
1424}; 1441};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb9460349d..c6867f926cff 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
303 rx_ctl |= 0x02; 303 rx_ctl |= 0x02;
304 } else if (net->flags & IFF_ALLMULTI || 304 } else if (net->flags & IFF_ALLMULTI ||
305 netdev_mc_count(net) > DM_MAX_MCAST) { 305 netdev_mc_count(net) > DM_MAX_MCAST) {
306 rx_ctl |= 0x04; 306 rx_ctl |= 0x08;
307 } else if (!netdev_mc_empty(net)) { 307 } else if (!netdev_mc_empty(net)) {
308 struct netdev_hw_addr *ha; 308 struct netdev_hw_addr *ha;
309 309
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6312332afeba..818ce90185b5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,7 +714,8 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 717 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
718 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 719
719 /* 4. Gobi 1000 devices */ 720 /* 4. Gobi 1000 devices */
720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 721 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7b331e613e02..90a429b7ebad 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1241 if (num_sgs == 1) 1241 if (num_sgs == 1)
1242 return 0; 1242 return 0;
1243 1243
1244 urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); 1244 /* reserve one for zero packet */
1245 urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1246 GFP_ATOMIC);
1245 if (!urb->sg) 1247 if (!urb->sg)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1305 if (build_dma_sg(skb, urb) < 0) 1307 if (build_dma_sg(skb, urb) < 0)
1306 goto drop; 1308 goto drop;
1307 } 1309 }
1308 entry->length = length = urb->transfer_buffer_length; 1310 length = urb->transfer_buffer_length;
1309 1311
1310 /* don't assume the hardware handles USB_ZERO_PACKET 1312 /* don't assume the hardware handles USB_ZERO_PACKET
1311 * NOTE: strictly conforming cdc-ether devices should expect 1313 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1317 if (length % dev->maxpacket == 0) { 1319 if (length % dev->maxpacket == 0) {
1318 if (!(info->flags & FLAG_SEND_ZLP)) { 1320 if (!(info->flags & FLAG_SEND_ZLP)) {
1319 if (!(info->flags & FLAG_MULTI_PACKET)) { 1321 if (!(info->flags & FLAG_MULTI_PACKET)) {
1320 urb->transfer_buffer_length++; 1322 length++;
1321 if (skb_tailroom(skb)) { 1323 if (skb_tailroom(skb) && !urb->num_sgs) {
1322 skb->data[skb->len] = 0; 1324 skb->data[skb->len] = 0;
1323 __skb_put(skb, 1); 1325 __skb_put(skb, 1);
1324 } 1326 } else if (urb->num_sgs)
1327 sg_set_buf(&urb->sg[urb->num_sgs++],
1328 dev->padding_pkt, 1);
1325 } 1329 }
1326 } else 1330 } else
1327 urb->transfer_flags |= URB_ZERO_PACKET; 1331 urb->transfer_flags |= URB_ZERO_PACKET;
1328 } 1332 }
1333 entry->length = urb->transfer_buffer_length = length;
1329 1334
1330 spin_lock_irqsave(&dev->txq.lock, flags); 1335 spin_lock_irqsave(&dev->txq.lock, flags);
1331 retval = usb_autopm_get_interface_async(dev->intf); 1336 retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1509 1514
1510 usb_kill_urb(dev->interrupt); 1515 usb_kill_urb(dev->interrupt);
1511 usb_free_urb(dev->interrupt); 1516 usb_free_urb(dev->interrupt);
1517 kfree(dev->padding_pkt);
1512 1518
1513 free_netdev(net); 1519 free_netdev(net);
1514} 1520}
@@ -1679,9 +1685,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1679 /* initialize max rx_qlen and tx_qlen */ 1685 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev); 1686 usbnet_update_max_qlen(dev);
1681 1687
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt) {
1692 status = -ENOMEM;
1693 goto out4;
1694 }
1695 }
1696
1682 status = register_netdev (net); 1697 status = register_netdev (net);
1683 if (status) 1698 if (status)
1684 goto out4; 1699 goto out5;
1685 netif_info(dev, probe, dev->net, 1700 netif_info(dev, probe, dev->net,
1686 "register '%s' at usb-%s-%s, %s, %pM\n", 1701 "register '%s' at usb-%s-%s, %s, %pM\n",
1687 udev->dev.driver->name, 1702 udev->dev.driver->name,
@@ -1699,6 +1714,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1699 1714
1700 return 0; 1715 return 0;
1701 1716
1717out5:
1718 kfree(dev->padding_pkt);
1702out4: 1719out4:
1703 usb_free_urb(dev->interrupt); 1720 usb_free_urb(dev->interrupt);
1704out3: 1721out3:
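Editorial aside: the usbnet change above handles transfers whose length is an exact multiple of the endpoint packet size: pad inside the skb when there is tailroom and no scatter-gather, otherwise append one scatterlist entry pointing at a preallocated padding byte, or request a ZLP when the device accepts one. A rough decision sketch with hypothetical flags and helpers (not the driver's API):

/* Illustrative only: choosing how to avoid an unintended exact-multiple transfer. */
#include <stdbool.h>
#include <stdio.h>

enum fixup { FIXUP_NONE, FIXUP_PAD_IN_SKB, FIXUP_PAD_SG_ENTRY, FIXUP_ZLP_FLAG };

static enum fixup short_packet_fixup(size_t len, size_t maxpacket,
				     bool dev_sends_zlp, bool using_sg,
				     bool skb_has_tailroom)
{
	if (len % maxpacket != 0)
		return FIXUP_NONE;               /* already a short packet */
	if (dev_sends_zlp)
		return FIXUP_ZLP_FLAG;           /* let the controller send a ZLP */
	if (!using_sg && skb_has_tailroom)
		return FIXUP_PAD_IN_SKB;         /* append one byte in place */
	return FIXUP_PAD_SG_ENTRY;               /* extra sg entry -> padding byte */
}

int main(void)
{
	printf("%d\n", short_packet_fixup(1024, 512, false, true, false)); /* 2 */
	printf("%d\n", short_packet_fixup(1000, 512, false, false, true)); /* 0 */
	return 0;
}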
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index defec2b3c5a4..9fbdfcd1e1a0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -938,7 +938,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
938 return -EINVAL; 938 return -EINVAL;
939 } else { 939 } else {
940 vi->curr_queue_pairs = queue_pairs; 940 vi->curr_queue_pairs = queue_pairs;
941 schedule_delayed_work(&vi->refill, 0); 941 /* virtnet_open() will refill when device is going to up. */
942 if (dev->flags & IFF_UP)
943 schedule_delayed_work(&vi->refill, 0);
942 } 944 }
943 945
944 return 0; 946 return 0;
@@ -1116,6 +1118,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1116{ 1118{
1117 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1119 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1118 1120
1121 mutex_lock(&vi->config_lock);
1122
1123 if (!vi->config_enable)
1124 goto done;
1125
1119 switch(action & ~CPU_TASKS_FROZEN) { 1126 switch(action & ~CPU_TASKS_FROZEN) {
1120 case CPU_ONLINE: 1127 case CPU_ONLINE:
1121 case CPU_DOWN_FAILED: 1128 case CPU_DOWN_FAILED:
@@ -1128,6 +1135,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1128 default: 1135 default:
1129 break; 1136 break;
1130 } 1137 }
1138
1139done:
1140 mutex_unlock(&vi->config_lock);
1131 return NOTIFY_OK; 1141 return NOTIFY_OK;
1132} 1142}
1133 1143
@@ -1733,7 +1743,9 @@ static int virtnet_restore(struct virtio_device *vdev)
1733 vi->config_enable = true; 1743 vi->config_enable = true;
1734 mutex_unlock(&vi->config_lock); 1744 mutex_unlock(&vi->config_lock);
1735 1745
1746 rtnl_lock();
1736 virtnet_set_queues(vi, vi->curr_queue_pairs); 1747 virtnet_set_queues(vi, vi->curr_queue_pairs);
1748 rtnl_unlock();
1737 1749
1738 return 0; 1750 return 0;
1739} 1751}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d1292fe746bc..2ef5b6219f3f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
952 952
953 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
954 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb(); 955 rcu_assign_sk_user_data(vs->sock->sk, NULL);
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk); 956 vxlan_notify_del_rx_port(sk);
958 spin_unlock(&vn->sock_lock); 957 spin_unlock(&vn->sock_lock);
959 958
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1048 1047
1049 port = inet_sk(sk)->inet_sport; 1048 port = inet_sk(sk)->inet_sport;
1050 1049
1051 smp_read_barrier_depends(); 1050 vs = rcu_dereference_sk_user_data(sk);
1052 vs = (struct vxlan_sock *)sk->sk_user_data;
1053 if (!vs) 1051 if (!vs)
1054 goto drop; 1052 goto drop;
1055 1053
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2302 atomic_set(&vs->refcnt, 1); 2300 atomic_set(&vs->refcnt, 1);
2303 vs->rcv = rcv; 2301 vs->rcv = rcv;
2304 vs->data = data; 2302 vs->data = data;
2305 smp_wmb(); 2303 rcu_assign_sk_user_data(vs->sock->sk, vs);
2306 vs->sock->sk->sk_user_data = vs;
2307 2304
2308 spin_lock(&vn->sock_lock); 2305 spin_lock(&vn->sock_lock);
2309 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2306 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f268751..bcfff0d62de4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
1972 } 1972 }
1973 1973
1974 i = port->index; 1974 i = port->index;
1975 memset(&sync, 0, sizeof(sync));
1975 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); 1976 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
1976 /* Lucky card and linux use same encoding here */ 1977 /* Lucky card and linux use same encoding here */
1977 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) == 1978 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a70cc7..4c0a69779b89 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
355 ifr->ifr_settings.size = size; /* data size wanted */ 355 ifr->ifr_settings.size = size; /* data size wanted */
356 return -ENOBUFS; 356 return -ENOBUFS;
357 } 357 }
358 memset(&line, 0, sizeof(line));
358 line.clock_type = get_status(port)->clocking; 359 line.clock_type = get_status(port)->clocking;
359 line.clock_rate = 0; 360 line.clock_rate = 0;
360 line.loopback = 0; 361 line.loopback = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e4f65900132d..709301f88dcd 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
208 struct ath_hw *ah = sc->sc_ah; 208 struct ath_hw *ah = sc->sc_ah;
209 struct ath_common *common = ath9k_hw_common(ah); 209 struct ath_common *common = ath9k_hw_common(ah);
210 unsigned long flags; 210 unsigned long flags;
211 int i;
211 212
212 if (ath_startrecv(sc) != 0) { 213 if (ath_startrecv(sc) != 0) {
213 ath_err(common, "Unable to restart recv logic\n"); 214 ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
235 } 236 }
236 work: 237 work:
237 ath_restart_work(sc); 238 ath_restart_work(sc);
239
240 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
241 if (!ATH_TXQ_SETUP(sc, i))
242 continue;
243
244 spin_lock_bh(&sc->tx.txq[i].axq_lock);
245 ath_txq_schedule(sc, &sc->tx.txq[i]);
246 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
247 }
238 } 248 }
239 249
240 ieee80211_wake_queues(sc->hw); 250 ieee80211_wake_queues(sc->hw);
@@ -539,21 +549,10 @@ chip_reset:
539 549
540static int ath_reset(struct ath_softc *sc) 550static int ath_reset(struct ath_softc *sc)
541{ 551{
542 int i, r; 552 int r;
543 553
544 ath9k_ps_wakeup(sc); 554 ath9k_ps_wakeup(sc);
545
546 r = ath_reset_internal(sc, NULL); 555 r = ath_reset_internal(sc, NULL);
547
548 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
549 if (!ATH_TXQ_SETUP(sc, i))
550 continue;
551
552 spin_lock_bh(&sc->tx.txq[i].axq_lock);
553 ath_txq_schedule(sc, &sc->tx.txq[i]);
554 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
555 }
556
557 ath9k_ps_restore(sc); 556 ath9k_ps_restore(sc);
558 557
559 return r; 558 return r;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a5a4e4..ab9e3a8410bc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
1270 return; 1270 return;
1271 1271
1272 /* 1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity 1273 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row. 1274 * chooses the other antenna 3 times in a row.
1282 */ 1275 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515fe3ffa..dd30452df966 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
399 tbf->bf_buf_addr = bf->bf_buf_addr; 399 tbf->bf_buf_addr = bf->bf_buf_addr;
400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
401 tbf->bf_state = bf->bf_state; 401 tbf->bf_state = bf->bf_state;
402 tbf->bf_state.stale = false;
402 403
403 return tbf; 404 return tbf;
404} 405}
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1389 u16 tid, u16 *ssn) 1390 u16 tid, u16 *ssn)
1390{ 1391{
1391 struct ath_atx_tid *txtid; 1392 struct ath_atx_tid *txtid;
1393 struct ath_txq *txq;
1392 struct ath_node *an; 1394 struct ath_node *an;
1393 u8 density; 1395 u8 density;
1394 1396
1395 an = (struct ath_node *)sta->drv_priv; 1397 an = (struct ath_node *)sta->drv_priv;
1396 txtid = ATH_AN_2_TID(an, tid); 1398 txtid = ATH_AN_2_TID(an, tid);
1399 txq = txtid->ac->txq;
1400
1401 ath_txq_lock(sc, txq);
1397 1402
1398 /* update ampdu factor/density, they may have changed. This may happen 1403 /* update ampdu factor/density, they may have changed. This may happen
1399 * in HT IBSS when a beacon with HT-info is received after the station 1404 * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1417 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1422 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1418 txtid->baw_head = txtid->baw_tail = 0; 1423 txtid->baw_head = txtid->baw_tail = 0;
1419 1424
1425 ath_txq_unlock_complete(sc, txq);
1426
1420 return 0; 1427 return 0;
1421} 1428}
1422 1429
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1555 __skb_unlink(bf->bf_mpdu, tid_q); 1562 __skb_unlink(bf->bf_mpdu, tid_q);
1556 list_add_tail(&bf->list, &bf_q); 1563 list_add_tail(&bf->list, &bf_q);
1557 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1564 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1558 ath_tx_addto_baw(sc, tid, bf); 1565 if (bf_isampdu(bf)) {
1559 bf->bf_state.bf_type &= ~BUF_AGGR; 1566 ath_tx_addto_baw(sc, tid, bf);
1567 bf->bf_state.bf_type &= ~BUF_AGGR;
1568 }
1560 if (bf_tail) 1569 if (bf_tail)
1561 bf_tail->bf_next = bf; 1570 bf_tail->bf_next = bf;
1562 1571
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1950 if (bf_is_ampdu_not_probing(bf)) 1959 if (bf_is_ampdu_not_probing(bf))
1951 txq->axq_ampdu_depth++; 1960 txq->axq_ampdu_depth++;
1952 1961
1953 bf = bf->bf_lastbf->bf_next; 1962 bf_last = bf->bf_lastbf;
1963 bf = bf_last->bf_next;
1964 bf_last->bf_next = NULL;
1954 } 1965 }
1955 } 1966 }
1956} 1967}
@@ -1958,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1958static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1969static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1959 struct ath_atx_tid *tid, struct sk_buff *skb) 1970 struct ath_atx_tid *tid, struct sk_buff *skb)
1960{ 1971{
1972 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1961 struct ath_frame_info *fi = get_frame_info(skb); 1973 struct ath_frame_info *fi = get_frame_info(skb);
1962 struct list_head bf_head; 1974 struct list_head bf_head;
1963 struct ath_buf *bf; 1975 struct ath_buf *bf = fi->bf;
1964
1965 bf = fi->bf;
1966 1976
1967 INIT_LIST_HEAD(&bf_head); 1977 INIT_LIST_HEAD(&bf_head);
1968 list_add_tail(&bf->list, &bf_head); 1978 list_add_tail(&bf->list, &bf_head);
1969 bf->bf_state.bf_type = 0; 1979 bf->bf_state.bf_type = 0;
1980 if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1981 bf->bf_state.bf_type = BUF_AMPDU;
1982 ath_tx_addto_baw(sc, tid, bf);
1983 }
1970 1984
1971 bf->bf_next = NULL; 1985 bf->bf_next = NULL;
1972 bf->bf_lastbf = bf; 1986 bf->bf_lastbf = bf;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2bc8dde..c3462b75bd08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
464 464
465static int brcmf_sdio_pd_probe(struct platform_device *pdev) 465static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 466{
467 int ret;
468
469 brcmf_dbg(SDIO, "Enter\n"); 467 brcmf_dbg(SDIO, "Enter\n");
470 468
471 brcmfmac_sdio_pdata = pdev->dev.platform_data; 469 brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
473 if (brcmfmac_sdio_pdata->power_on) 471 if (brcmfmac_sdio_pdata->power_on)
474 brcmfmac_sdio_pdata->power_on(); 472 brcmfmac_sdio_pdata->power_on();
475 473
476 ret = sdio_register_driver(&brcmf_sdmmc_driver); 474 return 0;
477 if (ret)
478 brcmf_err("sdio_register_driver failed: %d\n", ret);
479
480 return ret;
481} 475}
482 476
483static int brcmf_sdio_pd_remove(struct platform_device *pdev) 477static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
500 } 494 }
501}; 495};
502 496
497void brcmf_sdio_register(void)
498{
499 int ret;
500
501 ret = sdio_register_driver(&brcmf_sdmmc_driver);
502 if (ret)
503 brcmf_err("sdio_register_driver failed: %d\n", ret);
504}
505
503void brcmf_sdio_exit(void) 506void brcmf_sdio_exit(void)
504{ 507{
505 brcmf_dbg(SDIO, "Enter\n"); 508 brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
510 sdio_unregister_driver(&brcmf_sdmmc_driver); 513 sdio_unregister_driver(&brcmf_sdmmc_driver);
511} 514}
512 515
513void brcmf_sdio_init(void) 516void __init brcmf_sdio_init(void)
514{ 517{
515 int ret; 518 int ret;
516 519
517 brcmf_dbg(SDIO, "Enter\n"); 520 brcmf_dbg(SDIO, "Enter\n");
518 521
519 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); 522 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
520 if (ret == -ENODEV) { 523 if (ret == -ENODEV)
521 brcmf_dbg(SDIO, "No platform data available, registering without.\n"); 524 brcmf_dbg(SDIO, "No platform data available.\n");
522 ret = sdio_register_driver(&brcmf_sdmmc_driver);
523 }
524
525 if (ret)
526 brcmf_err("driver registration failed: %d\n", ret);
527} 525}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985844e4..74156f84180c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev);
156#ifdef CONFIG_BRCMFMAC_SDIO 156#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 157extern void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 158extern void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void);
159#endif 160#endif
160#ifdef CONFIG_BRCMFMAC_USB 161#ifdef CONFIG_BRCMFMAC_USB
161extern void brcmf_usb_exit(void); 162extern void brcmf_usb_exit(void);
162extern void brcmf_usb_init(void); 163extern void brcmf_usb_register(void);
163#endif 164#endif
164 165
165#endif /* _BRCMF_BUS_H_ */ 166#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec1fbf1..40e7f854e10f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1231 return bus->chip << 4 | bus->chiprev; 1231 return bus->chip << 4 | bus->chiprev;
1232} 1232}
1233 1233
1234static void brcmf_driver_init(struct work_struct *work) 1234static void brcmf_driver_register(struct work_struct *work)
1235{ 1235{
1236 brcmf_debugfs_init();
1237
1238#ifdef CONFIG_BRCMFMAC_SDIO 1236#ifdef CONFIG_BRCMFMAC_SDIO
1239 brcmf_sdio_init(); 1237 brcmf_sdio_register();
1240#endif 1238#endif
1241#ifdef CONFIG_BRCMFMAC_USB 1239#ifdef CONFIG_BRCMFMAC_USB
1242 brcmf_usb_init(); 1240 brcmf_usb_register();
1243#endif 1241#endif
1244} 1242}
1245static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); 1243static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1246 1244
1247static int __init brcmfmac_module_init(void) 1245static int __init brcmfmac_module_init(void)
1248{ 1246{
1247 brcmf_debugfs_init();
1248#ifdef CONFIG_BRCMFMAC_SDIO
1249 brcmf_sdio_init();
1250#endif
1249 if (!schedule_work(&brcmf_driver_work)) 1251 if (!schedule_work(&brcmf_driver_work))
1250 return -EBUSY; 1252 return -EBUSY;
1251 1253
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7c8556..f4aea47e0730 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
1539 brcmf_release_fw(&fw_image_list); 1539 brcmf_release_fw(&fw_image_list);
1540} 1540}
1541 1541
1542void brcmf_usb_init(void) 1542void brcmf_usb_register(void)
1543{ 1543{
1544 brcmf_dbg(USB, "Enter\n"); 1544 brcmf_dbg(USB, "Enter\n");
1545 INIT_LIST_HEAD(&fw_image_list); 1545 INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 3a6544710c8a..edc5d105ff98 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
457 if (err != 0) 457 if (err != 0)
458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", 458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
459 __func__, err); 459 __func__, err);
460
461 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
460 return err; 462 return err;
461} 463}
462 464
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
479 return; 481 return;
480 } 482 }
481 483
484 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
485
482 /* put driver in down state */ 486 /* put driver in down state */
483 spin_lock_bh(&wl->lock); 487 spin_lock_bh(&wl->lock);
484 brcms_down(wl); 488 brcms_down(wl);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index f5e6b489ed32..755a0c8edfe1 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -42,7 +42,6 @@ struct hwbus_priv {
42 spinlock_t lock; /* Serialize all bus operations */ 42 spinlock_t lock; /* Serialize all bus operations */
43 wait_queue_head_t wq; 43 wait_queue_head_t wq;
44 int claimed; 44 int claimed;
45 int irq_disabled;
46}; 45};
47 46
48#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) 47#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -238,9 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
238 struct hwbus_priv *self = dev_id; 237 struct hwbus_priv *self = dev_id;
239 238
240 if (self->core) { 239 if (self->core) {
241 disable_irq_nosync(self->func->irq); 240 cw1200_spi_lock(self);
242 self->irq_disabled = 1;
243 cw1200_irq_handler(self->core); 241 cw1200_irq_handler(self->core);
242 cw1200_spi_unlock(self);
244 return IRQ_HANDLED; 243 return IRQ_HANDLED;
245 } else { 244 } else {
246 return IRQ_NONE; 245 return IRQ_NONE;
@@ -253,9 +252,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
253 252
254 pr_debug("SW IRQ subscribe\n"); 253 pr_debug("SW IRQ subscribe\n");
255 254
256 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, 255 ret = request_threaded_irq(self->func->irq, NULL,
257 IRQF_TRIGGER_HIGH, 256 cw1200_spi_irq_handler,
258 "cw1200_wlan_irq", self); 257 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
258 "cw1200_wlan_irq", self);
259 if (WARN_ON(ret < 0)) 259 if (WARN_ON(ret < 0))
260 goto exit; 260 goto exit;
261 261
@@ -273,22 +273,13 @@ exit:
273 273
274static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) 274static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
275{ 275{
276 int ret = 0;
277
276 pr_debug("SW IRQ unsubscribe\n"); 278 pr_debug("SW IRQ unsubscribe\n");
277 disable_irq_wake(self->func->irq); 279 disable_irq_wake(self->func->irq);
278 free_irq(self->func->irq, self); 280 free_irq(self->func->irq, self);
279 281
280 return 0; 282 return ret;
281}
282
283static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
284{
285 /* Disables are handled by the interrupt handler */
286 if (enable && self->irq_disabled) {
287 enable_irq(self->func->irq);
288 self->irq_disabled = 0;
289 }
290
291 return 0;
292} 283}
293 284
294static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) 285static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -368,7 +359,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
368 .unlock = cw1200_spi_unlock, 359 .unlock = cw1200_spi_unlock,
369 .align_size = cw1200_spi_align_size, 360 .align_size = cw1200_spi_align_size,
370 .power_mgmt = cw1200_spi_pm, 361 .power_mgmt = cw1200_spi_pm,
371 .irq_enable = cw1200_spi_irq_enable,
372}; 362};
373 363
374/* Probe Function to be called by SPI stack when device is discovered */ 364/* Probe Function to be called by SPI stack when device is discovered */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 0b2061bbc68b..acdff0f7f952 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
485 485
486 /* Enable interrupt signalling */ 486 /* Enable interrupt signalling */
487 priv->hwbus_ops->lock(priv->hwbus_priv); 487 priv->hwbus_ops->lock(priv->hwbus_priv);
488 ret = __cw1200_irq_enable(priv, 2); 488 ret = __cw1200_irq_enable(priv, 1);
489 priv->hwbus_ops->unlock(priv->hwbus_priv); 489 priv->hwbus_ops->unlock(priv->hwbus_priv);
490 if (ret < 0) 490 if (ret < 0)
491 goto unsubscribe; 491 goto unsubscribe;
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
index 51dfb3a90735..8b2fc831c3de 100644
--- a/drivers/net/wireless/cw1200/hwbus.h
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -28,7 +28,6 @@ struct hwbus_ops {
28 void (*unlock)(struct hwbus_priv *self); 28 void (*unlock)(struct hwbus_priv *self);
29 size_t (*align_size)(struct hwbus_priv *self, size_t size); 29 size_t (*align_size)(struct hwbus_priv *self, size_t size);
30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend); 30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
31 int (*irq_enable)(struct hwbus_priv *self, int enable);
32}; 31};
33 32
34#endif /* CW1200_HWBUS_H */ 33#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
index 41bd7615ccaa..ff230b7aeedd 100644
--- a/drivers/net/wireless/cw1200/hwio.c
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
273 u16 val16; 273 u16 val16;
274 int ret; 274 int ret;
275 275
276 /* We need to do this hack because the SPI layer can sleep on I/O
277 and the general path involves I/O to the device in interrupt
278 context.
279
280 However, the initial enable call needs to go to the hardware.
281
282 We don't worry about shutdown because we do a full reset which
283 clears the interrupt enabled bits.
284 */
285 if (priv->hwbus_ops->irq_enable) {
286 ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
287 if (ret || enable < 2)
288 return ret;
289 }
290
291 if (HIF_8601_SILICON == priv->hw_type) { 276 if (HIF_8601_SILICON == priv->hw_type) {
292 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); 277 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
293 if (ret < 0) { 278 if (ret < 0) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 30d45e2fc193..8ac305be68f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
240 .ht_params = &iwl6000_ht_params, 240 .ht_params = &iwl6000_ht_params,
241}; 241};
242 242
243const struct iwl_cfg iwl6035_2agn_sff_cfg = {
244 .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
245 IWL_DEVICE_6035,
246 .ht_params = &iwl6000_ht_params,
247};
248
243const struct iwl_cfg iwl1030_bgn_cfg = { 249const struct iwl_cfg iwl1030_bgn_cfg = {
244 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", 250 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
245 IWL_DEVICE_6030, 251 IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index e4d370bff306..b03c25e14903 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
280extern const struct iwl_cfg iwl2000_2bgn_d_cfg; 280extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
281extern const struct iwl_cfg iwl2030_2bgn_cfg; 281extern const struct iwl_cfg iwl2030_2bgn_cfg;
282extern const struct iwl_cfg iwl6035_2agn_cfg; 282extern const struct iwl_cfg iwl6035_2agn_cfg;
283extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
283extern const struct iwl_cfg iwl105_bgn_cfg; 284extern const struct iwl_cfg iwl105_bgn_cfg;
284extern const struct iwl_cfg iwl105_bgn_d_cfg; 285extern const struct iwl_cfg iwl105_bgn_d_cfg;
285extern const struct iwl_cfg iwl135_bgn_cfg; 286extern const struct iwl_cfg iwl135_bgn_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index dd57a36ecb10..80b47508647c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
601{ 601{
602 int ret; 602 int ret;
603 603
604 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 604 if (trans->state != IWL_TRANS_FW_ALIVE) {
605 "%s bad state = %d", __func__, trans->state); 605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
606 return -EIO;
607 }
606 608
607 if (!(cmd->flags & CMD_ASYNC)) 609 if (!(cmd->flags & CMD_ASYNC))
608 lock_map_acquire_read(&trans->sync_cmd_lockdep_map); 610 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 21407a353a3b..d58e393324ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
273 if (!mvmvif->queue_params[ac].uapsd) 273 if (!mvmvif->queue_params[ac].uapsd)
274 continue; 274 continue;
275 275
276 cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); 276 if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
277 cmd->flags |=
278 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
279
277 cmd->uapsd_ac_flags |= BIT(ac); 280 cmd->uapsd_ac_flags |= BIT(ac);
278 281
279 /* QNDP TID - the highest TID with no admission control */ 282 /* QNDP TID - the highest TID with no admission control */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9a7ab8495300..621fb71f282a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -394,6 +394,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
394 return false; 394 return false;
395 } 395 }
396 396
397 /*
398 * If scan cannot be aborted, it means that we had a
399 * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
400 * ieee80211_scan_completed already.
401 */
397 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", 402 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
398 *resp); 403 *resp);
399 return true; 404 return true;
@@ -417,14 +422,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
417 SCAN_COMPLETE_NOTIFICATION }; 422 SCAN_COMPLETE_NOTIFICATION };
418 int ret; 423 int ret;
419 424
425 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
426 return;
427
420 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 428 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
421 scan_abort_notif, 429 scan_abort_notif,
422 ARRAY_SIZE(scan_abort_notif), 430 ARRAY_SIZE(scan_abort_notif),
423 iwl_mvm_scan_abort_notif, NULL); 431 iwl_mvm_scan_abort_notif, NULL);
424 432
425 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 433 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
434 CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
426 if (ret) { 435 if (ret) {
427 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 436 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
437 /* mac80211's state will be cleaned in the fw_restart flow */
428 goto out_remove_notif; 438 goto out_remove_notif;
429 } 439 }
430 440
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dc02cb9792af..26108a1a29fa 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
139 139
140/* 6x00 Series */ 140/* 6x00 Series */
141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, 141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, 143 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
144 {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
143 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, 145 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
144 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, 146 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
145 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, 147 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
146 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, 148 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
147 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, 149 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
148 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 150 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
151 {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
149 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 152 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
150 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 153 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
151 154
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
153 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, 156 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
154 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, 157 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
155 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, 158 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
159 {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
156 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, 160 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
157 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, 161 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
158 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, 163 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
164 {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
159 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, 165 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
160 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, 166 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
161 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, 167 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
168 {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, 169 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ 170 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
164 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ 171 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
240 247
241/* 6x35 Series */ 248/* 6x35 Series */
242 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, 249 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
250 {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
243 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, 251 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
252 {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
244 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, 253 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
254 {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, 255 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
246 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, 256 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
247 257
@@ -260,54 +270,86 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
260#if IS_ENABLED(CONFIG_IWLMVM) 270#if IS_ENABLED(CONFIG_IWLMVM)
261/* 7000 Series */ 271/* 7000 Series */
262 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 272 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
263 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, 274 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
264 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, 275 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
265 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, 277 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
266 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, 278 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
267 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, 279 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
268 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, 280 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
281 {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
269 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, 282 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
283 {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
270 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, 284 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
271 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, 285 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
272 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, 287 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, 289 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
274 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, 290 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
275 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, 291 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, 292 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
277 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, 293 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
278 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, 294 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
295 {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
297 {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
298 {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
299 {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
279 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, 300 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
301 {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
280 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, 302 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
281 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, 303 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
282 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 304 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
283 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, 306 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
284 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, 307 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
285 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, 309 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, 310 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
287 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, 311 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
312 {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
313 {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, 314 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
315 {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
289 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, 316 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
317 {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
290 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, 318 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
291 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, 319 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
320 {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
292 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, 321 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
293 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, 322 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
323 {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
324 {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
325 {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
326 {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
294 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, 327 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
328 {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
295 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, 329 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, 330 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
297 331
298/* 3160 Series */ 332/* 3160 Series */
299 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, 333 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
334 {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
300 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, 335 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
336 {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
301 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, 337 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
302 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, 338 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
303 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, 339 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
340 {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
304 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, 341 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
342 {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
343 {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, 344 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
345 {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
306 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, 346 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
347 {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
307 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, 348 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, 349 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
309 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, 350 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
310 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, 351 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
352 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
311#endif /* CONFIG_IWLMVM */ 353#endif /* CONFIG_IWLMVM */
312 354
313 {0} 355 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index bad95d28d50d..c3f904d422b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1401 spin_lock_init(&trans_pcie->reg_lock); 1401 spin_lock_init(&trans_pcie->reg_lock);
1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1403 1403
1404 err = pci_enable_device(pdev);
1405 if (err)
1406 goto out_no_pci;
1407
1404 if (!cfg->base_params->pcie_l1_allowed) { 1408 if (!cfg->base_params->pcie_l1_allowed) {
1405 /* 1409 /*
1406 * W/A - seems to solve weird behavior. We need to remove this 1410 * W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1412 PCIE_LINK_STATE_CLKPM); 1416 PCIE_LINK_STATE_CLKPM);
1413 } 1417 }
1414 1418
1415 err = pci_enable_device(pdev);
1416 if (err)
1417 goto out_no_pci;
1418
1419 pci_set_master(pdev); 1419 pci_set_master(pdev);
1420 1420
1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index f45eb29c2ede..1424335163b9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1102 * non-AGG queue. 1102 * non-AGG queue.
1103 */ 1103 */
1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
1105
1106 ssn = trans_pcie->txq[txq_id].q.read_ptr;
1105 } 1107 }
1106 1108
1107 /* Place first TFD at index corresponding to start sequence number. 1109 /* Place first TFD at index corresponding to start sequence number.
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 21c688264708..1214c587fd08 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
150 */ 150 */
151int 151int
152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
153 struct mwifiex_ra_list_tbl *pra_list, int headroom, 153 struct mwifiex_ra_list_tbl *pra_list,
154 int ptrindex, unsigned long ra_list_flags) 154 int ptrindex, unsigned long ra_list_flags)
155 __releases(&priv->wmm.ra_list_spinlock) 155 __releases(&priv->wmm.ra_list_spinlock)
156{ 156{
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 160 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 161 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 162 struct txpd *ptx_pd = NULL;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
163 164
164 skb_src = skb_peek(&pra_list->skb_head); 165 skb_src = skb_peek(&pra_list->skb_head);
165 if (!skb_src) { 166 if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c62a0cc..892098d6a696 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, 26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
27 struct sk_buff *skb); 27 struct sk_buff *skb);
28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
29 struct mwifiex_ra_list_tbl *ptr, int headroom, 29 struct mwifiex_ra_list_tbl *ptr,
30 int ptr_index, unsigned long flags) 30 int ptr_index, unsigned long flags)
31 __releases(&priv->wmm.ra_list_spinlock); 31 __releases(&priv->wmm.ra_list_spinlock);
32 32
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d761477d15e..a6c46f3b6e3a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1156 1156
1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && 1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1158 adapter->iface_type == MWIFIEX_SDIO) { 1158 adapter->iface_type != MWIFIEX_USB) {
1159 mwifiex_hs_activated_event(priv, true); 1159 mwifiex_hs_activated_event(priv, true);
1160 return 0; 1160 return 0;
1161 } else { 1161 } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1167 } 1167 }
1168 if (conditions != HS_CFG_CANCEL) { 1168 if (conditions != HS_CFG_CANCEL) {
1169 adapter->is_hs_configured = true; 1169 adapter->is_hs_configured = true;
1170 if (adapter->iface_type == MWIFIEX_USB || 1170 if (adapter->iface_type == MWIFIEX_USB)
1171 adapter->iface_type == MWIFIEX_PCIE)
1172 mwifiex_hs_activated_event(priv, true); 1171 mwifiex_hs_activated_event(priv, true);
1173 } else { 1172 } else {
1174 adapter->is_hs_configured = false; 1173 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 9d7c0e6c4fc7..37f873bb342f 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1422 */ 1422 */
1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) 1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1424{ 1424{
1425 int ret = 0;
1426
1425 if (!priv->media_connected) 1427 if (!priv->media_connected)
1426 return 0; 1428 return 0;
1427 1429
1428 switch (priv->bss_mode) { 1430 switch (priv->bss_mode) {
1429 case NL80211_IFTYPE_STATION: 1431 case NL80211_IFTYPE_STATION:
1430 case NL80211_IFTYPE_P2P_CLIENT: 1432 case NL80211_IFTYPE_P2P_CLIENT:
1431 return mwifiex_deauthenticate_infra(priv, mac); 1433 ret = mwifiex_deauthenticate_infra(priv, mac);
1434 if (ret)
1435 cfg80211_disconnected(priv->netdev, 0, NULL, 0,
1436 GFP_KERNEL);
1437 break;
1432 case NL80211_IFTYPE_ADHOC: 1438 case NL80211_IFTYPE_ADHOC:
1433 return mwifiex_send_cmd_sync(priv, 1439 return mwifiex_send_cmd_sync(priv,
1434 HostCmd_CMD_802_11_AD_HOC_STOP, 1440 HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1440 break; 1446 break;
1441 } 1447 }
1442 1448
1443 return 0; 1449 return ret;
1444} 1450}
1445EXPORT_SYMBOL_GPL(mwifiex_deauthenticate); 1451EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
1446 1452
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd778337deee..c2b91f566e05 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -358,10 +358,12 @@ process_start:
358 } 358 }
359 } while (true); 359 } while (true);
360 360
361 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) 361 spin_lock_irqsave(&adapter->main_proc_lock, flags);
362 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
363 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
362 goto process_start; 364 goto process_start;
365 }
363 366
364 spin_lock_irqsave(&adapter->main_proc_lock, flags);
365 adapter->mwifiex_processing = false; 367 adapter->mwifiex_processing = false;
366 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 368 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
367 369
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8b057524b252..8c351f71f72f 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
118 dev_dbg(adapter->dev, 118 dev_dbg(adapter->dev,
119 "info: successfully disconnected from %pM: reason code %d\n", 119 "info: successfully disconnected from %pM: reason code %d\n",
120 priv->cfg_bssid, reason_code); 120 priv->cfg_bssid, reason_code);
121 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 121 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
122 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
122 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, 123 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
123 GFP_KERNEL); 124 GFP_KERNEL);
124 } 125 }
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 2472d4b7f00e..1c70b8d09227 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
447 */ 447 */
448 adapter->is_suspended = true; 448 adapter->is_suspended = true;
449 449
450 for (i = 0; i < adapter->priv_num; i++)
451 netif_carrier_off(adapter->priv[i]->netdev);
452
453 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 450 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
454 usb_kill_urb(card->rx_cmd.urb); 451 usb_kill_urb(card->rx_cmd.urb);
455 452
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
509 MWIFIEX_RX_CMD_BUF_SIZE); 506 MWIFIEX_RX_CMD_BUF_SIZE);
510 } 507 }
511 508
512 for (i = 0; i < adapter->priv_num; i++)
513 if (adapter->priv[i]->media_connected)
514 netif_carrier_on(adapter->priv[i]->netdev);
515
516 /* Disable Host Sleep */ 509 /* Disable Host Sleep */
517 if (adapter->hs_activated) 510 if (adapter->hs_activated)
518 mwifiex_cancel_hs(mwifiex_get_priv(adapter, 511 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cdea54d..95fa3599b407 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1240 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1241 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1243 ptr_index, flags);
1244 /* ra_list_spinlock has been freed in 1243 /* ra_list_spinlock has been freed in
1245 mwifiex_11n_aggregate_pkt() */ 1244 mwifiex_11n_aggregate_pkt() */
1246 else 1245 else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef66cf4b..e328d3058c41 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ 83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
86 {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
86 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ 87 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
87 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 88 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
88 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 89 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
979 if (err) { 980 if (err) {
980 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " 981 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
981 "(%d)!\n", p54u_fwlist[i].fw, err); 982 "(%d)!\n", p54u_fwlist[i].fw, err);
983 usb_put_dev(udev);
982 } 984 }
983 985
984 return err; 986 return err;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95deb274b..dc49e525ae5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
105 goto exit_release_regions; 105 goto exit_release_regions;
106 } 106 }
107 107
108 pci_enable_msi(pci_dev);
109
110 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
111 if (!hw) { 109 if (!hw) {
112 rt2x00_probe_err("Failed to allocate hardware\n"); 110 rt2x00_probe_err("Failed to allocate hardware\n");
113 retval = -ENOMEM; 111 retval = -ENOMEM;
114 goto exit_disable_msi; 112 goto exit_release_regions;
115 } 113 }
116 114
117 pci_set_drvdata(pci_dev, hw); 115 pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
152exit_free_device: 150exit_free_device:
153 ieee80211_free_hw(hw); 151 ieee80211_free_hw(hw);
154 152
155exit_disable_msi:
156 pci_disable_msi(pci_dev);
157
158exit_release_regions: 153exit_release_regions:
159 pci_release_regions(pci_dev); 154 pci_release_regions(pci_dev);
160 155
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
179 rt2x00pci_free_reg(rt2x00dev); 174 rt2x00pci_free_reg(rt2x00dev);
180 ieee80211_free_hw(hw); 175 ieee80211_free_hw(hw);
181 176
182 pci_disable_msi(pci_dev);
183
184 /* 177 /*
185 * Free the PCI device data. 178 * Free the PCI device data.
186 */ 179 */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1defab5..5a060e537fbe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
343 (bool)GET_RX_DESC_PAGGR(pdesc)); 343 (bool)GET_RX_DESC_PAGGR(pdesc));
344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
345 if (phystatus) { 345 if (phystatus) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE); 346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
347 stats->rx_bufshift);
347 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, 348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
348 p_drvinfo); 349 p_drvinfo);
349 } 350 }
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c87cbe..703258742d28 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@ struct rtl_priv {
2057 that it points to the data allocated 2057 that it points to the data allocated
2058 beyond this structure like: 2058 beyond this structure like:
2059 rtl_pci_priv or rtl_usb_priv */ 2059 rtl_pci_priv or rtl_usb_priv */
2060 u8 priv[0]; 2060 u8 priv[0] __aligned(sizeof(void *));
2061}; 2061};
2062 2062
2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) 2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
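
Side note on the wifi.h hunk above: the zero-length priv[] array gains __aligned(sizeof(void *)) so that the bus-private structure placed immediately after struct rtl_priv (rtl_pci_priv or rtl_usb_priv, per the in-code comment) starts on a pointer-aligned boundary. A minimal, self-contained illustration of the pattern, using hypothetical names rather than the driver's own code:

    #include <stddef.h>
    #include <stdio.h>

    struct outer {
            int flags;
            /* Without the aligned attribute, priv would start at offset 4,
             * misaligning any pointers stored in the trailing private area
             * on a 64-bit build. */
            unsigned char priv[0] __attribute__((aligned(sizeof(void *))));
    };

    struct trailing_priv {
            void *dev;      /* wants natural pointer alignment */
    };

    int main(void)
    {
            /* With the attribute this prints 8 on LP64; without it, 4. */
            printf("priv offset = %zu\n", offsetof(struct outer, priv));
            return 0;
    }
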
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a53782ef1540..1b08d8798372 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
24struct backend_info { 24struct backend_info {
25 struct xenbus_device *dev; 25 struct xenbus_device *dev;
26 struct xenvif *vif; 26 struct xenvif *vif;
27
28 /* This is the state that will be reflected in xenstore when any
29 * active hotplug script completes.
30 */
31 enum xenbus_state state;
32
27 enum xenbus_state frontend_state; 33 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 34 struct xenbus_watch hotplug_status_watch;
29 u8 have_hotplug_status_watch:1; 35 u8 have_hotplug_status_watch:1;
@@ -33,11 +39,15 @@ static int connect_rings(struct backend_info *);
33static void connect(struct backend_info *); 39static void connect(struct backend_info *);
34static void backend_create_xenvif(struct backend_info *be); 40static void backend_create_xenvif(struct backend_info *be);
35static void unregister_hotplug_status_watch(struct backend_info *be); 41static void unregister_hotplug_status_watch(struct backend_info *be);
42static void set_backend_state(struct backend_info *be,
43 enum xenbus_state state);
36 44
37static int netback_remove(struct xenbus_device *dev) 45static int netback_remove(struct xenbus_device *dev)
38{ 46{
39 struct backend_info *be = dev_get_drvdata(&dev->dev); 47 struct backend_info *be = dev_get_drvdata(&dev->dev);
40 48
49 set_backend_state(be, XenbusStateClosed);
50
41 unregister_hotplug_status_watch(be); 51 unregister_hotplug_status_watch(be);
42 if (be->vif) { 52 if (be->vif) {
43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 53 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -136,6 +146,8 @@ static int netback_probe(struct xenbus_device *dev,
136 if (err) 146 if (err)
137 goto fail; 147 goto fail;
138 148
149 be->state = XenbusStateInitWait;
150
139 /* This kicks hotplug scripts, so do it immediately. */ 151 /* This kicks hotplug scripts, so do it immediately. */
140 backend_create_xenvif(be); 152 backend_create_xenvif(be);
141 153
@@ -208,24 +220,113 @@ static void backend_create_xenvif(struct backend_info *be)
208 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 220 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
209} 221}
210 222
211 223static void backend_disconnect(struct backend_info *be)
212static void disconnect_backend(struct xenbus_device *dev)
213{ 224{
214 struct backend_info *be = dev_get_drvdata(&dev->dev);
215
216 if (be->vif) 225 if (be->vif)
217 xenvif_disconnect(be->vif); 226 xenvif_disconnect(be->vif);
218} 227}
219 228
220static void destroy_backend(struct xenbus_device *dev) 229static void backend_connect(struct backend_info *be)
221{ 230{
222 struct backend_info *be = dev_get_drvdata(&dev->dev); 231 if (be->vif)
232 connect(be);
233}
223 234
224 if (be->vif) { 235static inline void backend_switch_state(struct backend_info *be,
225 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 236 enum xenbus_state state)
226 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 237{
227 xenvif_free(be->vif); 238 struct xenbus_device *dev = be->dev;
228 be->vif = NULL; 239
240 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
241 be->state = state;
242
243 /* If we are waiting for a hotplug script then defer the
244 * actual xenbus state change.
245 */
246 if (!be->have_hotplug_status_watch)
247 xenbus_switch_state(dev, state);
248}
249
250/* Handle backend state transitions:
251 *
252 * The backend state starts in InitWait and the following transitions are
253 * allowed.
254 *
255 * InitWait -> Connected
256 *
257 *    ^    \         |
258 *    |     \        |
259 *    |      \       |
260 *    |       \      |
261 *    |        \     |
262 *    |         \    |
263 *    |          V   V
264 *
265 * Closed <-> Closing
266 *
267 * The state argument specifies the eventual state of the backend and the
268 * function transitions to that state via the shortest path.
269 */
270static void set_backend_state(struct backend_info *be,
271 enum xenbus_state state)
272{
273 while (be->state != state) {
274 switch (be->state) {
275 case XenbusStateClosed:
276 switch (state) {
277 case XenbusStateInitWait:
278 case XenbusStateConnected:
279 pr_info("%s: prepare for reconnect\n",
280 be->dev->nodename);
281 backend_switch_state(be, XenbusStateInitWait);
282 break;
283 case XenbusStateClosing:
284 backend_switch_state(be, XenbusStateClosing);
285 break;
286 default:
287 BUG();
288 }
289 break;
290 case XenbusStateInitWait:
291 switch (state) {
292 case XenbusStateConnected:
293 backend_connect(be);
294 backend_switch_state(be, XenbusStateConnected);
295 break;
296 case XenbusStateClosing:
297 case XenbusStateClosed:
298 backend_switch_state(be, XenbusStateClosing);
299 break;
300 default:
301 BUG();
302 }
303 break;
304 case XenbusStateConnected:
305 switch (state) {
306 case XenbusStateInitWait:
307 case XenbusStateClosing:
308 case XenbusStateClosed:
309 backend_disconnect(be);
310 backend_switch_state(be, XenbusStateClosing);
311 break;
312 default:
313 BUG();
314 }
315 break;
316 case XenbusStateClosing:
317 switch (state) {
318 case XenbusStateInitWait:
319 case XenbusStateConnected:
320 case XenbusStateClosed:
321 backend_switch_state(be, XenbusStateClosed);
322 break;
323 default:
324 BUG();
325 }
326 break;
327 default:
328 BUG();
329 }
229 } 330 }
230} 331}
231 332
@@ -237,40 +338,33 @@ static void frontend_changed(struct xenbus_device *dev,
237{ 338{
238 struct backend_info *be = dev_get_drvdata(&dev->dev); 339 struct backend_info *be = dev_get_drvdata(&dev->dev);
239 340
240 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); 341 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
241 342
242 be->frontend_state = frontend_state; 343 be->frontend_state = frontend_state;
243 344
244 switch (frontend_state) { 345 switch (frontend_state) {
245 case XenbusStateInitialising: 346 case XenbusStateInitialising:
246 if (dev->state == XenbusStateClosed) { 347 set_backend_state(be, XenbusStateInitWait);
247 pr_info("%s: prepare for reconnect\n", dev->nodename);
248 xenbus_switch_state(dev, XenbusStateInitWait);
249 }
250 break; 348 break;
251 349
252 case XenbusStateInitialised: 350 case XenbusStateInitialised:
253 break; 351 break;
254 352
255 case XenbusStateConnected: 353 case XenbusStateConnected:
256 if (dev->state == XenbusStateConnected) 354 set_backend_state(be, XenbusStateConnected);
257 break;
258 if (be->vif)
259 connect(be);
260 break; 355 break;
261 356
262 case XenbusStateClosing: 357 case XenbusStateClosing:
263 disconnect_backend(dev); 358 set_backend_state(be, XenbusStateClosing);
264 xenbus_switch_state(dev, XenbusStateClosing);
265 break; 359 break;
266 360
267 case XenbusStateClosed: 361 case XenbusStateClosed:
268 xenbus_switch_state(dev, XenbusStateClosed); 362 set_backend_state(be, XenbusStateClosed);
269 if (xenbus_dev_is_online(dev)) 363 if (xenbus_dev_is_online(dev))
270 break; 364 break;
271 destroy_backend(dev);
272 /* fall through if not online */ 365 /* fall through if not online */
273 case XenbusStateUnknown: 366 case XenbusStateUnknown:
367 set_backend_state(be, XenbusStateClosed);
274 device_unregister(&dev->dev); 368 device_unregister(&dev->dev);
275 break; 369 break;
276 370
@@ -363,7 +457,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
363 if (IS_ERR(str)) 457 if (IS_ERR(str))
364 return; 458 return;
365 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 459 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
366 xenbus_switch_state(be->dev, XenbusStateConnected); 460 /* Complete any pending state change */
461 xenbus_switch_state(be->dev, be->state);
462
367 /* Not interested in this watch anymore. */ 463 /* Not interested in this watch anymore. */
368 unregister_hotplug_status_watch(be); 464 unregister_hotplug_status_watch(be);
369 } 465 }
@@ -393,12 +489,8 @@ static void connect(struct backend_info *be)
393 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 489 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
394 hotplug_status_changed, 490 hotplug_status_changed,
395 "%s/%s", dev->nodename, "hotplug-status"); 491 "%s/%s", dev->nodename, "hotplug-status");
396 if (err) { 492 if (!err)
397 /* Switch now, since we can't do a watch. */
398 xenbus_switch_state(dev, XenbusStateConnected);
399 } else {
400 be->have_hotplug_status_watch = 1; 493 be->have_hotplug_status_watch = 1;
401 }
402 494
403 netif_wake_queue(be->vif->dev); 495 netif_wake_queue(be->vif->dev);
404} 496}
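The set_backend_state() loop added above drives the backend to the requested xenbus state one hop at a time, following the shortest legal path through InitWait, Connected, Closing and Closed. A minimal standalone C sketch of that "step until target" idea (not the kernel code; the state names and helpers here are purely illustrative):

    #include <stdio.h>

    enum state { ST_INIT_WAIT, ST_CONNECTED, ST_CLOSING, ST_CLOSED };

    static const char *name(enum state s)
    {
        static const char *n[] = { "InitWait", "Connected", "Closing", "Closed" };
        return n[s];
    }

    /* Return the next hop on the shortest legal path from cur towards target. */
    static enum state next_hop(enum state cur, enum state target)
    {
        switch (cur) {
        case ST_INIT_WAIT:
            return target == ST_CONNECTED ? ST_CONNECTED : ST_CLOSING;
        case ST_CONNECTED:
            return ST_CLOSING;              /* must disconnect before anything else */
        case ST_CLOSING:
            return ST_CLOSED;               /* finish the teardown first */
        case ST_CLOSED:
            return target == ST_CLOSING ? ST_CLOSING : ST_INIT_WAIT;
        }
        return cur;
    }

    static void set_state(enum state *cur, enum state target)
    {
        while (*cur != target) {
            enum state next = next_hop(*cur, target);

            printf("%s -> %s\n", name(*cur), name(next));
            *cur = next;                    /* backend_switch_state() analogue */
        }
    }

    int main(void)
    {
        enum state s = ST_INIT_WAIT;

        set_state(&s, ST_CONNECTED);        /* InitWait -> Connected */
        set_state(&s, ST_CLOSED);           /* Connected -> Closing -> Closed */
        return 0;
    }

Keeping every transition a single validated hop is what lets netback_remove() and frontend_changed() above simply request a target state and share one walker.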
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 9d2009a9004d..78cc76053328 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,10 +74,4 @@ config OF_MTD
74 depends on MTD 74 depends on MTD
75 def_bool y 75 def_bool y
76 76
77config OF_RESERVED_MEM
78 depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
79 def_bool y
80 help
81 Initialization code for DMA reserved memory
82
83endmenu # OF 77endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index ed9660adad77..efd05102c405 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
9obj-$(CONFIG_OF_PCI) += of_pci.o 9obj-$(CONFIG_OF_PCI) += of_pci.o
10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o 10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
11obj-$(CONFIG_OF_MTD) += of_mtd.o 11obj-$(CONFIG_OF_MTD) += of_mtd.o
12obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 865d3f66c86b..7d4c70f859e3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
303 struct device_node *cpun, *cpus; 303 struct device_node *cpun, *cpus;
304 304
305 cpus = of_find_node_by_path("/cpus"); 305 cpus = of_find_node_by_path("/cpus");
306 if (!cpus) { 306 if (!cpus)
307 pr_warn("Missing cpus node, bailing out\n");
308 return NULL; 307 return NULL;
309 }
310 308
311 for_each_child_of_node(cpus, cpun) { 309 for_each_child_of_node(cpus, cpun) {
312 if (of_node_cmp(cpun->type, "cpu")) 310 if (of_node_cmp(cpun->type, "cpu"))
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 229dd9d69e18..a4fa9ad31b8f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/random.h>
22 21
23#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 22#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
24#ifdef CONFIG_PPC 23#ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
803} 802}
804 803
805#endif /* CONFIG_OF_EARLY_FLATTREE */ 804#endif /* CONFIG_OF_EARLY_FLATTREE */
806
807/* Feed entire flattened device tree into the random pool */
808static int __init add_fdt_randomness(void)
809{
810 if (initial_boot_params)
811 add_device_randomness(initial_boot_params,
812 be32_to_cpu(initial_boot_params->totalsize));
813
814 return 0;
815}
816core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644
index 0fe40c7d6904..000000000000
--- a/drivers/of/of_reserved_mem.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * Device tree based initialization code for reserved memory.
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License or (at your optional) any later version of the license.
12 */
13
14#include <linux/memblock.h>
15#include <linux/err.h>
16#include <linux/of.h>
17#include <linux/of_fdt.h>
18#include <linux/of_platform.h>
19#include <linux/mm.h>
20#include <linux/sizes.h>
21#include <linux/mm_types.h>
22#include <linux/dma-contiguous.h>
23#include <linux/dma-mapping.h>
24#include <linux/of_reserved_mem.h>
25
26#define MAX_RESERVED_REGIONS 16
27struct reserved_mem {
28 phys_addr_t base;
29 unsigned long size;
30 struct cma *cma;
31 char name[32];
32};
33static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
34static int reserved_mem_count;
35
36static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
37 int depth, void *data)
38{
39 struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
40 phys_addr_t base, size;
41 int is_cma, is_reserved;
42 unsigned long len;
43 const char *status;
44 __be32 *prop;
45
46 is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
47 of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
48 is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
49
50 if (!is_reserved && !is_cma) {
51 /* ignore node and scan next one */
52 return 0;
53 }
54
55 status = of_get_flat_dt_prop(node, "status", &len);
56 if (status && strcmp(status, "okay") != 0) {
57 /* ignore disabled node nad scan next one */
58 return 0;
59 }
60
61 prop = of_get_flat_dt_prop(node, "reg", &len);
62 if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
63 sizeof(__be32))) {
64 pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
65 uname);
66 /* ignore node and scan next one */
67 return 0;
68 }
69 base = dt_mem_next_cell(dt_root_addr_cells, &prop);
70 size = dt_mem_next_cell(dt_root_size_cells, &prop);
71
72 if (!size) {
73 /* ignore node and scan next one */
74 return 0;
75 }
76
77 pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
78 uname, (unsigned long)base, (unsigned long)size / SZ_1M);
79
80 if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
81 return -ENOSPC;
82
83 rmem->base = base;
84 rmem->size = size;
85 strlcpy(rmem->name, uname, sizeof(rmem->name));
86
87 if (is_cma) {
88 struct cma *cma;
89 if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
90 rmem->cma = cma;
91 reserved_mem_count++;
92 if (of_get_flat_dt_prop(node,
93 "linux,default-contiguous-region",
94 NULL))
95 dma_contiguous_set_default(cma);
96 }
97 } else if (is_reserved) {
98 if (memblock_remove(base, size) == 0)
99 reserved_mem_count++;
100 else
101 pr_err("Failed to reserve memory for %s\n", uname);
102 }
103
104 return 0;
105}
106
107static struct reserved_mem *get_dma_memory_region(struct device *dev)
108{
109 struct device_node *node;
110 const char *name;
111 int i;
112
113 node = of_parse_phandle(dev->of_node, "memory-region", 0);
114 if (!node)
115 return NULL;
116
117 name = kbasename(node->full_name);
118 for (i = 0; i < reserved_mem_count; i++)
119 if (strcmp(name, reserved_mem[i].name) == 0)
120 return &reserved_mem[i];
121 return NULL;
122}
123
124/**
125 * of_reserved_mem_device_init() - assign reserved memory region to given device
126 *
127 * This function assign memory region pointed by "memory-region" device tree
128 * property to the given device.
129 */
130void of_reserved_mem_device_init(struct device *dev)
131{
132 struct reserved_mem *region = get_dma_memory_region(dev);
133 if (!region)
134 return;
135
136 if (region->cma) {
137 dev_set_cma_area(dev, region->cma);
138 pr_info("Assigned CMA %s to %s device\n", region->name,
139 dev_name(dev));
140 } else {
141 if (dma_declare_coherent_memory(dev, region->base, region->base,
142 region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
143 pr_info("Declared reserved memory %s to %s device\n",
144 region->name, dev_name(dev));
145 }
146}
147
148/**
149 * of_reserved_mem_device_release() - release reserved memory device structures
150 *
151 * This function releases structures allocated for memory region handling for
152 * the given device.
153 */
154void of_reserved_mem_device_release(struct device *dev)
155{
156 struct reserved_mem *region = get_dma_memory_region(dev);
157 if (!region && !region->cma)
158 dma_release_declared_memory(dev);
159}
160
161/**
162 * early_init_dt_scan_reserved_mem() - create reserved memory regions
163 *
164 * This function grabs memory from early allocator for device exclusive use
165 * defined in device tree structures. It should be called by arch specific code
166 * once the early allocator (memblock) has been activated and all other
167 * subsystems have already allocated/reserved memory.
168 */
169void __init early_init_dt_scan_reserved_mem(void)
170{
171 of_scan_flat_dt_by_path("/memory/reserved-memory",
172 fdt_scan_reserved_mem, NULL);
173}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9b439ac63d8e..f6dcde220821 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,7 +21,6 @@
21#include <linux/of_device.h> 21#include <linux/of_device.h>
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_reserved_mem.h>
25#include <linux/platform_device.h> 24#include <linux/platform_device.h>
26 25
27const struct of_device_id of_default_bus_match_table[] = { 26const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
219 dev->dev.bus = &platform_bus_type; 218 dev->dev.bus = &platform_bus_type;
220 dev->dev.platform_data = platform_data; 219 dev->dev.platform_data = platform_data;
221 220
222 of_reserved_mem_device_init(&dev->dev);
223
224 /* We do not fill the DMA ops for platform devices by default. 221 /* We do not fill the DMA ops for platform devices by default.
225 * This is currently the responsibility of the platform code 222 * This is currently the responsibility of the platform code
226 * to do such, possibly using a device notifier 223 * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
228 225
229 if (of_device_add(dev) != 0) { 226 if (of_device_add(dev) != 0) {
230 platform_device_put(dev); 227 platform_device_put(dev);
231 of_reserved_mem_device_release(&dev->dev);
232 return NULL; 228 return NULL;
233 } 229 }
234 230
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b7d23b4ad95..be12fbfcae10 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -994,14 +994,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
994 994
995 /* 995 /*
996 * This bridge should have been registered as a hotplug function 996 * This bridge should have been registered as a hotplug function
997 * under its parent, so the context has to be there. If not, we 997 * under its parent, so the context should be there, unless the
998 * are in deep goo. 998 * parent is going to be handled by pciehp, in which case this
999 * bridge is not interesting to us either.
999 */ 1000 */
1000 mutex_lock(&acpiphp_context_lock); 1001 mutex_lock(&acpiphp_context_lock);
1001 context = acpiphp_get_context(handle); 1002 context = acpiphp_get_context(handle);
1002 if (WARN_ON(!context)) { 1003 if (!context) {
1003 mutex_unlock(&acpiphp_context_lock); 1004 mutex_unlock(&acpiphp_context_lock);
1004 put_device(&bus->dev); 1005 put_device(&bus->dev);
1006 pci_dev_put(bridge->pci_dev);
1005 kfree(bridge); 1007 kfree(bridge);
1006 return; 1008 return;
1007 } 1009 }
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
1155 1155
1156 pci_enable_bridge(dev->bus->self); 1156 pci_enable_bridge(dev->bus->self);
1157 1157
1158 if (pci_is_enabled(dev)) 1158 if (pci_is_enabled(dev)) {
1159 if (!dev->is_busmaster) {
1160 dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
1161 pci_set_master(dev);
1162 }
1159 return; 1163 return;
1164 }
1165
1160 retval = pci_enable_device(dev); 1166 retval = pci_enable_device(dev);
1161 if (retval) 1167 if (retval)
1162 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1168 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a138965c01cb..b8fcc38c0d11 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -490,7 +490,7 @@ exit:
490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps 490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps
491 * <newvalue> reflects the new config and is driver dependant 491 * <newvalue> reflects the new config and is driver dependant
492 */ 492 */
493static int pinconf_dbg_config_write(struct file *file, 493static ssize_t pinconf_dbg_config_write(struct file *file,
494 const char __user *user_buf, size_t count, loff_t *ppos) 494 const char __user *user_buf, size_t count, loff_t *ppos)
495{ 495{
496 struct pinctrl_maps *maps_node; 496 struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
508 int i; 508 int i;
509 509
510 /* Get userspace string and assure termination */ 510 /* Get userspace string and assure termination */
511 buf_size = min(count, (size_t)(sizeof(buf)-1)); 511 buf_size = min(count, sizeof(buf) - 1);
512 if (copy_from_user(buf, user_buf, buf_size)) 512 if (copy_from_user(buf, user_buf, buf_size))
513 return -EFAULT; 513 return -EFAULT;
514 buf[buf_size] = 0; 514 buf[buf_size] = 0;
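The pinconf hunk above fixes the debugfs write handler's return type to ssize_t and drops the redundant cast inside min(); the underlying pattern is the usual bounded copy of a user buffer followed by explicit NUL termination. A small standalone sketch under that assumption (plain memcpy stands in for copy_from_user, and the buffer size is arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Copy at most sizeof(buf) - 1 bytes and always NUL-terminate. */
    static ssize_t bounded_write(const char *user_buf, size_t count)
    {
        char buf[128];
        size_t buf_size = MIN(count, sizeof(buf) - 1);

        memcpy(buf, user_buf, buf_size);    /* copy_from_user() analogue */
        buf[buf_size] = '\0';

        printf("parsed %zu bytes: \"%s\"\n", buf_size, buf);
        return (ssize_t)count;              /* report everything as consumed */
    }

    int main(void)
    {
        const char msg[] = "dev state pin value";

        return bounded_write(msg, sizeof(msg) - 1) < 0;
    }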
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 2689f8d01a1e..155b1b3a0e7a 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
663/* pin banks of s5pv210 pin-controller */ 663/* pin banks of s5pv210 pin-controller */
664static struct samsung_pin_bank s5pv210_pin_bank[] = { 664static struct samsung_pin_bank s5pv210_pin_bank[] = {
665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
666 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 666 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), 668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), 669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), 670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
671 EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), 671 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
672 EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), 672 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
673 EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), 673 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
674 EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), 674 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), 675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), 676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
677 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), 677 EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), 678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), 679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), 680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 82638fac3cfa..30c4d356cb33 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
891 param = pinconf_to_config_param(configs[i]); 891 param = pinconf_to_config_param(configs[i]);
892 param_val = pinconf_to_config_argument(configs[i]); 892 param_val = pinconf_to_config_argument(configs[i]);
893 893
894 if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
895 continue;
896
894 switch (param) { 897 switch (param) {
895 case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
896 return 0;
897 case PIN_CONFIG_BIAS_DISABLE: 898 case PIN_CONFIG_BIAS_DISABLE:
898 case PIN_CONFIG_BIAS_PULL_UP: 899 case PIN_CONFIG_BIAS_PULL_UP:
899 case PIN_CONFIG_BIAS_PULL_DOWN: 900 case PIN_CONFIG_BIAS_PULL_DOWN:
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 622c4854977e..93c9e3899d5e 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Arthur: Pritesh Raithatha <praithatha@nvidia.com> 6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
2763}; 2763};
2764module_platform_driver(tegra114_pinctrl_driver); 2764module_platform_driver(tegra114_pinctrl_driver);
2765 2765
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); 2766MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); 2767MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
2769MODULE_LICENSE("GPL v2"); 2768MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 96d6b2eef4f2..b51a7460cc49 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -504,6 +504,7 @@ config ASUS_WMI
504 depends on BACKLIGHT_CLASS_DEVICE 504 depends on BACKLIGHT_CLASS_DEVICE
505 depends on RFKILL || RFKILL = n 505 depends on RFKILL || RFKILL = n
506 depends on HOTPLUG_PCI 506 depends on HOTPLUG_PCI
507 depends on ACPI_VIDEO || ACPI_VIDEO = n
507 select INPUT_SPARSEKMAP 508 select INPUT_SPARSEKMAP
508 select LEDS_CLASS 509 select LEDS_CLASS
509 select NEW_LEDS 510 select NEW_LEDS
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 3b32852d6d2d..47caab0ea7a1 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
127 "default is -1 (automatic)"); 127 "default is -1 (automatic)");
128#endif 128#endif
129 129
130static int kbd_backlight = 1; 130static int kbd_backlight = -1;
131module_param(kbd_backlight, int, 0444); 131module_param(kbd_backlight, int, 0444);
132MODULE_PARM_DESC(kbd_backlight, 132MODULE_PARM_DESC(kbd_backlight,
133 "set this to 0 to disable keyboard backlight, " 133 "set this to 0 to disable keyboard backlight, "
134 "1 to enable it (default: 0)"); 134 "1 to enable it (default: no change from current value)");
135 135
136static int kbd_backlight_timeout; /* = 0 */ 136static int kbd_backlight_timeout = -1;
137module_param(kbd_backlight_timeout, int, 0444); 137module_param(kbd_backlight_timeout, int, 0444);
138MODULE_PARM_DESC(kbd_backlight_timeout, 138MODULE_PARM_DESC(kbd_backlight_timeout,
139 "set this to 0 to set the default 10 seconds timeout, " 139 "meaningful values vary from 0 to 3 and their meaning depends "
140 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 140 "on the model (default: no change from current value)");
141 "(default: 0)");
142 141
143#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
144static void sony_nc_kbd_backlight_resume(void); 143static void sony_nc_kbd_backlight_resume(void);
@@ -1841,6 +1840,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1841 if (!kbdbl_ctl) 1840 if (!kbdbl_ctl)
1842 return -ENOMEM; 1841 return -ENOMEM;
1843 1842
1843 kbdbl_ctl->mode = kbd_backlight;
1844 kbdbl_ctl->timeout = kbd_backlight_timeout;
1844 kbdbl_ctl->handle = handle; 1845 kbdbl_ctl->handle = handle;
1845 if (handle == 0x0137) 1846 if (handle == 0x0137)
1846 kbdbl_ctl->base = 0x0C00; 1847 kbdbl_ctl->base = 0x0C00;
@@ -1867,8 +1868,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1867 if (ret) 1868 if (ret)
1868 goto outmode; 1869 goto outmode;
1869 1870
1870 __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1871 __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
1871 __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); 1872 __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
1872 1873
1873 return 0; 1874 return 0;
1874 1875
@@ -1883,17 +1884,8 @@ outkzalloc:
1883static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1884static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1884{ 1885{
1885 if (kbdbl_ctl) { 1886 if (kbdbl_ctl) {
1886 int result;
1887
1888 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1887 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1889 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1888 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1890
1891 /* restore the default hw behaviour */
1892 sony_call_snc_handle(kbdbl_ctl->handle,
1893 kbdbl_ctl->base | 0x10000, &result);
1894 sony_call_snc_handle(kbdbl_ctl->handle,
1895 kbdbl_ctl->base + 0x200, &result);
1896
1897 kfree(kbdbl_ctl); 1889 kfree(kbdbl_ctl);
1898 kbdbl_ctl = NULL; 1890 kbdbl_ctl = NULL;
1899 } 1891 }
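The sony-laptop change switches both keyboard-backlight module parameters to a default of -1, a sentinel meaning "leave whatever the firmware currently has set". A tiny illustrative sketch of that convention (values and names here are made up, not the driver's):

    #include <stdio.h>

    /* -1 is the sentinel for "do not change the current firmware setting". */
    static int kbd_backlight = -1;

    static void apply_setting(int *current_mode, int requested)
    {
        if (requested < 0)                  /* sentinel: keep the current value */
            return;
        *current_mode = requested;
    }

    int main(void)
    {
        int mode = 1;                       /* value left behind by the firmware */

        apply_setting(&mode, kbd_backlight);    /* default: untouched */
        printf("after default: %d\n", mode);

        apply_setting(&mode, 0);                /* user explicitly asked for off */
        printf("after user request: %d\n", mode);
        return 0;
    }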
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 1a7816390773..b9f2653e4ef9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
709 struct of_regulator_match **da9063_reg_matches) 709 struct of_regulator_match **da9063_reg_matches)
710{ 710{
711 da9063_reg_matches = NULL; 711 da9063_reg_matches = NULL;
712 return PTR_ERR(-ENODEV); 712 return ERR_PTR(-ENODEV);
713} 713}
714#endif 714#endif
715 715
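The da9063 one-liner swaps PTR_ERR(), which decodes an error code out of a pointer, for ERR_PTR(), which encodes an error code into a pointer-sized return value. A simplified userspace approximation of those helpers, close in spirit to the kernel's err.h but not a copy of it:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    static inline void *ERR_PTR(long error)     /* encode -errno in a pointer */
    {
        return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr) /* decode it back */
    {
        return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *parse_regulators(int have_dt)
    {
        if (!have_dt)
            return ERR_PTR(-ENODEV);        /* the corrected call: encode, don't decode */
        return "parsed";
    }

    int main(void)
    {
        void *res = parse_regulators(0);

        if (IS_ERR(res))
            printf("error: %ld\n", PTR_ERR(res));
        return 0;
    }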
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 488dfe7ce9a6..7e2b165972e6 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
201#define SMPS_CTRL_MODE_ECO 0x02 201#define SMPS_CTRL_MODE_ECO 0x02
202#define SMPS_CTRL_MODE_PWM 0x03 202#define SMPS_CTRL_MODE_PWM 0x03
203 203
204/* These values are derived from the data sheet. And are the number of steps 204#define PALMAS_SMPS_NUM_VOLTAGES 122
205 * where there is a voltage change, the ranges at beginning and end of register
206 * max/min values where there are no change are ommitted.
207 *
208 * So they are basically (maxV-minV)/stepV
209 */
210#define PALMAS_SMPS_NUM_VOLTAGES 117
211#define PALMAS_SMPS10_NUM_VOLTAGES 2 205#define PALMAS_SMPS10_NUM_VOLTAGES 2
212#define PALMAS_LDO_NUM_VOLTAGES 50 206#define PALMAS_LDO_NUM_VOLTAGES 50
213 207
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
979 pmic->desc[id].min_uV = 900000; 973 pmic->desc[id].min_uV = 900000;
980 pmic->desc[id].uV_step = 50000; 974 pmic->desc[id].uV_step = 50000;
981 pmic->desc[id].linear_min_sel = 1; 975 pmic->desc[id].linear_min_sel = 1;
976 pmic->desc[id].enable_time = 500;
982 pmic->desc[id].vsel_reg = 977 pmic->desc[id].vsel_reg =
983 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, 978 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
984 palmas_regs_info[id].vsel_addr); 979 palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
997 pmic->desc[id].min_uV = 450000; 992 pmic->desc[id].min_uV = 450000;
998 pmic->desc[id].uV_step = 25000; 993 pmic->desc[id].uV_step = 25000;
999 } 994 }
995
996 /* LOD6 in vibrator mode will have enable time 2000us */
997 if (pdata && pdata->ldo6_vibrator &&
998 (id == PALMAS_REG_LDO6))
999 pmic->desc[id].enable_time = 2000;
1000 } else { 1000 } else {
1001 pmic->desc[id].n_voltages = 1; 1001 pmic->desc[id].n_voltages = 1;
1002 pmic->desc[id].ops = &palmas_ops_extreg; 1002 pmic->desc[id].ops = &palmas_ops_extreg;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d8e3e1262bc2..20c271d49dcb 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, 279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
280 abb->base); 280 abb->base);
281 281
282 /* program LDO VBB vset override if needed */ 282 /*
283 if (abb->ldo_base) 283 * program LDO VBB vset override if needed for !bypass mode
284 * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
285 * be performed *before* switch to bias mode else VBB glitches.
286 */
287 if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
284 ti_abb_program_ldovbb(dev, abb, info); 288 ti_abb_program_ldovbb(dev, abb, info);
285 289
286 /* Initiate ABB ldo change */ 290 /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
295 if (ret) 299 if (ret)
296 goto out; 300 goto out;
297 301
302 /*
303 * Reset LDO VBB vset override bypass mode
304 * XXX: Do not switch sequence - for bypass, LDO override reset *must*
305 * be performed *after* switch to bypass else VBB glitches.
306 */
307 if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
308 ti_abb_program_ldovbb(dev, abb, info);
309
298out: 310out:
299 return ret; 311 return ret;
300} 312}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 1432b26ef2e9..2205fbc2c37b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
63 */ 63 */
64 64
65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { 65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
66 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, 66 { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
67 .uV_step = 50000 }, 67 .uV_step = 50000 },
68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, 68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
69 .uV_step = 100000 }, 69 .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
332 */ 332 */
333 333
334static const struct regulator_linear_range wm831x_aldo_ranges[] = { 334static const struct regulator_linear_range wm831x_aldo_ranges[] = {
335 { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, 335 { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
336 .uV_step = 50000 }, 336 .uV_step = 50000 },
337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, 337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
338 .uV_step = 100000 }, 338 .uV_step = 100000 },
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 835b5f0f344e..61ca9292a429 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
543} 543}
544 544
545static const struct regulator_linear_range wm8350_ldo_ranges[] = { 545static const struct regulator_linear_range wm8350_ldo_ranges[] = {
546 { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, 546 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
547 .uV_step = 50000 }, 547 .uV_step = 50000 },
548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, 548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
549 .uV_step = 100000 }, 549 .uV_step = 100000 },
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5adb2042e824..cee7e2708a1f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
2077 int intensity = 0; 2077 int intensity = 0;
2078 int r0_perm; 2078 int r0_perm;
2079 int nr_tracks; 2079 int nr_tracks;
2080 int use_prefix;
2080 2081
2081 startdev = dasd_alias_get_start_dev(base); 2082 startdev = dasd_alias_get_start_dev(base);
2082 if (!startdev) 2083 if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
2106 intensity = fdata->intensity; 2107 intensity = fdata->intensity;
2107 } 2108 }
2108 2109
2110 use_prefix = base_priv->features.feature[8] & 0x01;
2111
2109 switch (intensity) { 2112 switch (intensity) {
2110 case 0x00: /* Normal format */ 2113 case 0x00: /* Normal format */
2111 case 0x08: /* Normal format, use cdl. */ 2114 case 0x08: /* Normal format, use cdl. */
2112 cplength = 2 + (rpt*nr_tracks); 2115 cplength = 2 + (rpt*nr_tracks);
2113 datasize = sizeof(struct PFX_eckd_data) + 2116 if (use_prefix)
2114 sizeof(struct LO_eckd_data) + 2117 datasize = sizeof(struct PFX_eckd_data) +
2115 rpt * nr_tracks * sizeof(struct eckd_count); 2118 sizeof(struct LO_eckd_data) +
2119 rpt * nr_tracks * sizeof(struct eckd_count);
2120 else
2121 datasize = sizeof(struct DE_eckd_data) +
2122 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count);
2116 break; 2124 break;
2117 case 0x01: /* Write record zero and format track. */ 2125 case 0x01: /* Write record zero and format track. */
2118 case 0x09: /* Write record zero and format track, use cdl. */ 2126 case 0x09: /* Write record zero and format track, use cdl. */
2119 cplength = 2 + rpt * nr_tracks; 2127 cplength = 2 + rpt * nr_tracks;
2120 datasize = sizeof(struct PFX_eckd_data) + 2128 if (use_prefix)
2121 sizeof(struct LO_eckd_data) + 2129 datasize = sizeof(struct PFX_eckd_data) +
2122 sizeof(struct eckd_count) + 2130 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count); 2131 sizeof(struct eckd_count) +
2132 rpt * nr_tracks * sizeof(struct eckd_count);
2133 else
2134 datasize = sizeof(struct DE_eckd_data) +
2135 sizeof(struct LO_eckd_data) +
2136 sizeof(struct eckd_count) +
2137 rpt * nr_tracks * sizeof(struct eckd_count);
2124 break; 2138 break;
2125 case 0x04: /* Invalidate track. */ 2139 case 0x04: /* Invalidate track. */
2126 case 0x0c: /* Invalidate track, use cdl. */ 2140 case 0x0c: /* Invalidate track, use cdl. */
2127 cplength = 3; 2141 cplength = 3;
2128 datasize = sizeof(struct PFX_eckd_data) + 2142 if (use_prefix)
2129 sizeof(struct LO_eckd_data) + 2143 datasize = sizeof(struct PFX_eckd_data) +
2130 sizeof(struct eckd_count); 2144 sizeof(struct LO_eckd_data) +
2145 sizeof(struct eckd_count);
2146 else
2147 datasize = sizeof(struct DE_eckd_data) +
2148 sizeof(struct LO_eckd_data) +
2149 sizeof(struct eckd_count);
2131 break; 2150 break;
2132 default: 2151 default:
2133 dev_warn(&startdev->cdev->dev, 2152 dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
2147 2166
2148 switch (intensity & ~0x08) { 2167 switch (intensity & ~0x08) {
2149 case 0x00: /* Normal format. */ 2168 case 0x00: /* Normal format. */
2150 prefix(ccw++, (struct PFX_eckd_data *) data, 2169 if (use_prefix) {
2151 fdata->start_unit, fdata->stop_unit, 2170 prefix(ccw++, (struct PFX_eckd_data *) data,
2152 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2171 fdata->start_unit, fdata->stop_unit,
2153 /* grant subsystem permission to format R0 */ 2172 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2154 if (r0_perm) 2173 /* grant subsystem permission to format R0 */
2155 ((struct PFX_eckd_data *)data) 2174 if (r0_perm)
2156 ->define_extent.ga_extended |= 0x04; 2175 ((struct PFX_eckd_data *)data)
2157 data += sizeof(struct PFX_eckd_data); 2176 ->define_extent.ga_extended |= 0x04;
2177 data += sizeof(struct PFX_eckd_data);
2178 } else {
2179 define_extent(ccw++, (struct DE_eckd_data *) data,
2180 fdata->start_unit, fdata->stop_unit,
2181 DASD_ECKD_CCW_WRITE_CKD, startdev);
2182 /* grant subsystem permission to format R0 */
2183 if (r0_perm)
2184 ((struct DE_eckd_data *) data)
2185 ->ga_extended |= 0x04;
2186 data += sizeof(struct DE_eckd_data);
2187 }
2158 ccw[-1].flags |= CCW_FLAG_CC; 2188 ccw[-1].flags |= CCW_FLAG_CC;
2159 locate_record(ccw++, (struct LO_eckd_data *) data, 2189 locate_record(ccw++, (struct LO_eckd_data *) data,
2160 fdata->start_unit, 0, rpt*nr_tracks, 2190 fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
2163 data += sizeof(struct LO_eckd_data); 2193 data += sizeof(struct LO_eckd_data);
2164 break; 2194 break;
2165 case 0x01: /* Write record zero + format track. */ 2195 case 0x01: /* Write record zero + format track. */
2166 prefix(ccw++, (struct PFX_eckd_data *) data, 2196 if (use_prefix) {
2167 fdata->start_unit, fdata->stop_unit, 2197 prefix(ccw++, (struct PFX_eckd_data *) data,
2168 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2198 fdata->start_unit, fdata->stop_unit,
2169 base, startdev); 2199 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2170 data += sizeof(struct PFX_eckd_data); 2200 base, startdev);
2201 data += sizeof(struct PFX_eckd_data);
2202 } else {
2203 define_extent(ccw++, (struct DE_eckd_data *) data,
2204 fdata->start_unit, fdata->stop_unit,
2205 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
2206 data += sizeof(struct DE_eckd_data);
2207 }
2171 ccw[-1].flags |= CCW_FLAG_CC; 2208 ccw[-1].flags |= CCW_FLAG_CC;
2172 locate_record(ccw++, (struct LO_eckd_data *) data, 2209 locate_record(ccw++, (struct LO_eckd_data *) data,
2173 fdata->start_unit, 0, rpt * nr_tracks + 1, 2210 fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
2176 data += sizeof(struct LO_eckd_data); 2213 data += sizeof(struct LO_eckd_data);
2177 break; 2214 break;
2178 case 0x04: /* Invalidate track. */ 2215 case 0x04: /* Invalidate track. */
2179 prefix(ccw++, (struct PFX_eckd_data *) data, 2216 if (use_prefix) {
2180 fdata->start_unit, fdata->stop_unit, 2217 prefix(ccw++, (struct PFX_eckd_data *) data,
2181 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2218 fdata->start_unit, fdata->stop_unit,
2182 data += sizeof(struct PFX_eckd_data); 2219 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2220 data += sizeof(struct PFX_eckd_data);
2221 } else {
2222 define_extent(ccw++, (struct DE_eckd_data *) data,
2223 fdata->start_unit, fdata->stop_unit,
2224 DASD_ECKD_CCW_WRITE_CKD, startdev);
2225 data += sizeof(struct DE_eckd_data);
2226 }
2183 ccw[-1].flags |= CCW_FLAG_CC; 2227 ccw[-1].flags |= CCW_FLAG_CC;
2184 locate_record(ccw++, (struct LO_eckd_data *) data, 2228 locate_record(ccw++, (struct LO_eckd_data *) data,
2185 fdata->start_unit, 0, 1, 2229 fdata->start_unit, 0, 1,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a3aa374799dc..1fe264379e0d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
486 timeout = 0; 486 timeout = 0;
487 if (timer_pending(&sclp_request_timer)) { 487 if (timer_pending(&sclp_request_timer)) {
488 /* Get timeout TOD value */ 488 /* Get timeout TOD value */
489 timeout = get_tod_clock() + 489 timeout = get_tod_clock_fast() +
490 sclp_tod_from_jiffies(sclp_request_timer.expires - 490 sclp_tod_from_jiffies(sclp_request_timer.expires -
491 jiffies); 491 jiffies);
492 } 492 }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
508 while (sclp_running_state != sclp_running_state_idle) { 508 while (sclp_running_state != sclp_running_state_idle) {
509 /* Check for expired request timer */ 509 /* Check for expired request timer */
510 if (timer_pending(&sclp_request_timer) && 510 if (timer_pending(&sclp_request_timer) &&
511 get_tod_clock() > timeout && 511 get_tod_clock_fast() > timeout &&
512 del_timer(&sclp_request_timer)) 512 del_timer(&sclp_request_timer))
513 sclp_request_timer.function(sclp_request_timer.data); 513 sclp_request_timer.function(sclp_request_timer.data);
514 cpu_relax(); 514 cpu_relax();
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 8cd34bf644b3..77df9cb00688 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void)
145 145
146 if (sccb->header.response_code != 0x20) 146 if (sccb->header.response_code != 0x20)
147 return 0; 147 return 0;
148 if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) 148 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
149 return 1; 149 return 0;
150 return 0; 150 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
151 return 0;
152 return 1;
151} 153}
152 154
153bool __init sclp_has_vt220(void) 155bool __init sclp_has_vt220(void)
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index a0f47c83fd62..3f4ca4e09a4c 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work)
810 struct winsize ws; 810 struct winsize ws;
811 811
812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); 812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
813 if (!screen) 813 if (IS_ERR(screen))
814 return; 814 return;
815 /* Switch to new output size */ 815 /* Switch to new output size */
816 spin_lock_bh(&tp->view.lock); 816 spin_lock_bh(&tp->view.lock);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a0..cf31d3321dab 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
313 int ret; 313 int ret;
314 314
315 dev_num = iminor(inode); 315 dev_num = iminor(inode);
316 if (dev_num > MAXMINOR) 316 if (dev_num >= MAXMINOR)
317 return -ENODEV; 317 return -ENODEV;
318 logptr = &sys_ser[dev_num]; 318 logptr = &sys_ser[dev_num];
319 319
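The vmlogrdr fix tightens the minor-number check from > to >=, since MAXMINOR is the array size and valid indices stop one short of it. The same off-by-one in miniature:

    #include <stdio.h>

    #define MAXMINOR 4

    static int table[MAXMINOR];             /* valid indices are 0 .. MAXMINOR - 1 */

    static int lookup(int idx)
    {
        if (idx >= MAXMINOR)                /* ">" would let idx == MAXMINOR through */
            return -1;
        return table[idx];
    }

    int main(void)
    {
        printf("%d %d\n", lookup(MAXMINOR - 1), lookup(MAXMINOR));
        return 0;
    }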
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d7da67a31c77..88e35d85d205 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -878,9 +878,9 @@ static void css_reset(void)
878 atomic_inc(&chpid_reset_count); 878 atomic_inc(&chpid_reset_count);
879 } 879 }
880 /* Wait for machine check for all channel paths. */ 880 /* Wait for machine check for all channel paths. */
881 timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 881 timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
882 while (atomic_read(&chpid_reset_count) != 0) { 882 while (atomic_read(&chpid_reset_count) != 0) {
883 if (get_tod_clock() > timeout) 883 if (get_tod_clock_fast() > timeout)
884 break; 884 break;
885 cpu_relax(); 885 cpu_relax();
886 } 886 }
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 8ed52aa49122..bbd3e511c771 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
338 retries++; 338 retries++;
339 339
340 if (!start_time) { 340 if (!start_time) {
341 start_time = get_tod_clock(); 341 start_time = get_tod_clock_fast();
342 goto again; 342 goto again;
343 } 343 }
344 if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
345 goto again; 345 goto again;
346 } 346 }
347 if (retries) { 347 if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
504 int count, stop; 504 int count, stop;
505 unsigned char state = 0; 505 unsigned char state = 0;
506 506
507 q->timestamp = get_tod_clock(); 507 q->timestamp = get_tod_clock_fast();
508 508
509 /* 509 /*
510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
595 * At this point we know, that inbound first_to_check 595 * At this point we know, that inbound first_to_check
596 * has (probably) not moved (see qdio_inbound_processing). 596 * has (probably) not moved (see qdio_inbound_processing).
597 */ 597 */
598 if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
600 q->first_to_check); 600 q->first_to_check);
601 return 1; 601 return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
728 int count, stop; 728 int count, stop;
729 unsigned char state = 0; 729 unsigned char state = 0;
730 730
731 q->timestamp = get_tod_clock(); 731 q->timestamp = get_tod_clock_fast();
732 732
733 if (need_siga_sync(q)) 733 if (need_siga_sync(q))
734 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 734 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..757eb0716d45 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, 696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
698 pci_device)) != NULL) { 698 pci_device)) != NULL) {
699 struct blogic_adapter *adapter = adapter; 699 struct blogic_adapter *host_adapter = adapter;
700 struct blogic_adapter_info adapter_info; 700 struct blogic_adapter_info adapter_info;
701 enum blogic_isa_ioport mod_ioaddr_req; 701 enum blogic_isa_ioport mod_ioaddr_req;
702 unsigned char bus; 702 unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
744 known and enabled, note that the particular Standard ISA I/O 744 known and enabled, note that the particular Standard ISA I/O
745 Address should not be probed. 745 Address should not be probed.
746 */ 746 */
747 adapter->io_addr = io_addr; 747 host_adapter->io_addr = io_addr;
748 blogic_intreset(adapter); 748 blogic_intreset(host_adapter);
749 if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 749 if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
750 &adapter_info, sizeof(adapter_info)) == 750 &adapter_info, sizeof(adapter_info)) ==
751 sizeof(adapter_info)) { 751 sizeof(adapter_info)) {
752 if (adapter_info.isa_port < 6) 752 if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
762 I/O Address assigned at system initialization. 762 I/O Address assigned at system initialization.
763 */ 763 */
764 mod_ioaddr_req = BLOGIC_IO_DISABLE; 764 mod_ioaddr_req = BLOGIC_IO_DISABLE;
765 blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 765 blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
766 sizeof(mod_ioaddr_req), NULL, 0); 766 sizeof(mod_ioaddr_req), NULL, 0);
767 /* 767 /*
768 For the first MultiMaster Host Adapter enumerated, 768 For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
779 779
780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; 780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
781 fetch_localram.count = sizeof(autoscsi_byte45); 781 fetch_localram.count = sizeof(autoscsi_byte45);
782 blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, 782 blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
783 &fetch_localram, sizeof(fetch_localram), 783 &fetch_localram, sizeof(fetch_localram),
784 &autoscsi_byte45, 784 &autoscsi_byte45,
785 sizeof(autoscsi_byte45)); 785 sizeof(autoscsi_byte45));
786 blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, 786 blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
787 sizeof(id)); 787 &id, sizeof(id));
788 if (id.fw_ver_digit1 == '5') 788 if (id.fw_ver_digit1 == '5')
789 force_scan_order = 789 force_scan_order =
790 autoscsi_byte45.force_scan_order; 790 autoscsi_byte45.force_scan_order;
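The BusLogic change renames the inner variable so it no longer reads "struct blogic_adapter *adapter = adapter;", which shadowed the function parameter and initialized the new pointer from its own indeterminate value. A stripped-down illustration of the corrected form (types simplified to keep it self-contained):

    #include <stdio.h>

    static void probe(int *adapter)
    {
        /*
         * Buggy form (what the patch removes):
         *     int *adapter = adapter;
         * The new local shadows the parameter and is initialized from
         * itself, so it holds an indeterminate value.
         */
        int *host_adapter = adapter;        /* distinct name: copies the parameter */

        printf("%d\n", *host_adapter);
    }

    int main(void)
    {
        int io_addr = 0x330;

        probe(&io_addr);
        return 0;
    }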
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2ef497ebadc0..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -20,7 +20,7 @@
20 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
21 * | | | 0x2011-0x2012, | 21 * | | | 0x2011-0x2012, |
22 * | | | 0x2016 | 22 * | | | 0x2016 |
23 * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | 23 * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
24 * | | | 0x3027-0x3028 | 24 * | | | 0x3027-0x3028 |
25 * | | | 0x303d-0x3041 | 25 * | | | 0x303d-0x3041 |
26 * | | | 0x302d,0x3033 | 26 * | | | 0x302d,0x3033 |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938c..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1957 que = MSW(sts->handle); 1957 que = MSW(sts->handle);
1958 req = ha->req_q_map[que]; 1958 req = ha->req_q_map[que];
1959 1959
1960 /* Check for invalid queue pointer */
1961 if (req == NULL ||
1962 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
1963 ql_dbg(ql_dbg_io, vha, 0x3059,
1964 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1965 "que=%u.\n", sts->handle, req, que);
1966 return;
1967 }
1968
1960 /* Validate handle. */ 1969 /* Validate handle. */
1961 if (handle < req->num_outstanding_cmds) 1970 if (handle < req->num_outstanding_cmds)
1962 sp = req->outstanding_cmds[handle]; 1971 sp = req->outstanding_cmds[handle];
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e62d17d41d4e..5693f6d7eddb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2854 gd->events |= DISK_EVENT_MEDIA_CHANGE; 2854 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2855 } 2855 }
2856 2856
2857 blk_pm_runtime_init(sdp->request_queue, dev);
2857 add_disk(gd); 2858 add_disk(gd);
2858 if (sdkp->capacity) 2859 if (sdkp->capacity)
2859 sd_dif_config_host(sdkp); 2860 sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2862 2863
2863 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2864 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2864 sdp->removable ? "removable " : ""); 2865 sdp->removable ? "removable " : "");
2865 blk_pm_runtime_init(sdp->request_queue, dev);
2866 scsi_autopm_put_device(sdp); 2866 scsi_autopm_put_device(sdp);
2867 put_device(&sdkp->dev); 2867 put_device(&sdkp->dev);
2868} 2868}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index fd7cc566095a..d4ac60b4a56e 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1583 /* Initialize the hardware */ 1583 /* Initialize the hardware */
1584 ret = clk_prepare_enable(clk); 1584 ret = clk_prepare_enable(clk);
1585 if (ret) 1585 if (ret)
1586 goto out_unmap_regs; 1586 goto out_free_irq;
1587 spi_writel(as, CR, SPI_BIT(SWRST)); 1587 spi_writel(as, CR, SPI_BIT(SWRST));
1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1589 if (as->caps.has_wdrbt) { 1589 if (as->caps.has_wdrbt) {
@@ -1614,6 +1614,7 @@ out_free_dma:
1614 spi_writel(as, CR, SPI_BIT(SWRST)); 1614 spi_writel(as, CR, SPI_BIT(SWRST));
1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1616 clk_disable_unprepare(clk); 1616 clk_disable_unprepare(clk);
1617out_free_irq:
1617 free_irq(irq, master); 1618 free_irq(irq, master);
1618out_unmap_regs: 1619out_unmap_regs:
1619 iounmap(as->regs); 1620 iounmap(as->regs);
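The spi-atmel hunk adds an out_free_irq label so that a clk_prepare_enable() failure also releases the IRQ requested just before it: error paths unwind resources in the reverse order they were acquired. A compact sketch of that goto ladder with stand-in acquire/release helpers:

    #include <stdio.h>

    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(int fail_clk)
    {
        int ret;

        ret = acquire("regs");
        if (ret)
            goto out;
        ret = acquire("irq");
        if (ret)
            goto out_unmap_regs;
        ret = fail_clk ? -19 : acquire("clk");
        if (ret)
            goto out_free_irq;              /* the label the patch adds: undo the irq too */

        return 0;

    out_free_irq:
        release("irq");
    out_unmap_regs:
        release("regs");
    out:
        return ret;
    }

    int main(void)
    {
        probe(1);                           /* clk failure unwinds irq, then regs */
        return 0;
    }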
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 5655acf55bfe..6416798828e7 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
226 dev_name(&pdev->dev), hw); 226 dev_name(&pdev->dev), hw);
227 if (ret) { 227 if (ret) {
228 dev_err(&pdev->dev, "Can't request IRQ\n"); 228 dev_err(&pdev->dev, "Can't request IRQ\n");
229 clk_put(hw->spi_clk);
230 goto clk_out; 229 goto clk_out;
231 } 230 }
232 231
@@ -247,7 +246,6 @@ err_out:
247 gpio_free(hw->chipselect[i]); 246 gpio_free(hw->chipselect[i]);
248 247
249 spi_master_put(master); 248 spi_master_put(master);
250 kfree(master);
251 249
252 return ret; 250 return ret;
253} 251}
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
263 gpio_free(hw->chipselect[i]); 261 gpio_free(hw->chipselect[i]);
264 262
265 spi_unregister_master(master); 263 spi_unregister_master(master);
266 kfree(master);
267 264
268 return 0; 265 return 0;
269} 266}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 6cd07d13ecab..4e44575bd87a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev)
476 master->bus_num = bus_num; 476 master->bus_num = bus_num;
477 477
478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
479 if (!res) {
480 dev_err(&pdev->dev, "can't get platform resource\n");
481 ret = -EINVAL;
482 goto out_master_put;
483 }
484
485 dspi->base = devm_ioremap_resource(&pdev->dev, res); 479 dspi->base = devm_ioremap_resource(&pdev->dev, res);
486 if (!dspi->base) { 480 if (IS_ERR(dspi->base)) {
487 ret = -EINVAL; 481 ret = PTR_ERR(dspi->base);
488 goto out_master_put; 482 goto out_master_put;
489 } 483 }
490 484
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index dbc5e999a1f5..6adf4e35816d 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
522 psc_num = master->bus_num; 522 psc_num = master->bus_num;
523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); 523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
524 clk = devm_clk_get(dev, clk_name); 524 clk = devm_clk_get(dev, clk_name);
525 if (IS_ERR(clk)) 525 if (IS_ERR(clk)) {
526 ret = PTR_ERR(clk);
526 goto free_irq; 527 goto free_irq;
528 }
527 ret = clk_prepare_enable(clk); 529 ret = clk_prepare_enable(clk);
528 if (ret) 530 if (ret)
529 goto free_irq; 531 goto free_irq;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2eb06ee0b326..c1a50674c1e3 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
546 if (pm_runtime_suspended(&drv_data->pdev->dev)) 546 if (pm_runtime_suspended(&drv_data->pdev->dev))
547 return IRQ_NONE; 547 return IRQ_NONE;
548 548
549 sccr1_reg = read_SSCR1(reg); 549 /*
550 * If the device is not yet in RPM suspended state and we get an
551 * interrupt that is meant for another device, check if status bits
552 * are all set to one. That means that the device is already
553 * powered off.
554 */
550 status = read_SSSR(reg); 555 status = read_SSSR(reg);
556 if (status == ~0)
557 return IRQ_NONE;
558
559 sccr1_reg = read_SSCR1(reg);
551 560
552 /* Ignore possible writes if we don't need to write */ 561 /* Ignore possible writes if we don't need to write */
553 if (!(sccr1_reg & SSCR1_TIE)) 562 if (!(sccr1_reg & SSCR1_TIE))
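The pxa2xx comment explains why the handler now reads SSSR first: on a shared interrupt line, a device that has already been powered off reads back all ones, so the interrupt cannot be ours and the handler should bail out before touching anything else. A rough standalone sketch of that early return (register access is simulated with a plain array):

    #include <stdio.h>
    #include <stdint.h>

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    /* Illustrative: a powered-down MMIO block typically reads back as all ones. */
    static uint32_t read_status(const uint32_t *regs)
    {
        return regs[0];
    }

    static enum irqreturn ssp_int(const uint32_t *regs)
    {
        uint32_t status = read_status(regs);

        if (status == ~0u)                  /* device is off; interrupt is not ours */
            return IRQ_NONE;

        /* ... real handling would inspect the status bits here ... */
        return IRQ_HANDLED;
    }

    int main(void)
    {
        uint32_t powered_off[1] = { 0xffffffffu };
        uint32_t active[1]      = { 0x00000002u };

        printf("%d %d\n", ssp_int(powered_off), ssp_int(active));
        return 0;
    }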
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 512b8893893b..a80376dc3a10 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1429 sdd->regs + S3C64XX_SPI_INT_EN); 1429 sdd->regs + S3C64XX_SPI_INT_EN);
1430 1430
1431 pm_runtime_enable(&pdev->dev);
1432
1431 if (spi_register_master(master)) { 1433 if (spi_register_master(master)) {
1432 dev_err(&pdev->dev, "cannot register SPI master\n"); 1434 dev_err(&pdev->dev, "cannot register SPI master\n");
1433 ret = -EBUSY; 1435 ret = -EBUSY;
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1440 mem_res, 1442 mem_res,
1441 sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1443 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1442 1444
1443 pm_runtime_enable(&pdev->dev);
1444
1445 return 0; 1445 return 0;
1446 1446
1447err3: 1447err3:
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 0b68cb592fa4..e488a90a98b8 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev)
296 goto error1; 296 goto error1;
297 } 297 }
298 298
299 pm_runtime_enable(&pdev->dev);
300
299 master->num_chipselect = 1; 301 master->num_chipselect = 1;
300 master->bus_num = pdev->id; 302 master->bus_num = pdev->id;
301 master->setup = hspi_setup; 303 master->setup = hspi_setup;
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
309 goto error1; 311 goto error1;
310 } 312 }
311 313
312 pm_runtime_enable(&pdev->dev);
313
314 return 0; 314 return 0;
315 315
316 error1: 316 error1:
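Both SPI hunks above make the same ordering fix: runtime PM has to be enabled before the master is registered, because spi_register_master() can immediately queue transfers whose pm_runtime_get_sync() calls can fail against a still-disabled device. A minimal probe sketch of that ordering, with illustrative names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);		/* enable first ... */

	ret = spi_register_master(master);	/* ... registration may start I/O */
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		spi_master_put(master);
		return ret;
	}
	return 0;
}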
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3ba4c5712dff..853f62b2b1a9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
369{ 369{
370 const struct ni_65xx_board *board = comedi_board(dev); 370 const struct ni_65xx_board *board = comedi_board(dev);
371 struct ni_65xx_private *devpriv = dev->private; 371 struct ni_65xx_private *devpriv = dev->private;
372 unsigned base_bitfield_channel; 372 int base_bitfield_channel;
373 const unsigned max_ports_per_bitfield = 5;
374 unsigned read_bits = 0; 373 unsigned read_bits = 0;
375 unsigned j; 374 int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
375 int port_offset;
376 376
377 base_bitfield_channel = CR_CHAN(insn->chanspec); 377 base_bitfield_channel = CR_CHAN(insn->chanspec);
378 for (j = 0; j < max_ports_per_bitfield; ++j) { 378 for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
379 const unsigned port_offset = 379 port_offset <= last_port_offset; port_offset++) {
380 ni_65xx_port_by_channel(base_bitfield_channel) + j; 380 unsigned port = sprivate(s)->base_port + port_offset;
381 const unsigned port = 381 int base_port_channel = port_offset * ni_65xx_channels_per_port;
382 sprivate(s)->base_port + port_offset;
383 unsigned base_port_channel;
384 unsigned port_mask, port_data, port_read_bits; 382 unsigned port_mask, port_data, port_read_bits;
385 int bitshift; 383 int bitshift = base_port_channel - base_bitfield_channel;
386 if (port >= ni_65xx_total_num_ports(board)) 384
385 if (bitshift >= 32)
387 break; 386 break;
388 base_port_channel = port_offset * ni_65xx_channels_per_port;
389 port_mask = data[0]; 387 port_mask = data[0];
390 port_data = data[1]; 388 port_data = data[1];
391 bitshift = base_port_channel - base_bitfield_channel;
392 if (bitshift >= 32 || bitshift <= -32)
393 break;
394 if (bitshift > 0) { 389 if (bitshift > 0) {
395 port_mask >>= bitshift; 390 port_mask >>= bitshift;
396 port_data >>= bitshift; 391 port_data >>= bitshift;
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
41 struct list_head encoder_list; 41 struct list_head encoder_list;
42 struct list_head connector_list; 42 struct list_head connector_list;
43 struct mutex mutex; 43 struct mutex mutex;
44 int references;
45 int pipes; 44 int pipes;
46 struct drm_fbdev_cma *fbhelper; 45 struct drm_fbdev_cma *fbhelper;
47}; 46};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
241 } 240 }
242 } 241 }
243 242
244 imxdrm->references++;
245
246 return imxdrm->drm; 243 return imxdrm->drm;
247 244
248unwind_crtc: 245unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
280 list_for_each_entry(enc, &imxdrm->encoder_list, list) 277 list_for_each_entry(enc, &imxdrm->encoder_list, list)
281 module_put(enc->owner); 278 module_put(enc->owner);
282 279
283 imxdrm->references--;
284
285 mutex_unlock(&imxdrm->mutex); 280 mutex_unlock(&imxdrm->mutex);
286} 281}
287EXPORT_SYMBOL_GPL(imx_drm_device_put); 282EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
485 480
486 mutex_lock(&imxdrm->mutex); 481 mutex_lock(&imxdrm->mutex);
487 482
488 if (imxdrm->references) { 483 if (imxdrm->drm->open_count) {
489 ret = -EBUSY; 484 ret = -EBUSY;
490 goto err_busy; 485 goto err_busy;
491 } 486 }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
564 559
565 mutex_lock(&imxdrm->mutex); 560 mutex_lock(&imxdrm->mutex);
566 561
567 if (imxdrm->references) { 562 if (imxdrm->drm->open_count) {
568 ret = -EBUSY; 563 ret = -EBUSY;
569 goto err_busy; 564 goto err_busy;
570 } 565 }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
709 704
710 mutex_lock(&imxdrm->mutex); 705 mutex_lock(&imxdrm->mutex);
711 706
712 if (imxdrm->references) { 707 if (imxdrm->drm->open_count) {
713 ret = -EBUSY; 708 ret = -EBUSY;
714 goto err_busy; 709 goto err_busy;
715 } 710 }
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1387 if (nob > ulsm_nob) 1387 if (nob > ulsm_nob)
1388 return (-EINVAL); 1388 return (-EINVAL);
1389 1389
1390 if (copy_to_user (ulsm, lsm, sizeof(ulsm))) 1390 if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
1391 return (-EFAULT); 1391 return (-EFAULT);
1392 1392
1393 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1393 for (i = 0; i < lsm->lsm_stripe_count; i++) {
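This copy_to_user() fix, and the memset() fixes in the cvmx-usb and rtl8188eu hunks further down, are the same bug class: sizeof applied to a pointer yields the pointer size (4 or 8 bytes), not the size of the object it points to. A stand-alone illustration that compiles with any C compiler:

#include <stdio.h>
#include <string.h>

struct lsm { unsigned char payload[64]; };

int main(void)
{
	struct lsm obj;
	struct lsm *p = &obj;

	memset(&obj, 0xff, sizeof(obj));
	memset(p, 0, sizeof(p));	/* BUG: clears only sizeof(void *) bytes */

	printf("sizeof(p)=%zu sizeof(*p)=%zu first=%d last=%d\n",
	       sizeof(p), sizeof(*p), obj.payload[0], obj.payload[63]);
	/* On LP64 this prints: sizeof(p)=8 sizeof(*p)=64 first=0 last=255 */
	return 0;
}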
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index b94a95a597d6..76d5bbd4d93c 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,3 +1,4 @@
1config USB_MSI3101 1config USB_MSI3101
2 tristate "Mirics MSi3101 SDR Dongle" 2 tristate "Mirics MSi3101 SDR Dongle"
3 depends on USB && VIDEO_DEV && VIDEO_V4L2 3 depends on USB && VIDEO_DEV && VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 24c7b70a6cbf..4c3bf776bb20 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
1131 /* Absolute min and max number of buffers available for mmap() */ 1131 /* Absolute min and max number of buffers available for mmap() */
1132 *nbuffers = 32; 1132 *nbuffers = 32;
1133 *nplanes = 1; 1133 *nplanes = 1;
1134 sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */ 1134 /*
1135 * 3, wMaxPacketSize 3x 1024 bytes
1136 * 504, max IQ sample pairs per 1024 frame
1137 * 2, two samples, I and Q
1138 * 4, 32-bit float
1139 */
1140 sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
1135 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n", 1141 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
1136 __func__, *nbuffers, sizes[0]); 1142 __func__, *nbuffers, sizes[0]);
1137 return 0; 1143 return 0;
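The replacement comment spells the buffer size out as packets per URB x IQ pairs per packet x two samples x four bytes per float; a quick stand-alone check of that arithmetic and of the page alignment (assuming the usual 4 KiB PAGE_SIZE):

#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)	/* assumes PAGE_SIZE == 4096 */

int main(void)
{
	unsigned long sz = 3 * 504 * 2 * 4;	/* 3 packets * 504 IQ pairs * 2 samples * 4 bytes */

	printf("%lu -> %lu\n", sz, PAGE_ALIGN(sz));	/* 12096 -> 12288 */
	return 0;
}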
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1657 f->frequency * 625UL / 10UL); 1663 f->frequency * 625UL / 10UL);
1658} 1664}
1659 1665
1660const struct v4l2_ioctl_ops msi3101_ioctl_ops = { 1666static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
1661 .vidioc_querycap = msi3101_querycap, 1667 .vidioc_querycap = msi3101_querycap,
1662 1668
1663 .vidioc_enum_input = msi3101_enum_input, 1669 .vidioc_enum_input = msi3101_enum_input,
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
604 } 604 }
605 } 605 }
606 606
607 memset(usb, 0, sizeof(usb)); 607 memset(usb, 0, sizeof(*usb));
608 usb->init_flags = flags; 608 usb->init_flags = flags;
609 609
610 /* Initialize the USB state structure */ 610 /* Initialize the USB state structure */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); 907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
908 } 908 }
909 909
910 _rtw_memset(data, '\0', sizeof(data)); 910 _rtw_memset(data, '\0', sizeof(*data));
911 911
912 i = psd_start; 912 i = psd_start;
913 while (i < psd_stop) { 913 while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
57 u8 cut_ver, fab_ver; 57 u8 cut_ver, fab_ver;
58 58
59 /* Init Value */ 59 /* Init Value */
60 _rtw_memset(dm_odm, 0, sizeof(dm_odm)); 60 _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
61 61
62 dm_odm->Adapter = Adapter; 62 dm_odm->Adapter = Adapter;
63 63
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6973 stop = strncmp(extra, "stop", 4); 6973 stop = strncmp(extra, "stop", 4);
6974 sscanf(extra, "count =%d, pkt", &count); 6974 sscanf(extra, "count =%d, pkt", &count);
6975 6975
6976 _rtw_memset(extra, '\0', sizeof(extra)); 6976 _rtw_memset(extra, '\0', sizeof(*extra));
6977 6977
6978 if (stop == 0) { 6978 if (stop == 0) {
6979 bStartTest = 0; /* To set Stop */ 6979 bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
57 {} /* Terminating entry */ 58 {} /* Terminating entry */
58}; 59};
59 60
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
37 /* Get TCB and local buffer from common pool. 37 /* Get TCB and local buffer from common pool.
38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ 38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); 39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
40 if (!skb)
41 return RT_STATUS_FAILURE;
40 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 42 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
41 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 43 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
42 tcb_desc->queue_index = TXCMD_QUEUE; 44 tcb_desc->queue_index = TXCMD_QUEUE;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
1634 if (pMgmt == NULL) 1634 if (pMgmt == NULL)
1635 return -EFAULT; 1635 return -EFAULT;
1636 1636
1637 if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
1638 return -ENODEV;
1639
1637 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); 1640 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
1638 if (buf == NULL) 1641 if (buf == NULL)
1639 return -ENOMEM; 1642 return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
1098 memset(pMgmt->abyCurrBSSID, 0, 6); 1098 memset(pMgmt->abyCurrBSSID, 0, 6);
1099 pMgmt->eCurrState = WMAC_STATE_IDLE; 1099 pMgmt->eCurrState = WMAC_STATE_IDLE;
1100 1100
1101 pDevice->flags &= ~DEVICE_FLAGS_OPENED;
1102
1101 device_free_tx_bufs(pDevice); 1103 device_free_tx_bufs(pDevice);
1102 device_free_rx_bufs(pDevice); 1104 device_free_rx_bufs(pDevice);
1103 device_free_int_bufs(pDevice); 1105 device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
1109 usb_free_urb(pDevice->pInterruptURB); 1111 usb_free_urb(pDevice->pInterruptURB);
1110 1112
1111 BSSvClearNodeDBTable(pDevice, 0); 1113 BSSvClearNodeDBTable(pDevice, 0);
1112 pDevice->flags &=(~DEVICE_FLAGS_OPENED);
1113 1114
1114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); 1115 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
1115 1116
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); 148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
149 149
150 for (ii = 0; ii < pDevice->cbTD; ii++) { 150 for (ii = 0; ii < pDevice->cbTD; ii++) {
151 if (!pDevice->apTD[ii])
152 return NULL;
151 pContext = pDevice->apTD[ii]; 153 pContext = pDevice->apTD[ii];
152 if (pContext->bBoolInUse == false) { 154 if (pContext->bBoolInUse == false) {
153 pContext->bBoolInUse = true; 155 pContext->bBoolInUse = true;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c63..38e44b9abf0f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
753 753
754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755{ 755{
756 struct iscsi_cmd *cmd; 756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
757 758
758 conn->exp_statsn = exp_statsn; 759 conn->exp_statsn = exp_statsn;
759 760
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
761 return; 762 return;
762 763
763 spin_lock_bh(&conn->cmd_lock); 764 spin_lock_bh(&conn->cmd_lock);
764 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
765 spin_lock(&cmd->istate_lock); 766 spin_lock(&cmd->istate_lock);
766 if ((cmd->i_state == ISTATE_SENT_STATUS) && 767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
767 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
768 cmd->i_state = ISTATE_REMOVE; 769 cmd->i_state = ISTATE_REMOVE;
769 spin_unlock(&cmd->istate_lock); 770 spin_unlock(&cmd->istate_lock);
770 iscsit_add_cmd_to_immediate_queue(cmd, conn, 771 list_move_tail(&cmd->i_conn_node, &ack_list);
771 cmd->i_state);
772 continue; 772 continue;
773 } 773 }
774 spin_unlock(&cmd->istate_lock); 774 spin_unlock(&cmd->istate_lock);
775 } 775 }
776 spin_unlock_bh(&conn->cmd_lock); 776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
777} 782}
778 783
779static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 784static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
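The rewrite above is the usual way to drop a spinlock before doing sleeping cleanup: eligible commands are moved onto a private list while the bh-lock is held, then released one by one after it is dropped, so iscsit_free_cmd() never runs in atomic context. A generic sketch of the pattern, with hypothetical item_is_done()/item_destroy() helpers:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head node; /* ... payload ... */ };

static bool item_is_done(struct item *it);	/* hypothetical predicate */
static void item_destroy(struct item *it);	/* hypothetical, may sleep */

static void reap_items(spinlock_t *lock, struct list_head *live)
{
	LIST_HEAD(reap);
	struct item *it, *tmp;

	spin_lock_bh(lock);
	list_for_each_entry_safe(it, tmp, live, node)
		if (item_is_done(it))
			list_move_tail(&it->node, &reap);
	spin_unlock_bh(lock);

	/* Safe to sleep from here on. */
	list_for_each_entry_safe(it, tmp, &reap, node) {
		list_del(&it->node);
		item_destroy(it);
	}
}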
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1d..ef6d836a4d09 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
1192 */ 1192 */
1193alloc_tags: 1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS; 1195 tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
1197 1197
1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f2de28e178fd..b0cac0c342e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736 * Fallthrough 736 * Fallthrough
737 */ 737 */
738 case ISCSI_OP_SCSI_TMFUNC: 738 case ISCSI_OP_SCSI_TMFUNC:
739 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 739 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
741 __iscsit_free_cmd(cmd, true, shutdown); 741 __iscsit_free_cmd(cmd, true, shutdown);
742 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 742 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
752 se_cmd = &cmd->se_cmd; 752 se_cmd = &cmd->se_cmd;
753 __iscsit_free_cmd(cmd, true, shutdown); 753 __iscsit_free_cmd(cmd, true, shutdown);
754 754
755 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 755 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, true, shutdown); 757 __iscsit_free_cmd(cmd, true, shutdown);
758 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 758 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60ac..0f199f6a0738 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
134 * pSCSI Host ID and enable for phba mode 134 * pSCSI Host ID and enable for phba mode
135 */ 135 */
136 sh = scsi_host_lookup(phv->phv_host_id); 136 sh = scsi_host_lookup(phv->phv_host_id);
137 if (IS_ERR(sh)) { 137 if (!sh) {
138 pr_err("pSCSI: Unable to locate SCSI Host for" 138 pr_err("pSCSI: Unable to locate SCSI Host for"
139 " phv_host_id: %d\n", phv->phv_host_id); 139 " phv_host_id: %d\n", phv->phv_host_id);
140 return PTR_ERR(sh); 140 return -EINVAL;
141 } 141 }
142 142
143 phv->phv_lld_host = sh; 143 phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
515 sh = phv->phv_lld_host; 515 sh = phv->phv_lld_host;
516 } else { 516 } else {
517 sh = scsi_host_lookup(pdv->pdv_host_id); 517 sh = scsi_host_lookup(pdv->pdv_host_id);
518 if (IS_ERR(sh)) { 518 if (!sh) {
519 pr_err("pSCSI: Unable to locate" 519 pr_err("pSCSI: Unable to locate"
520 " pdv_host_id: %d\n", pdv->pdv_host_id); 520 " pdv_host_id: %d\n", pdv->pdv_host_id);
521 return PTR_ERR(sh); 521 return -EINVAL;
522 } 522 }
523 } 523 }
524 } else { 524 } else {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..d9b92b2c524d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
263 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 263 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
264 return TCM_INVALID_CDB_FIELD; 264 return TCM_INVALID_CDB_FIELD;
265 } 265 }
266 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
267 if (flags[0] & 0x10) {
268 pr_warn("WRITE SAME with ANCHOR not supported\n");
269 return TCM_INVALID_CDB_FIELD;
270 }
266 /* 271 /*
267 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 272 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
268 * translated into block discard requests within backend code. 273 * translated into block discard requests within backend code.
@@ -349,7 +354,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{ 354{
350 struct se_device *dev = cmd->se_dev; 355 struct se_device *dev = cmd->se_dev;
351 356
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 357 /*
358 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
359 * within target_complete_ok_work() if the command was successfully
360 * sent to the backend driver.
361 */
362 spin_lock_irq(&cmd->t_state_lock);
363 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
364 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
365 spin_unlock_irq(&cmd->t_state_lock);
366
353 /* 367 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 368 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission. 369 * before the original READ I/O submission.
@@ -363,7 +377,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{ 377{
364 struct se_device *dev = cmd->se_dev; 378 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg; 379 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr; 380 unsigned char *buf = NULL, *addr;
367 struct sg_mapping_iter m; 381 struct sg_mapping_iter m;
368 unsigned int offset = 0, len; 382 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb; 383 unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +392,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 */ 392 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 393 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE; 394 return TCM_NO_SENSE;
395 /*
396 * Immediately exit + release dev->caw_sem if command has already
397 * been failed with a non-zero SCSI status.
398 */
399 if (cmd->scsi_status) {
400 pr_err("compare_and_write_callback: non zero scsi_status:"
401 " 0x%02x\n", cmd->scsi_status);
402 goto out;
403 }
381 404
382 buf = kzalloc(cmd->data_length, GFP_KERNEL); 405 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) { 406 if (!buf) {
@@ -508,6 +531,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
508 cmd->transport_complete_callback = NULL; 531 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 532 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 } 533 }
534 /*
535 * Reset cmd->data_length to individual block_size in order to not
536 * confuse backend drivers that depend on this value matching the
537 * size of the I/O being submitted.
538 */
539 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
511 540
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 541 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE); 542 DMA_FROM_DEVICE);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac0..81e945eefbbd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
236{ 236{
237 int rc; 237 int rc;
238 238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
240 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
240 if (!se_sess->sess_cmd_map) { 241 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
242 return -ENOMEM; 243 if (!se_sess->sess_cmd_map) {
244 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
245 return -ENOMEM;
246 }
243 } 247 }
244 248
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) { 250 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool," 251 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num); 252 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map); 253 if (is_vmalloc_addr(se_sess->sess_cmd_map))
254 vfree(se_sess->sess_cmd_map);
255 else
256 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL; 257 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM; 258 return -ENOMEM;
252 } 259 }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
412{ 419{
413 if (se_sess->sess_cmd_map) { 420 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map); 422 if (is_vmalloc_addr(se_sess->sess_cmd_map))
423 vfree(se_sess->sess_cmd_map);
424 else
425 kfree(se_sess->sess_cmd_map);
416 } 426 }
417 kmem_cache_free(se_sess_cache, se_sess); 427 kmem_cache_free(se_sess_cache, se_sess);
418} 428}
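The allocation change above is the classic large-buffer fallback: try kzalloc() quietly first (__GFP_NOWARN keeps the expected failure out of the log), fall back to vzalloc() when the request is too large for the page allocator, and pick the matching free with is_vmalloc_addr(). Current kernels wrap this as kvzalloc()/kvfree(); a sketch of the open-coded form used here, with illustrative helper names:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *tag_map_alloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);	/* virtually contiguous is good enough here */
	return p;
}

static void tag_map_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}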
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adca..474cd44fac14 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
82 mutex_lock(&g_device_mutex); 82 mutex_lock(&g_device_mutex);
83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
84 84
85 if (!se_dev->dev_attrib.emulate_3pc)
86 continue;
87
85 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 88 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
86 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 89 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
87 90
@@ -298,8 +301,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
298 (unsigned long long)xop->dst_lba); 301 (unsigned long long)xop->dst_lba);
299 302
300 if (dc != 0) { 303 if (dc != 0) {
301 xop->dbl = (desc[29] << 16) & 0xff; 304 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] << 8) & 0xff; 305 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff; 306 xop->dbl |= desc[31] & 0xff;
304 307
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 308 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
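The dbl fix above corrects an operator-ordering bug: masking with 0xff after shifting left throws away exactly the byte that was just shifted in, so the old expression could only ever see desc[31]. A stand-alone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char desc[32] = { [29] = 0x01, [30] = 0x02, [31] = 0x03 };
	unsigned int old_dbl, new_dbl;

	old_dbl  = (desc[29] << 16) & 0xff;	/* 0: the mask kills the high byte */
	old_dbl |= (desc[30] << 8) & 0xff;	/* 0: same problem */
	old_dbl |= desc[31] & 0xff;

	new_dbl  = (desc[29] & 0xff) << 16;	/* mask first, then shift */
	new_dbl |= (desc[30] & 0xff) << 8;
	new_dbl |= desc[31] & 0xff;

	printf("old=0x%06x new=0x%06x\n", old_dbl, new_dbl);	/* old=0x000003 new=0x010203 */
	return 0;
}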
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
357 struct se_cmd se_cmd; 360 struct se_cmd se_cmd;
358 struct xcopy_op *xcopy_op; 361 struct xcopy_op *xcopy_op;
359 struct completion xpt_passthrough_sem; 362 struct completion xpt_passthrough_sem;
363 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
360}; 364};
361 365
362static struct se_port xcopy_pt_port; 366static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
675 679
676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 680 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
677 se_cmd->scsi_status); 681 se_cmd->scsi_status);
678 return 0; 682
683 return (se_cmd->scsi_status) ? -EINVAL : 0;
679} 684}
680 685
681static int target_xcopy_read_source( 686static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
708 (unsigned long long)src_lba, src_sectors, length); 713 (unsigned long long)src_lba, src_sectors, length);
709 714
710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 715 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
711 DMA_FROM_DEVICE, 0, NULL); 716 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
712 xop->src_pt_cmd = xpt_cmd; 717 xop->src_pt_cmd = xpt_cmd;
713 718
714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 719 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
768 (unsigned long long)dst_lba, dst_sectors, length); 773 (unsigned long long)dst_lba, dst_sectors, length);
769 774
770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 775 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
771 DMA_TO_DEVICE, 0, NULL); 776 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
772 xop->dst_pt_cmd = xpt_cmd; 777 xop->dst_pt_cmd = xpt_cmd;
773 778
774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 779 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
884 889
885sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 890sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
886{ 891{
892 struct se_device *dev = se_cmd->se_dev;
887 struct xcopy_op *xop = NULL; 893 struct xcopy_op *xop = NULL;
888 unsigned char *p = NULL, *seg_desc; 894 unsigned char *p = NULL, *seg_desc;
889 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 895 unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
896 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
890 int rc; 897 int rc;
891 unsigned short tdll; 898 unsigned short tdll;
892 899
900 if (!dev->dev_attrib.emulate_3pc) {
901 pr_err("EXTENDED_COPY operation explicitly disabled\n");
902 return TCM_UNSUPPORTED_SCSI_OPCODE;
903 }
904
893 sa = se_cmd->t_task_cdb[1] & 0x1f; 905 sa = se_cmd->t_task_cdb[1] & 0x1f;
894 if (sa != 0x00) { 906 if (sa != 0x00) {
895 pr_err("EXTENDED_COPY(LID4) not supported\n"); 907 pr_err("EXTENDED_COPY(LID4) not supported\n");
896 return TCM_UNSUPPORTED_SCSI_OPCODE; 908 return TCM_UNSUPPORTED_SCSI_OPCODE;
897 } 909 }
898 910
911 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
912 if (!xop) {
913 pr_err("Unable to allocate xcopy_op\n");
914 return TCM_OUT_OF_RESOURCES;
915 }
916 xop->xop_se_cmd = se_cmd;
917
899 p = transport_kmap_data_sg(se_cmd); 918 p = transport_kmap_data_sg(se_cmd);
900 if (!p) { 919 if (!p) {
901 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 920 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
921 kfree(xop);
902 return TCM_OUT_OF_RESOURCES; 922 return TCM_OUT_OF_RESOURCES;
903 } 923 }
904 924
905 list_id = p[0]; 925 list_id = p[0];
906 if (list_id != 0x00) { 926 list_id_usage = (p[1] & 0x18) >> 3;
907 pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 927
908 goto out;
909 }
910 list_id_usage = (p[1] & 0x18);
911 /* 928 /*
912 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 929 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
913 */ 930 */
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
920 goto out; 937 goto out;
921 } 938 }
922 939
923 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
924 if (!xop) {
925 pr_err("Unable to allocate xcopy_op\n");
926 goto out;
927 }
928 xop->xop_se_cmd = se_cmd;
929
930 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 940 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
931 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 941 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
932 tdll, sdll, inline_dl); 942 tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
935 if (rc <= 0) 945 if (rc <= 0)
936 goto out; 946 goto out;
937 947
948 if (xop->src_dev->dev_attrib.block_size !=
949 xop->dst_dev->dev_attrib.block_size) {
950 pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
951 " block_size: %u currently unsupported\n",
952 xop->src_dev->dev_attrib.block_size,
953 xop->dst_dev->dev_attrib.block_size);
954 xcopy_pt_undepend_remotedev(xop);
955 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
956 goto out;
957 }
958
938 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 959 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
939 rc * XCOPY_TARGET_DESC_LEN); 960 rc * XCOPY_TARGET_DESC_LEN);
940 seg_desc = &p[16]; 961 seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
957 if (p) 978 if (p)
958 transport_kunmap_data_sg(se_cmd); 979 transport_kunmap_data_sg(se_cmd);
959 kfree(xop); 980 kfree(xop);
960 return TCM_INVALID_CDB_FIELD; 981 return ret;
961} 982}
962 983
963static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 984static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index f10a6ad37c06..c2301da08ac7 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
310 } 310 }
311 311
312 th_zone = conf->pzone_data; 312 th_zone = conf->pzone_data;
313 if (th_zone->therm_dev)
314 return;
315 313
316 if (th_zone->bind == false) { 314 if (th_zone->bind == false) {
317 for (i = 0; i < th_zone->cool_dev_size; i++) { 315 for (i = 0; i < th_zone->cool_dev_size; i++) {
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index b43afda8acd1..32f38b90c4f6 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
317 317
318 con = readl(data->base + reg->tmu_ctrl); 318 con = readl(data->base + reg->tmu_ctrl);
319 319
320 if (pdata->test_mux)
321 con |= (pdata->test_mux << reg->test_mux_addr_shift);
322
320 if (pdata->reference_voltage) { 323 if (pdata->reference_voltage) {
321 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); 324 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
322 con |= pdata->reference_voltage << reg->buf_vref_sel_shift; 325 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
488 }, 491 },
489 { 492 {
490 .compatible = "samsung,exynos4412-tmu", 493 .compatible = "samsung,exynos4412-tmu",
491 .data = (void *)EXYNOS5250_TMU_DRV_DATA, 494 .data = (void *)EXYNOS4412_TMU_DRV_DATA,
492 }, 495 },
493 { 496 {
494 .compatible = "samsung,exynos5250-tmu", 497 .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
629 if (ret) 632 if (ret)
630 return ret; 633 return ret;
631 634
632 if (pdata->type == SOC_ARCH_EXYNOS || 635 if (pdata->type == SOC_ARCH_EXYNOS4210 ||
633 pdata->type == SOC_ARCH_EXYNOS4210 || 636 pdata->type == SOC_ARCH_EXYNOS4412 ||
634 pdata->type == SOC_ARCH_EXYNOS5440) 637 pdata->type == SOC_ARCH_EXYNOS5250 ||
638 pdata->type == SOC_ARCH_EXYNOS5440)
635 data->soc = pdata->type; 639 data->soc = pdata->type;
636 else { 640 else {
637 ret = -EINVAL; 641 ret = -EINVAL;
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index b364c9eee701..3fb65547e64c 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -41,7 +41,8 @@ enum calibration_mode {
41 41
42enum soc_type { 42enum soc_type {
43 SOC_ARCH_EXYNOS4210 = 1, 43 SOC_ARCH_EXYNOS4210 = 1,
44 SOC_ARCH_EXYNOS, 44 SOC_ARCH_EXYNOS4412,
45 SOC_ARCH_EXYNOS5250,
45 SOC_ARCH_EXYNOS5440, 46 SOC_ARCH_EXYNOS5440,
46}; 47};
47 48
@@ -84,6 +85,7 @@ enum soc_type {
84 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl 85 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
85 reg. 86 reg.
86 * @tmu_ctrl: TMU main controller register. 87 * @tmu_ctrl: TMU main controller register.
88 * @test_mux_addr_shift: shift bits of test mux address.
87 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register. 89 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
88 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register. 90 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
89 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. 91 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
150 u32 triminfo_reload_shift; 152 u32 triminfo_reload_shift;
151 153
152 u32 tmu_ctrl; 154 u32 tmu_ctrl;
155 u32 test_mux_addr_shift;
153 u32 buf_vref_sel_shift; 156 u32 buf_vref_sel_shift;
154 u32 buf_vref_sel_mask; 157 u32 buf_vref_sel_mask;
155 u32 therm_trip_mode_shift; 158 u32 therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
257 * @first_point_trim: temp value of the first point trimming 260 * @first_point_trim: temp value of the first point trimming
258 * @second_point_trim: temp value of the second point trimming 261 * @second_point_trim: temp value of the second point trimming
259 * @default_temp_offset: default temperature offset in case of no trimming 262 * @default_temp_offset: default temperature offset in case of no trimming
 263 * @test_mux: test MUX address value, used on SoCs that provide a test MUX
260 * @cal_type: calibration type for temperature 264 * @cal_type: calibration type for temperature
261 * @cal_mode: calibration mode for temperature 265 * @cal_mode: calibration mode for temperature
262 * @freq_clip_table: Table representing frequency reduction percentage. 266 * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
286 u8 first_point_trim; 290 u8 first_point_trim;
287 u8 second_point_trim; 291 u8 second_point_trim;
288 u8 default_temp_offset; 292 u8 default_temp_offset;
293 u8 test_mux;
289 294
290 enum calibration_type cal_type; 295 enum calibration_type cal_type;
291 enum calibration_mode cal_mode; 296 enum calibration_mode cal_mode;
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 9002499c1f69..073c292baa53 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
90}; 90};
91#endif 91#endif
92 92
93#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) 93#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
94static const struct exynos_tmu_registers exynos5250_tmu_registers = { 94static const struct exynos_tmu_registers exynos4412_tmu_registers = {
95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON, 98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT, 99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
101 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
101 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, 102 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
102 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, 103 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
103 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 104 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
128 .emul_time_mask = EXYNOS_EMUL_TIME_MASK, 129 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
129}; 130};
130 131
131#define EXYNOS5250_TMU_DATA \ 132#define EXYNOS4412_TMU_DATA \
132 .threshold_falling = 10, \ 133 .threshold_falling = 10, \
133 .trigger_levels[0] = 85, \ 134 .trigger_levels[0] = 85, \
134 .trigger_levels[1] = 103, \ 135 .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
162 .temp_level = 103, \ 163 .temp_level = 103, \
163 }, \ 164 }, \
164 .freq_tab_count = 2, \ 165 .freq_tab_count = 2, \
165 .type = SOC_ARCH_EXYNOS, \ 166 .registers = &exynos4412_tmu_registers, \
166 .registers = &exynos5250_tmu_registers, \
167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
169 TMU_SUPPORT_EMUL_TIME) 169 TMU_SUPPORT_EMUL_TIME)
170#endif
170 171
172#if defined(CONFIG_SOC_EXYNOS4412)
173struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
174 .tmu_data = {
175 {
176 EXYNOS4412_TMU_DATA,
177 .type = SOC_ARCH_EXYNOS4412,
178 .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
179 },
180 },
181 .tmu_count = 1,
182};
183#endif
184
185#if defined(CONFIG_SOC_EXYNOS5250)
171struct exynos_tmu_init_data const exynos5250_default_tmu_data = { 186struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
172 .tmu_data = { 187 .tmu_data = {
173 { EXYNOS5250_TMU_DATA }, 188 {
189 EXYNOS4412_TMU_DATA,
190 .type = SOC_ARCH_EXYNOS5250,
191 },
174 }, 192 },
175 .tmu_count = 1, 193 .tmu_count = 1,
176}; 194};
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index dc7feb51099b..a1ea19d9e0a6 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -95,6 +95,10 @@
95 95
96#define EXYNOS_MAX_TRIGGER_PER_REG 4 96#define EXYNOS_MAX_TRIGGER_PER_REG 4
97 97
98/* Exynos4412 specific */
99#define EXYNOS4412_MUX_ADDR_VALUE 6
100#define EXYNOS4412_MUX_ADDR_SHIFT 20
101
98/*exynos5440 specific registers*/ 102/*exynos5440 specific registers*/
99#define EXYNOS5440_TMU_S0_7_TRIM 0x000 103#define EXYNOS5440_TMU_S0_7_TRIM 0x000
100#define EXYNOS5440_TMU_S0_7_CTRL 0x020 104#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
138#define EXYNOS4210_TMU_DRV_DATA (NULL) 142#define EXYNOS4210_TMU_DRV_DATA (NULL)
139#endif 143#endif
140 144
141#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)) 145#if defined(CONFIG_SOC_EXYNOS4412)
146extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
147#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
148#else
149#define EXYNOS4412_TMU_DRV_DATA (NULL)
150#endif
151
152#if defined(CONFIG_SOC_EXYNOS5250)
142extern struct exynos_tmu_init_data const exynos5250_default_tmu_data; 153extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
143#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data) 154#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
144#else 155#else
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index eeef0e2498ca..fdb07199d9c2 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
159 159
160 INIT_LIST_HEAD(&hwmon->tz_list); 160 INIT_LIST_HEAD(&hwmon->tz_list);
161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
162 hwmon->device = hwmon_device_register(&tz->device); 162 hwmon->device = hwmon_device_register(NULL);
163 if (IS_ERR(hwmon->device)) { 163 if (IS_ERR(hwmon->device)) {
164 result = PTR_ERR(hwmon->device); 164 result = PTR_ERR(hwmon->device);
165 goto free_mem; 165 goto free_mem;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 4f8b9af54a5a..5a47cc8c8f85 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
110 } else { 110 } else {
111 dev_err(bgp->dev, 111 dev_err(bgp->dev,
112 "Failed to read PCB state. Using defaults\n"); 112 "Failed to read PCB state. Using defaults\n");
113 ret = 0;
113 } 114 }
114 } 115 }
115 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant); 116 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index f36950e4134f..7722cb9d5a80 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
316 int phy_id = topology_physical_package_id(cpu); 316 int phy_id = topology_physical_package_id(cpu);
317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); 317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
318 bool notify = false; 318 bool notify = false;
319 unsigned long flags;
319 320
320 if (!phdev) 321 if (!phdev)
321 return; 322 return;
322 323
323 spin_lock(&pkg_work_lock); 324 spin_lock_irqsave(&pkg_work_lock, flags);
324 ++pkg_work_cnt; 325 ++pkg_work_cnt;
325 if (unlikely(phy_id > max_phy_id)) { 326 if (unlikely(phy_id > max_phy_id)) {
326 spin_unlock(&pkg_work_lock); 327 spin_unlock_irqrestore(&pkg_work_lock, flags);
327 return; 328 return;
328 } 329 }
329 pkg_work_scheduled[phy_id] = 0; 330 pkg_work_scheduled[phy_id] = 0;
330 spin_unlock(&pkg_work_lock); 331 spin_unlock_irqrestore(&pkg_work_lock, flags);
331 332
332 enable_pkg_thres_interrupt(); 333 enable_pkg_thres_interrupt();
333 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 334 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
397 int thres_count; 398 int thres_count;
398 u32 eax, ebx, ecx, edx; 399 u32 eax, ebx, ecx, edx;
399 u8 *temp; 400 u8 *temp;
401 unsigned long flags;
400 402
401 cpuid(6, &eax, &ebx, &ecx, &edx); 403 cpuid(6, &eax, &ebx, &ecx, &edx);
402 thres_count = ebx & 0x07; 404 thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
420 goto err_ret_unlock; 422 goto err_ret_unlock;
421 } 423 }
422 424
423 spin_lock(&pkg_work_lock); 425 spin_lock_irqsave(&pkg_work_lock, flags);
424 if (topology_physical_package_id(cpu) > max_phy_id) 426 if (topology_physical_package_id(cpu) > max_phy_id)
425 max_phy_id = topology_physical_package_id(cpu); 427 max_phy_id = topology_physical_package_id(cpu);
426 temp = krealloc(pkg_work_scheduled, 428 temp = krealloc(pkg_work_scheduled,
427 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); 429 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
428 if (!temp) { 430 if (!temp) {
429 spin_unlock(&pkg_work_lock); 431 spin_unlock_irqrestore(&pkg_work_lock, flags);
430 err = -ENOMEM; 432 err = -ENOMEM;
431 goto err_ret_free; 433 goto err_ret_free;
432 } 434 }
433 pkg_work_scheduled = temp; 435 pkg_work_scheduled = temp;
434 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; 436 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
435 spin_unlock(&pkg_work_lock); 437 spin_unlock_irqrestore(&pkg_work_lock, flags);
436 438
437 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu); 439 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
438 phy_dev_entry->first_cpu = cpu; 440 phy_dev_entry->first_cpu = cpu;
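The pkg_work_lock changes above follow the standard rule for locks that are also taken from interrupt context: process-context users must use the _irqsave variant, otherwise an interrupt arriving on the same CPU can spin forever on a lock that CPU already holds. A minimal sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for pkg_work_lock */
static int shared_counter;

static void process_context_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* also masks local IRQs */
	shared_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}

static void irq_context_update(void)	/* imagine this runs from an ISR */
{
	spin_lock(&example_lock);	/* IRQs are already off here */
	shared_counter++;
	spin_unlock(&example_lock);
}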
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index e61c36cbb866..c193af6a628f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@ struct console xenboot_console = {
636 .name = "xenboot", 636 .name = "xenboot",
637 .write = xenboot_write_console, 637 .write = xenboot_write_console,
638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, 638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
639 .index = -1,
639}; 640};
640#endif /* CONFIG_EARLY_PRINTK */ 641#endif /* CONFIG_EARLY_PRINTK */
641 642
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..7a744b69c3d1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; 1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
1759 if (canon_change) { 1759 if (canon_change) {
1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1761 ldata->line_start = 0; 1761 ldata->line_start = ldata->canon_head = ldata->read_tail;
1762 ldata->canon_head = ldata->read_tail;
1763 ldata->erasing = 0; 1762 ldata->erasing = 0;
1764 ldata->lnext = 0; 1763 ldata->lnext = 0;
1765 } 1764 }
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2184 2183
2185 if (!input_available_p(tty, 0)) { 2184 if (!input_available_p(tty, 0)) {
2186 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2187 retval = -EIO; 2186 up_read(&tty->termios_rwsem);
2188 break; 2187 tty_flush_to_ldisc(tty);
2189 } 2188 down_read(&tty->termios_rwsem);
2190 if (tty_hung_up_p(file)) 2189 if (!input_available_p(tty, 0)) {
2191 break; 2190 retval = -EIO;
2192 if (!timeout) 2191 break;
2193 break; 2192 }
2194 if (file->f_flags & O_NONBLOCK) { 2193 } else {
2195 retval = -EAGAIN; 2194 if (tty_hung_up_p(file))
2196 break; 2195 break;
2197 } 2196 if (!timeout)
2198 if (signal_pending(current)) { 2197 break;
2199 retval = -ERESTARTSYS; 2198 if (file->f_flags & O_NONBLOCK) {
2200 break; 2199 retval = -EAGAIN;
2201 } 2200 break;
2202 n_tty_set_room(tty); 2201 }
2203 up_read(&tty->termios_rwsem); 2202 if (signal_pending(current)) {
2203 retval = -ERESTARTSYS;
2204 break;
2205 }
2206 n_tty_set_room(tty);
2207 up_read(&tty->termios_rwsem);
2204 2208
2205 timeout = schedule_timeout(timeout); 2209 timeout = schedule_timeout(timeout);
2206 2210
2207 down_read(&tty->termios_rwsem); 2211 down_read(&tty->termios_rwsem);
2208 continue; 2212 continue;
2213 }
2209 } 2214 }
2210 __set_current_state(TASK_RUNNING); 2215 __set_current_state(TASK_RUNNING);
2211 2216
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a0ebbc9ce5cd..042aa077b5b3 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
1912 1912
1913 sport->devdata = of_id->data; 1913 sport->devdata = of_id->data;
1914 1914
1915 if (of_device_is_stdout_path(np))
1916 add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
1917
1918 return 0; 1915 return 0;
1919} 1916}
1920#else 1917#else
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
667 667
668static int dma_push_rx(struct eg20t_port *priv, int size) 668static int dma_push_rx(struct eg20t_port *priv, int size)
669{ 669{
670 struct tty_struct *tty;
671 int room; 670 int room;
672 struct uart_port *port = &priv->port; 671 struct uart_port *port = &priv->port;
673 struct tty_port *tport = &port->state->port; 672 struct tty_port *tport = &port->state->port;
674 673
675 port = &priv->port;
676 tty = tty_port_tty_get(tport);
677 if (!tty) {
678 dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
679 return 0;
680 }
681
682 room = tty_buffer_request_room(tport, size); 674 room = tty_buffer_request_room(tport, size);
683 675
684 if (room < size) 676 if (room < size)
685 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 677 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
686 size - room); 678 size - room);
687 if (!room) 679 if (!room)
688 return room; 680 return 0;
689 681
690 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); 682 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
691 683
692 port->icount.rx += room; 684 port->icount.rx += room;
693 tty_kref_put(tty);
694 685
695 return room; 686 return room;
696} 687}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
1098 if (tty == NULL) { 1089 if (tty == NULL) {
1099 for (i = 0; error_msg[i] != NULL; i++) 1090 for (i = 0; error_msg[i] != NULL; i++)
1100 dev_err(&priv->pdev->dev, error_msg[i]); 1091 dev_err(&priv->pdev->dev, error_msg[i]);
1092 } else {
1093 tty_kref_put(tty);
1101 } 1094 }
1102} 1095}
1103 1096
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
732static void tegra_uart_stop_rx(struct uart_port *u) 732static void tegra_uart_stop_rx(struct uart_port *u)
733{ 733{
734 struct tegra_uart_port *tup = to_tegra_uport(u); 734 struct tegra_uart_port *tup = to_tegra_uport(u);
735 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); 735 struct tty_struct *tty;
736 struct tty_port *port = &u->state->port; 736 struct tty_port *port = &u->state->port;
737 struct dma_tx_state state; 737 struct dma_tx_state state;
738 unsigned long ier; 738 unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
744 if (!tup->rx_in_progress) 744 if (!tup->rx_in_progress)
745 return; 745 return;
746 746
747 tty = tty_port_tty_get(&tup->uport.state->port);
748
747 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ 749 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
748 750
749 ier = tup->ier_shadow; 751 ier = tup->ier_shadow;
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 93b697a0de65..15ad6fcda88b 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
561 if (!mmres || !irqres) 561 if (!mmres || !irqres)
562 return -ENODEV; 562 return -ENODEV;
563 563
564 if (np) 564 if (np) {
565 port = of_alias_get_id(np, "serial"); 565 port = of_alias_get_id(np, "serial");
566 if (port >= VT8500_MAX_PORTS) 566 if (port >= VT8500_MAX_PORTS)
567 port = -1; 567 port = -1;
568 else 568 } else {
569 port = -1; 569 port = -1;
570 }
570 571
571 if (port < 0) { 572 if (port < 0) {
572 /* calculate the port id */ 573 /* calculate the port id */
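The brace fix above is a dangling-else bug: without braces the else binds to the nearest if that lacks one, not to the if (np) the indentation suggests, so the alias lookup result was always thrown away. A stand-alone demonstration with illustrative values (modern compilers warn about exactly this misleading indentation):

#include <stdio.h>

int main(void)
{
	int have_node = 1;	/* pretend the DT node exists */
	int port = -1;

	if (have_node)
		port = 3;		/* pretend the alias lookup returned 3 */
		if (port >= 6)		/* indentation lies: this is a new statement */
			port = -1;
	else				/* ... and this else binds to it, not to if (have_node) */
		port = -1;

	printf("port=%d\n", port);	/* prints -1, not the intended 3 */
	return 0;
}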
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1201 } 1201 }
1202 return 0; 1202 return 0;
1203 case TCFLSH: 1203 case TCFLSH:
1204 retval = tty_check_change(tty);
1205 if (retval)
1206 return retval;
1204 return __tty_perform_flush(tty, arg); 1207 return __tty_perform_flush(tty, arg);
1205 default: 1208 default:
1206 /* Try the mode commands */ 1209 /* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
 config USB_CHIPIDEA
 	tristate "ChipIdea Highspeed Dual Role Controller"
-	depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)
+	depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
 					ret);
-			goto err_clk;
+			goto err_phy;
 		}
 	}
 
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Can't register ci_hdrc platform device, err=%d\n",
 			ret);
-		goto err_clk;
+		goto err_phy;
 	}
 
 	if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 
 disable_device:
 	ci_hdrc_remove_device(data->ci_pdev);
+err_phy:
+	if (data->phy)
+		usb_phy_shutdown(data->phy);
 err_clk:
 	clk_disable_unprepare(data->clk);
 	return ret;
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 042320a6c6c7..d514332ac081 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
 		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
 	},
-	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+	{
+		/* Intel Clovertrail */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
+		.driver_data = (kernel_ulong_t)&penwell_pci_platdata,
+	},
+	{ 0 } /* end: all zeroes */
 };
 MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
 
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
 	dbg_remove_files(ci);
 	free_irq(ci->irq, ci);
 	ci_role_destroy(ci);
+	kfree(ci->hw_bank.regmap);
 
 	return 0;
 }
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6f96795dd20c..64d7a6d9a1ad 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
 {
 	struct usb_hcd *hcd = ci->hcd;
 
-	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
+	if (hcd) {
+		usb_remove_hcd(hcd);
+		usb_put_hcd(hcd);
+	}
 	if (ci->platdata->reg_vbus)
 		regulator_disable(ci->platdata->reg_vbus);
 }
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
 	for (i = 0; i < ci->hw_ep_max; i++) {
 		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
 
+		if (hwep->pending_td)
+			free_pending_td(hwep);
 		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
 	}
 }
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
 		if (ci->platdata->notify_event)
 			ci->platdata->notify_event(ci,
 				CI_HDRC_CONTROLLER_STOPPED_EVENT);
-		ci->driver = NULL;
 		spin_unlock_irqrestore(&ci->lock, flags);
 		_gadget_stop_activity(&ci->gadget);
 		spin_lock_irqsave(&ci->lock, flags);
 		pm_runtime_put(&ci->gadget.dev);
 	}
 
+	ci->driver = NULL;
 	spin_unlock_irqrestore(&ci->lock, flags);
 
 	return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
 		if ((index & ~USB_DIR_IN) == 0)
 			return 0;
 		ret = findintfep(ps->dev, index);
+		if (ret < 0) {
+			/*
+			 * Some not fully compliant Win apps seem to get
+			 * index wrong and have the endpoint number here
+			 * rather than the endpoint address (with the
+			 * correct direction). Win does let this through,
+			 * so we'll not reject it here but leave it to
+			 * the device to not break KVM. But we warn.
+			 */
+			ret = findintfep(ps->dev, index ^ 0x80);
+			if (ret >= 0)
+				dev_info(&ps->dev->dev,
+					"%s: process %i (%s) requesting ep %02x but needs %02x\n",
+					__func__, task_pid_nr(current),
+					current->comm, index, index ^ 0x80);
+		}
 		if (ret >= 0)
 			ret = checkintf(ps, ret);
 		break;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
 	unsigned long long u2_pel;
 	int ret;
 
+	if (udev->state != USB_STATE_CONFIGURED)
+		return 0;
+
 	/* Convert SEL and PEL stored in ns to us */
 	u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
 	u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 5b44cd47da5b..01fe36273f3b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Alcor Micro Corp. Hub */
 	{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* MicroTouch Systems touchscreen */
+	{ USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Broadcom BCM92035DGROM BT dongle */
 	{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* MAYA44USB sound device */
+	{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Action Semiconductor flash disk */
 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
 			USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 997ebe420bc9..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
 #define PCI_VENDOR_ID_SYNOPSYS		0x16c3
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3	0xabcd
 #define PCI_DEVICE_ID_INTEL_BYT	0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD	0x119e
 
 struct dwc3_pci {
 	struct device		*dev;
@@ -189,6 +190,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
 				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
 	},
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
 	{  }	/* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..44cf775a8627 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
 	struct ffs_file_perms perms;
 	umode_t root_mode;
 	const char *dev_name;
-	union {
-		/* set by ffs_fs_mount(), read by ffs_sb_fill() */
-		void *private_data;
-		/* set by ffs_sb_fill(), read by ffs_fs_mount */
-		struct ffs_data *ffs_data;
-	};
+	struct ffs_data *ffs_data;
 };
 
 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 {
 	struct ffs_sb_fill_data *data = _data;
 	struct inode *inode;
-	struct ffs_data *ffs;
+	struct ffs_data *ffs = data->ffs_data;
 
 	ENTER();
 
-	/* Initialise data */
-	ffs = ffs_data_new();
-	if (unlikely(!ffs))
-		goto Enomem;
-
 	ffs->sb = sb;
-	ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL);
-	if (unlikely(!ffs->dev_name))
-		goto Enomem;
-	ffs->file_perms = data->perms;
-	ffs->private_data = data->private_data;
-
-	/* used by the caller of this function */
-	data->ffs_data = ffs;
-
+	data->ffs_data = NULL;
 	sb->s_fs_info = ffs;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 					  &data->perms);
 	sb->s_root = d_make_root(inode);
 	if (unlikely(!sb->s_root))
-		goto Enomem;
+		return -ENOMEM;
 
 	/* EP0 file */
 	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
 					 &ffs_ep0_operations, NULL)))
-		goto Enomem;
+		return -ENOMEM;
 
 	return 0;
-
-Enomem:
-	return -ENOMEM;
 }
 
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
 	struct dentry *rv;
 	int ret;
 	void *ffs_dev;
+	struct ffs_data *ffs;
 
 	ENTER();
 
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
 	if (unlikely(ret < 0))
 		return ERR_PTR(ret);
 
+	ffs = ffs_data_new();
+	if (unlikely(!ffs))
+		return ERR_PTR(-ENOMEM);
+	ffs->file_perms = data.perms;
+
+	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+	if (unlikely(!ffs->dev_name)) {
+		ffs_data_put(ffs);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	ffs_dev = functionfs_acquire_dev_callback(dev_name);
-	if (IS_ERR(ffs_dev))
-		return ffs_dev;
+	if (IS_ERR(ffs_dev)) {
+		ffs_data_put(ffs);
+		return ERR_CAST(ffs_dev);
+	}
+	ffs->private_data = ffs_dev;
+	data.ffs_data = ffs;
 
-	data.dev_name = dev_name;
-	data.private_data = ffs_dev;
 	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
-	/* data.ffs_data is set by ffs_sb_fill */
-	if (IS_ERR(rv))
+	if (IS_ERR(rv) && data.ffs_data) {
 		functionfs_release_dev_callback(data.ffs_data);
-
+		ffs_data_put(data.ffs_data);
+	}
 	return rv;
 }
 
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
 				   data->raw_descs + ret,
 				   (sizeof data->raw_descs) - ret,
 				   __ffs_func_bind_do_descs, func);
+		if (unlikely(ret < 0))
+			goto error;
 	}
 
 	/*
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index cc9207473dbc..0ac6064aa3b8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
 /*
  *	probe - binds to the platform device
  */
-static int __init pxa25x_udc_probe(struct platform_device *pdev)
+static int pxa25x_udc_probe(struct platform_device *pdev)
 {
 	struct pxa25x_udc *dev = &memory;
 	int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
 	pullup_off();
 }
 
-static int __exit pxa25x_udc_remove(struct platform_device *pdev)
+static int pxa25x_udc_remove(struct platform_device *pdev)
 {
 	struct pxa25x_udc *dev = platform_get_drvdata(pdev);
 
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
 
 static struct platform_driver udc_driver = {
 	.shutdown	= pxa25x_udc_shutdown,
-	.remove		= __exit_p(pxa25x_udc_remove),
+	.probe		= pxa25x_udc_probe,
+	.remove		= pxa25x_udc_remove,
 	.suspend	= pxa25x_udc_suspend,
 	.resume		= pxa25x_udc_resume,
 	.driver		= {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
 	},
 };
 
-module_platform_driver_probe(udc_driver, pxa25x_udc_probe);
+module_platform_driver(udc_driver);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 6bddf1aa2347..a8a99e4748d5 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
 	 * FIFO, requests of >512 cause the endpoint to get stuck with a
 	 * fragment of the end of the transfer in it.
 	 */
-	if (can_write > 512)
+	if (can_write > 512 && !periodic)
 		can_write = 512;
 
 	/*
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 4449f565d6c6..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
 	}
 
 	/* Enable USB controller, 83xx or 8536 */
-	if (pdata->have_sysif_regs)
+	if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
 		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
 
 	/* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 	case FSL_USB2_PHY_ULPI:
 		if (pdata->have_sysif_regs && pdata->controller_ver) {
 			/* controller version 1.6 or above */
+			clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
 			setbits32(non_ehci + FSL_SOC_USB_CTRL,
-				ULPI_PHY_CLK_SEL);
-			/*
-			 * Due to controller issue of PHY_CLK_VALID in ULPI
-			 * mode, we set USB_CTRL_USB_EN before checking
-			 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-			 */
-			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-					UTMI_PHY_EN, USB_CTRL_USB_EN);
+				ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
 		}
 		portsc |= PORT_PTS_ULPI;
 		break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 	if (pdata->have_sysif_regs && pdata->controller_ver &&
 	    (phy_mode == FSL_USB2_PHY_ULPI)) {
 		/* check PHY_CLK_VALID to get phy clk valid */
-		if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
-				PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
+		if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+				PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+				in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
 			printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
 			return -EINVAL;
 		}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver =	{
 		.pm =	&usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 			i = DIV_ROUND_UP(wrap_frame(
 					cur_frame - urb->start_frame),
 					urb->interval);
-			if (urb->transfer_flags & URB_ISO_ASAP) {
+
+			/* Treat underruns as if URB_ISO_ASAP was set */
+			if ((urb->transfer_flags & URB_ISO_ASAP) ||
+					i >= urb->number_of_packets) {
 				urb->start_frame = wrap_frame(urb->start_frame
 						+ i * urb->interval);
 				i = 0;
-			} else if (i >= urb->number_of_packets) {
-				ret = -EXDEV;
-				goto alloc_dmem_failed;
 			}
 		}
 	}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
 			frame &= ~(ed->interval - 1);
 			frame |= ed->branch;
 			urb->start_frame = frame;
+			ed->last_iso = frame + ed->interval * (size - 1);
 		}
 	} else if (ed->type == PIPE_ISOCHRONOUS) {
 		u16	next = ohci_frame_no(ohci) + 1;
 		u16	frame = ed->last_iso + ed->interval;
+		u16	length = ed->interval * (size - 1);
 
 		/* Behind the scheduling threshold? */
 		if (unlikely(tick_before(frame, next))) {
 
-			/* USB_ISO_ASAP: Round up to the first available slot */
+			/* URB_ISO_ASAP: Round up to the first available slot */
 			if (urb->transfer_flags & URB_ISO_ASAP) {
 				frame += (next - frame + ed->interval - 1) &
 						-ed->interval;
 
 			/*
-			 * Not ASAP: Use the next slot in the stream.  If
-			 * the entire URB falls before the threshold, fail.
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
 			 */
 			} else {
-				if (tick_before(frame + ed->interval *
-						(urb->number_of_packets - 1), next)) {
-					retval = -EXDEV;
-					usb_hcd_unlink_urb_from_ep(hcd, urb);
-					goto fail;
-				}
-
 				/*
 				 * Some OHCI hardware doesn't handle late TDs
 				 * correctly.  After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
 				urb_priv->td_cnt = DIV_ROUND_UP(
 						(u16) (next - frame),
 						ed->interval);
+				if (urb_priv->td_cnt >= urb_priv->length) {
+					++urb_priv->td_cnt;	/* Mark it */
+					ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+							urb, frame, length,
+							next);
+				}
 			}
 		}
 		urb->start_frame = frame;
+		ed->last_iso = frame + length;
 	}
 
 	/* fill the TDs and link them to the ed; and
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
 	struct device *dev = ohci_to_hcd(ohci)->self.controller;
+	struct usb_host_endpoint *ep = urb->ep;
+	struct urb_priv *urb_priv;
+
 	// ASSERT (urb->hcpriv != 0);
 
+ restart:
 	urb_free_priv (ohci, urb->hcpriv);
 	urb->hcpriv = NULL;
 	if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
 		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
 		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
 	}
+
+	/*
+	 * An isochronous URB that is sumitted too late won't have any TDs
+	 * (marked by the fact that the td_cnt value is larger than the
+	 * actual number of TDs).  If the next URB on this endpoint is like
+	 * that, give it back now.
+	 */
+	if (!list_empty(&ep->urb_list)) {
+		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+		urb_priv = urb->hcpriv;
+		if (urb_priv->td_cnt > urb_priv->length) {
+			status = 0;
+			goto restart;
+		}
+	}
 }
 
 
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
 		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
 		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
 						(data & 0x0FFF) | 0xE000);
-		td->ed->last_iso = info & 0xffff;
 	} else {
 		td->hwCBP = cpu_to_hc32 (ohci, data);
 	}
@@ -996,7 +1014,7 @@ rescan_this:
 			urb_priv->td_cnt++;
 
 			/* if URB is done, clean up */
-			if (urb_priv->td_cnt == urb_priv->length) {
+			if (urb_priv->td_cnt >= urb_priv->length) {
 				modified = completed = 1;
 				finish_urb(ohci, urb, 0);
 			}
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
 	urb_priv->td_cnt++;
 
 	/* If all this urb's TDs are done, call complete() */
-	if (urb_priv->td_cnt == urb_priv->length)
+	if (urb_priv->td_cnt >= urb_priv->length)
 		finish_urb(ohci, urb, status);
 
 	/* clean schedule:  unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 2c76ef1320ea..08ef2829a7e2 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 	 * switchable ports.
 	 */
 	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
-			cpu_to_le32(ports_available));
+			ports_available);
 
 	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
 			&ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 	 * host.
 	 */
 	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
-			cpu_to_le32(ports_available));
+			ports_available);
 
 	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
 			&ports_available);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
 	.remove =	usb_hcd_pci_remove,
 	.shutdown =	uhci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver =	{
 		.pm =	&usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		}
 
 		/* Fell behind? */
-		if (uhci_frame_before_eq(frame, next)) {
+		if (!uhci_frame_before_eq(next, frame)) {
 
 			/* USB_ISO_ASAP: Round up to the first available slot */
 			if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 						-qh->period;
 
 			/*
-			 * Not ASAP: Use the next slot in the stream.  If
-			 * the entire URB falls before the threshold, fail.
+			 * Not ASAP: Use the next slot in the stream,
+			 * no matter what.
 			 */
 			else if (!uhci_frame_before_eq(next,
 					frame + (urb->number_of_packets - 1) *
 						qh->period))
-				return -EXDEV;
+				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+						urb, frame,
+						(urb->number_of_packets - 1) *
+							qh->period,
+						next);
 		}
 	}
 
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..e8b4c56dcf62 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
 			xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
 	}
-	cmd->command_trb = xhci->cmd_ring->enqueue;
+	cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
 	xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
 	xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
  * - Mark a port as being done with device resume,
  *   and ring the endpoint doorbells.
  * - Stop the Synopsys redriver Compliance Mode polling.
+ * - Drop and reacquire the xHCI lock, in order to wait for port resume.
  */
 static u32 xhci_get_port_status(struct usb_hcd *hcd,
 		struct xhci_bus_state *bus_state,
 		__le32 __iomem **port_array,
-		u16 wIndex, u32 raw_port_status)
+		u16 wIndex, u32 raw_port_status,
+		unsigned long flags)
+	__releases(&xhci->lock)
+	__acquires(&xhci->lock)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 				return 0xffffffff;
 			if (time_after_eq(jiffies,
 					bus_state->resume_done[wIndex])) {
+				int time_left;
+
 				xhci_dbg(xhci, "Resume USB2 port %d\n",
 					wIndex + 1);
 				bus_state->resume_done[wIndex] = 0;
 				clear_bit(wIndex, &bus_state->resuming_ports);
+
+				set_bit(wIndex, &bus_state->rexit_ports);
 				xhci_set_link_state(xhci, port_array, wIndex,
 						XDEV_U0);
-				xhci_dbg(xhci, "set port %d resume\n",
-					wIndex + 1);
-				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-						wIndex + 1);
-				if (!slot_id) {
-					xhci_dbg(xhci, "slot_id is zero\n");
-					return 0xffffffff;
+
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				time_left = wait_for_completion_timeout(
+						&bus_state->rexit_done[wIndex],
+						msecs_to_jiffies(
+							XHCI_MAX_REXIT_TIMEOUT));
+				spin_lock_irqsave(&xhci->lock, flags);
+
+				if (time_left) {
+					slot_id = xhci_find_slot_id_by_port(hcd,
+							xhci, wIndex + 1);
+					if (!slot_id) {
+						xhci_dbg(xhci, "slot_id is zero\n");
+						return 0xffffffff;
+					}
+					xhci_ring_device(xhci, slot_id);
+				} else {
+					int port_status = xhci_readl(xhci,
+							port_array[wIndex]);
+					xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+							XHCI_MAX_REXIT_TIMEOUT,
+							port_status);
+					status |= USB_PORT_STAT_SUSPEND;
+					clear_bit(wIndex, &bus_state->rexit_ports);
 				}
-				xhci_ring_device(xhci, slot_id);
+
 				bus_state->port_c_suspend |= 1 << wIndex;
 				bus_state->suspended_ports &= ~(1 << wIndex);
 			} else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		break;
 	}
 	status = xhci_get_port_status(hcd, bus_state, port_array,
-			wIndex, temp);
+			wIndex, temp, flags);
 	if (status == 0xffffffff)
 		goto error;
 
@@ -1132,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 		t1 = xhci_port_state_to_neutral(t1);
 		if (t1 != t2)
 			xhci_writel(xhci, t2, port_array[port_index]);
-
-		if (hcd->speed != HCD_USB3) {
-			/* enable remote wake up for USB 2.0 */
-			__le32 __iomem *addr;
-			u32 tmp;
-
-			/* Get the port power control register address. */
-			addr = port_array[port_index] + PORTPMSC;
-			tmp = xhci_readl(xhci, addr);
-			tmp |= PORT_RWE;
-			xhci_writel(xhci, tmp, addr);
-		}
 	}
 	hcd->state = HC_STATE_SUSPENDED;
 	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1222,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 			xhci_ring_device(xhci, slot_id);
 		} else
 			xhci_writel(xhci, temp, port_array[port_index]);
-
-		if (hcd->speed != HCD_USB3) {
-			/* disable remote wake up for USB 2.0 */
-			__le32 __iomem *addr;
-			u32 tmp;
-
-			/* Add one to the port status register address to get
-			 * the port power control register address.
-			 */
-			addr = port_array[port_index] + PORTPMSC;
-			tmp = xhci_readl(xhci, addr);
-			tmp &= ~PORT_RWE;
-			xhci_writel(xhci, tmp, addr);
-		}
 	}
 
 	(void) xhci_readl(xhci, &xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < USB_MAXCHILDREN; ++i) {
 		xhci->bus_state[0].resume_done[i] = 0;
 		xhci->bus_state[1].resume_done[i] = 0;
+		/* Only the USB 2.0 completions will ever be used. */
+		init_completion(&xhci->bus_state[1].rexit_done[i]);
 	}
 
 	if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..b8dffd59eb25 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -35,6 +35,9 @@
 #define PCI_VENDOR_ID_ETRON		0x1b6f
 #define PCI_DEVICE_ID_ASROCK_P67	0x7023
 
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 				"QUIRK: Fresco Logic xHC needs configure"
 				" endpoint cmd after reset endpoint");
 	}
+	if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+			pdev->revision == 0x4) {
+		xhci->quirks |= XHCI_SLOW_SUSPEND;
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"QUIRK: Fresco Logic xHC revision %u"
+			"must be suspended extra slowly",
+			pdev->revision);
+	}
 	/* Fresco Logic confirms: all revisions of this chip do not
 	 * support MSI, even though some of them claim to in their PCI
 	 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 		xhci->quirks |= XHCI_AVOID_BEI;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+		/* Workaround for occasional spurious wakeups from S5 (or
+		 * any other sleep) on Haswell machines with LPT and LPT-LP
+		 * with the new Intel BIOS
+		 */
+		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+	}
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
 			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
 		usb_put_hcd(xhci->shared_hcd);
 	}
 	usb_hcd_pci_remove(dev);
+
+	/* Workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		pci_set_power_state(dev, PCI_D3hot);
+
 	kfree(xhci);
 }
 
@@ -351,7 +376,7 @@ static struct pci_driver xhci_pci_driver = {
 	/* suspend and resume implemented later */
 
 	.shutdown = 	usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	.driver = {
 		.pm = &usb_hcd_pci_pm_ops
 	},
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+	/* Enqueue pointer can be left pointing to the link TRB,
+	 * we must handle that
+	 */
+	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+		return ring->enq_seg->next->trbs;
+	return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
-	ep->stopped_td = NULL;
-	ep->stopped_trb = NULL;
+
+	/* Clear stopped_td and stopped_trb if endpoint is not halted */
+	if (!(ep->ep_state & EP_HALTED)) {
+		ep->stopped_td = NULL;
+		ep->stopped_trb = NULL;
+	}
 
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 			inc_deq(xhci, xhci->cmd_ring);
 			return;
 		}
+		/* There is no command to handle if we get a stop event when the
+		 * command ring is empty, event->cmd_trb points to the next
+		 * unset command
+		 */
+		if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+			return;
 	}
 
 	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		}
 	}
 
+	/*
+	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+	 * RExit to a disconnect state).  If so, let the the driver know it's
+	 * out of the RExit state.
+	 */
+	if (!DEV_SUPERSPEED(temp) &&
+			test_and_clear_bit(faked_port_index,
+				&bus_state->rexit_ports)) {
+		complete(&bus_state->rexit_done[faked_port_index]);
+		bogus_port_status = true;
+		goto cleanup;
+	}
+
 	if (hcd->speed != HCD_USB3)
 		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
 			PORT_PLC);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..6e0d886bcce5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
+	/* Workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		xhci_reset(xhci);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"xhci_shutdown completed - status = %x",
 			xhci_readl(xhci, &xhci->op_regs->status));
+
+	/* Yet another workaround for spurious wakeups at shutdown with HSW */
+	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 int xhci_suspend(struct xhci_hcd *xhci)
 {
 	int			rc = 0;
+	unsigned int		delay = XHCI_MAX_HALT_USEC;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
 
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command &= ~CMD_RUN;
 	xhci_writel(xhci, command, &xhci->op_regs->command);
+
+	/* Some chips from Fresco Logic need an extraordinary delay */
+	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
 	if (xhci_handshake(xhci, &xhci->op_regs->status,
-		      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+		      STS_HALT, STS_HALT, delay)) {
 		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
@@ -2598,15 +2610,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	if (command) {
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
-		command->command_trb = xhci->cmd_ring->enqueue;
-
-		/* Enqueue pointer can be left pointing to the link TRB,
-		 * we must handle that
-		 */
-		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
-			command->command_trb =
-				xhci->cmd_ring->enq_seg->next->trbs;
-
+		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2618,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	}
 	init_completion(cmd_completion);
 
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	if (!ctx_change)
 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
 				udev->slot_id, must_succeed);
@@ -3439,14 +3443,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 
 	/* Attempt to submit the Reset Device command to the command ring */
 	spin_lock_irqsave(&xhci->lock, flags);
-	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
-	/* Enqueue pointer can be left pointing to the link TRB,
-	 * we must handle that
-	 */
-	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
-		reset_device_cmd->command_trb =
-			xhci->cmd_ring->enq_seg->next->trbs;
+	reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 
 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
 	ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3647,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	union xhci_trb *cmd_trb;
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3782,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 			slot_ctx->dev_info >> 27);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
 				udev->slot_id);
 	if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..941d5f59e4dc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
 	unsigned long		resume_done[USB_MAXCHILDREN];
 	/* which ports have started to resume */
 	unsigned long		resuming_ports;
+	/* Which ports are waiting on RExit to U0 transition. */
+	unsigned long		rexit_ports;
+	struct completion	rexit_done[USB_MAXCHILDREN];
 };
 
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define	XHCI_MAX_REXIT_TIMEOUT	(20 * 1000)
+
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
 	if (hcd->speed == HCD_USB3)
@@ -1538,6 +1548,8 @@ struct xhci_hcd {
 #define XHCI_COMP_MODE_QUIRK	(1 << 14)
 #define XHCI_AVOID_BEI		(1 << 15)
 #define XHCI_PLAT		(1 << 16)
+#define XHCI_SLOW_SUSPEND	(1 << 17)
+#define XHCI_SPURIOUS_WAKEUP	(1 << 18)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
@@ -1840,6 +1852,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
 		union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e2b21c1d9c40..ba5f70f92888 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
 config USB_HSIC_USB3503
 	tristate "USB3503 HSIC to USB20 Driver"
 	depends on I2C
-	select REGMAP
+	select REGMAP_I2C
 	help
 	  This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18e877ffe7b7..cd70cc886171 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -922,6 +922,52 @@ static void musb_generic_disable(struct musb *musb)
 }
 
 /*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+	void __iomem	*regs = musb->mregs;
+	u8		devctl = musb_readb(regs, MUSB_DEVCTL);
+
+	dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+
+	/* Set INT enable registers, enable interrupts */
+	musb->intrtxe = musb->epmask;
+	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+	musb->intrrxe = musb->epmask & 0xfffe;
+	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+	musb_writeb(regs, MUSB_TESTMODE, 0);
+
+	/* put into basic highspeed mode and start session */
+	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+			| MUSB_POWER_HSENAB
+			/* ENSUSPEND wedges tusb */
+			/* | MUSB_POWER_ENSUSPEND */
+	);
+
+	musb->is_active = 0;
+	devctl = musb_readb(regs, MUSB_DEVCTL);
+	devctl &= ~MUSB_DEVCTL_SESSION;
+
+	/* session started after:
+	 * (a) ID-grounded irq, host mode;
+	 * (b) vbus present/connect IRQ, peripheral mode;
+	 * (c) peripheral initiates, using SRP
+	 */
+	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+		musb->is_active = 1;
+	} else {
+		devctl |= MUSB_DEVCTL_SESSION;
+	}
+
+	musb_platform_enable(musb);
+	musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+/*
  * Make the HDRC stop (disable interrupts, etc.);
  * reversible by musb_start
  * called on gadget driver unregister
927 * called on gadget driver unregister 973 * called on gadget driver unregister
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 65f3917b4fc5..1c5bf75ee8ff 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
 extern const char musb_driver_name[];
 
 extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
 
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 4047cbb91bac..bd4138d80a48 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
 	struct dsps_glue *glue;
 	int ret;
 
+	if (!strcmp(pdev->name, "musb-hdrc"))
+		return -ENODEV;
+
 	match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
 	if (!match) {
 		dev_err(&pdev->dev, "fail to get matching of_match struct\n");
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9a08679d204d..3671898a4535 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
 	musb->g.max_speed = USB_SPEED_HIGH;
 	musb->g.speed = USB_SPEED_UNKNOWN;
 
+	MUSB_DEV_MODE(musb);
+	musb->xceiv->otg->default_a = 0;
+	musb->xceiv->state = OTG_STATE_B_IDLE;
+
 	/* this "gadget" abstracts/virtualizes the controller */
 	musb->g.name = musb_driver_name;
 	musb->g.is_otg = 1;
@@ -1855,6 +1859,8 @@ static int musb_gadget_start(struct usb_gadget *g,
 	musb->xceiv->state = OTG_STATE_B_IDLE;
 	spin_unlock_irqrestore(&musb->lock, flags);
 
+	musb_start(musb);
+
 	/* REVISIT:  funcall to other code, which also
 	 * handles power budgeting ... this way also
 	 * ensures HdrcStart is indirectly called.
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index a523950c2b32..d1d6b83aabca 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -44,52 +44,6 @@
 
 #include "musb_core.h"
 
-/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
-static void musb_start(struct musb *musb)
-{
-	void __iomem	*regs = musb->mregs;
-	u8		devctl = musb_readb(regs, MUSB_DEVCTL);
-
-	dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
-
-	/* Set INT enable registers, enable interrupts */
-	musb->intrtxe = musb->epmask;
-	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
-	musb->intrrxe = musb->epmask & 0xfffe;
-	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
-	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
-
-	musb_writeb(regs, MUSB_TESTMODE, 0);
-
-	/* put into basic highspeed mode and start session */
-	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
-			| MUSB_POWER_HSENAB
-			/* ENSUSPEND wedges tusb */
-			/* | MUSB_POWER_ENSUSPEND */
-	);
-
-	musb->is_active = 0;
-	devctl = musb_readb(regs, MUSB_DEVCTL);
-	devctl &= ~MUSB_DEVCTL_SESSION;
-
-	/* session started after:
-	 * (a) ID-grounded irq, host mode;
-	 * (b) vbus present/connect IRQ, peripheral mode;
-	 * (c) peripheral initiates, using SRP
-	 */
-	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
-			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
-		musb->is_active = 1;
-	} else {
-		devctl |= MUSB_DEVCTL_SESSION;
-	}
-
-	musb_platform_enable(musb);
-	musb_writeb(regs, MUSB_DEVCTL, devctl);
-}
-
 static void musb_port_suspend(struct musb *musb, bool do_suspend)
 {
 	struct usb_otg *otg = musb->xceiv->otg;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index b2f29c9aebbf..02799a5efcd4 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
241 241
242/* platform driver interface */ 242/* platform driver interface */
243 243
244static int __init gpio_vbus_probe(struct platform_device *pdev) 244static int gpio_vbus_probe(struct platform_device *pdev)
245{ 245{
246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
247 struct gpio_vbus_data *gpio_vbus; 247 struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
349 return err; 349 return err;
350} 350}
351 351
352static int __exit gpio_vbus_remove(struct platform_device *pdev) 352static int gpio_vbus_remove(struct platform_device *pdev)
353{ 353{
354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); 354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
398}; 398};
399#endif 399#endif
400 400
401/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
402
403MODULE_ALIAS("platform:gpio-vbus"); 401MODULE_ALIAS("platform:gpio-vbus");
404 402
405static struct platform_driver gpio_vbus_driver = { 403static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
410 .pm = &gpio_vbus_dev_pm_ops, 408 .pm = &gpio_vbus_dev_pm_ops,
411#endif 409#endif
412 }, 410 },
413 .remove = __exit_p(gpio_vbus_remove), 411 .probe = gpio_vbus_probe,
412 .remove = gpio_vbus_remove,
414}; 413};
415 414
416module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); 415module_platform_driver(gpio_vbus_driver);
417 416
418MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); 417MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
419MODULE_AUTHOR("Philipp Zabel"); 418MODULE_AUTHOR("Philipp Zabel");
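Note on the phy-gpio-vbus hunk above: it drops the __init/__exit annotations and module_platform_driver_probe() in favour of a plain probe/remove pair registered through module_platform_driver(), because a probe routine that can run after init (deferred probing, manual bind via sysfs) must not live in __init memory. A minimal sketch of the resulting pattern follows; the "foo" names are hypothetical stand-ins, not the gpio-vbus driver itself.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver showing the pattern the hunk above converts to.
 * probe() may run long after init, so it must not be marked __init. */
static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	.probe = foo_probe,
	.remove = foo_remove,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

With this form the driver core is free to bind and unbind the device at any time, which is exactly what the __init variant could not support.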
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1cf6f125f5f0..acaee066b99a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
81 81
82#define HUAWEI_VENDOR_ID 0x12D1 82#define HUAWEI_VENDOR_ID 0x12D1
83#define HUAWEI_PRODUCT_E173 0x140C 83#define HUAWEI_PRODUCT_E173 0x140C
84#define HUAWEI_PRODUCT_E1750 0x1406
84#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
85#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
86#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
@@ -450,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
450#define CHANGHONG_VENDOR_ID 0x2077 451#define CHANGHONG_VENDOR_ID 0x2077
451#define CHANGHONG_PRODUCT_CH690 0x7001 452#define CHANGHONG_PRODUCT_CH690 0x7001
452 453
454/* Inovia */
455#define INOVIA_VENDOR_ID 0x20a6
456#define INOVIA_SEW858 0x1105
457
453/* some devices interfaces need special handling due to a number of reasons */ 458/* some devices interfaces need special handling due to a number of reasons */
454enum option_blacklist_reason { 459enum option_blacklist_reason {
455 OPTION_BLACKLIST_NONE = 0, 460 OPTION_BLACKLIST_NONE = 0,
@@ -567,6 +572,8 @@ static const struct usb_device_id option_ids[] = {
567 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
569 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 574 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
576 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
570 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 577 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 578 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 579 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
@@ -686,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
686 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, 693 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
687 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, 694 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
688 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, 695 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
696 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
697 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
698 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
699 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
700 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
701 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
702 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
703 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
704 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
705 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
706 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
707 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
708 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
709 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
710 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
711 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
712 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
713 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
714 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
715 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
716 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
717 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
718 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
719 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
720 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
721 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
722 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
723 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
724 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
725 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
726 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
727 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
728 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
729 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
730 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
731 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
732 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
733 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
734 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
735 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
736 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
737 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
738 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
739 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
740 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
741 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
742 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
743 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
747 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
748 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
749 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
750 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
751 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
752 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
753 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
754 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
755 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
756 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
757 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
758 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
759 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
760 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
761 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
762 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
763 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
764 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
765 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
766 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
767 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
768 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
769 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
770 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
771 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
772 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
773 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
774 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
775 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
776 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
777 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
778 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
779 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
780 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
781 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
782 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
783 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
784 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
785 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
786 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
787 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
788 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
789 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
790 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
791 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
792 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
793 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
794 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
795 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
796 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
797 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
798 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
799 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
800 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
801 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
802 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
803 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
804 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
805 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
806 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
807 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
808 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
809 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
810 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
811 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
812 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
813 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
814 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
815 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
816 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
817 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
818 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
819 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
820 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
821 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
822 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
823 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
824 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
825 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
826 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
827 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
828 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
829 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
830 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
831 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
832 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
833 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
834 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
835 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
836 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
837 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
838 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
839 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
840 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
841 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
842 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
843 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
844 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
845 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
846 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
847 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
848 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
849 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
850 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
851 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
852 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
853 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
854 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
855 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
856 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
857 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
858 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
859 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
860 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
861 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
862 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
863 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
864 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
865 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
866 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
867 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
868 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
869 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
870 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
871 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
872 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
873 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
874 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
875 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
876 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
877 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
878 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
879 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
880 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
881 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
882 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
883 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
884 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
885 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
886 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
887 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
888 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
889 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
890 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
891 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
892 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
893 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
894 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
895 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
896 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
897 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
898 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
899 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
900 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
901 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
902 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
903 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
904 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
905 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
906 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
907 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
908 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
909 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
910 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
911 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
689 912
690 913
691 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 914 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1254,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
1254 1477
1255 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1478 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1256 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1479 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, 1480 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1481 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1482 },
1258 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1483 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1259 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1484 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1260 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1485 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1342,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
1342 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1567 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1343 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1568 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1344 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1569 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1570 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1345 { } /* Terminating entry */ 1571 { } /* Terminating entry */
1346}; 1572};
1347MODULE_DEVICE_TABLE(usb, option_ids); 1573MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 760b78560f67..c9a35697ebe9 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
193 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
194 { } /* terminator */ 195 { } /* terminator */
195}; 196};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 94d75edef77f..18509e6c21ab 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
211 /* 211 /*
212 * Many devices do not respond properly to READ_CAPACITY_16. 212 * Many devices do not respond properly to READ_CAPACITY_16.
213 * Tell the SCSI layer to try READ_CAPACITY_10 first. 213 * Tell the SCSI layer to try READ_CAPACITY_10 first.
214 * However some USB 3.0 drive enclosures return capacity
215 * modulo 2TB. Those must use READ_CAPACITY_16
214 */ 216 */
215 sdev->try_rc_10_first = 1; 217 if (!(us->fflags & US_FL_NEEDS_CAP16))
218 sdev->try_rc_10_first = 1;
216 219
217 /* assume SPC3 or later devices support sense size > 18 */ 220 /* assume SPC3 or later devices support sense size > 18 */
218 if (sdev->scsi_level > SCSI_SPC_2) 221 if (sdev->scsi_level > SCSI_SPC_2)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c015f2c16729..de32cfa5bfa6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1926 US_FL_IGNORE_RESIDUE ), 1926 US_FL_IGNORE_RESIDUE ),
1927 1927
1928/* Reported by Oliver Neukum <oneukum@suse.com> */
1929UNUSUAL_DEV( 0x174c, 0x55aa, 0x0100, 0x0100,
1930 "ASMedia",
1931 "AS2105",
1932 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1933 US_FL_NEEDS_CAP16),
1934
1928/* Reported by Jesse Feddema <jdfeddema@gmail.com> */ 1935/* Reported by Jesse Feddema <jdfeddema@gmail.com> */
1929UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, 1936UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
1930 "Yarvik", 1937 "Yarvik",
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a9807dea3887..4fb7a8f83c8a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
545 long npage; 545 long npage;
546 int ret = 0, prot = 0; 546 int ret = 0, prot = 0;
547 uint64_t mask; 547 uint64_t mask;
548 struct vfio_dma *dma = NULL;
549 unsigned long pfn;
548 550
549 end = map->iova + map->size; 551 end = map->iova + map->size;
550 552
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
587 } 589 }
588 590
589 for (iova = map->iova; iova < end; iova += size, vaddr += size) { 591 for (iova = map->iova; iova < end; iova += size, vaddr += size) {
590 struct vfio_dma *dma = NULL;
591 unsigned long pfn;
592 long i; 592 long i;
593 593
594 /* Pin a contiguous chunk of memory */ 594 /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
597 if (npage <= 0) { 597 if (npage <= 0) {
598 WARN_ON(!npage); 598 WARN_ON(!npage);
599 ret = (int)npage; 599 ret = (int)npage;
600 break; 600 goto out;
601 } 601 }
602 602
603 /* Verify pages are not already mapped */ 603 /* Verify pages are not already mapped */
604 for (i = 0; i < npage; i++) { 604 for (i = 0; i < npage; i++) {
605 if (iommu_iova_to_phys(iommu->domain, 605 if (iommu_iova_to_phys(iommu->domain,
606 iova + (i << PAGE_SHIFT))) { 606 iova + (i << PAGE_SHIFT))) {
607 vfio_unpin_pages(pfn, npage, prot, true);
608 ret = -EBUSY; 607 ret = -EBUSY;
609 break; 608 goto out_unpin;
610 } 609 }
611 } 610 }
612 611
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
616 if (ret) { 615 if (ret) {
617 if (ret != -EBUSY || 616 if (ret != -EBUSY ||
618 map_try_harder(iommu, iova, pfn, npage, prot)) { 617 map_try_harder(iommu, iova, pfn, npage, prot)) {
619 vfio_unpin_pages(pfn, npage, prot, true); 618 goto out_unpin;
620 break;
621 } 619 }
622 } 620 }
623 621
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
672 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 670 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
673 if (!dma) { 671 if (!dma) {
674 iommu_unmap(iommu->domain, iova, size); 672 iommu_unmap(iommu->domain, iova, size);
675 vfio_unpin_pages(pfn, npage, prot, true);
676 ret = -ENOMEM; 673 ret = -ENOMEM;
677 break; 674 goto out_unpin;
678 } 675 }
679 676
680 dma->size = size; 677 dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
685 } 682 }
686 } 683 }
687 684
688 if (ret) { 685 WARN_ON(ret);
689 struct vfio_dma *tmp; 686 mutex_unlock(&iommu->lock);
690 iova = map->iova; 687 return ret;
691 size = map->size; 688
692 while ((tmp = vfio_find_dma(iommu, iova, size))) { 689out_unpin:
693 int r = vfio_remove_dma_overlap(iommu, iova, 690 vfio_unpin_pages(pfn, npage, prot, true);
694 &size, tmp); 691
695 if (WARN_ON(r || !size)) 692out:
696 break; 693 iova = map->iova;
697 } 694 size = map->size;
695 while ((dma = vfio_find_dma(iommu, iova, size))) {
696 int r = vfio_remove_dma_overlap(iommu, iova,
697 &size, dma);
698 if (WARN_ON(r || !size))
699 break;
698 } 700 }
699 701
700 mutex_unlock(&iommu->lock); 702 mutex_unlock(&iommu->lock);
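The vfio_iommu_type1.c hunk above replaces per-iteration break-and-unwind code with shared out_unpin/out labels, so every failure path unpins the pages and removes any partially created DMA ranges exactly once. The compilable toy below illustrates that goto-based unwind idiom under hypothetical acquire/release helpers; it is not the vfio code itself.

#include <stdio.h>

/* Hypothetical resources standing in for pinned pages and IOMMU mappings. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return -1; /* simulate failure */ }
static void release_a(void) { puts("release_a"); }

static int do_map(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;

	ret = acquire_b();
	if (ret)
		goto out_release_a;	/* unwind only what was acquired */

	return 0;

out_release_a:
	release_a();
out:
	/* common failure path: roll back partial state exactly once */
	return ret;
}

int main(void)
{
	printf("do_map() = %d\n", do_map());
	return 0;
}

Centralising the rollback in labels avoids the duplicated (and, as the removed lines show, easy to get wrong) cleanup calls scattered through the loop body.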
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 592b31698fc8..e663921eebb6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
728 } 728 }
729 se_sess = tv_nexus->tvn_se_sess; 729 se_sess = tv_nexus->tvn_se_sess;
730 730
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
735 }
736
732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
733 sg = cmd->tvc_sgl; 738 sg = cmd->tvc_sgl;
734 pages = cmd->tvc_upages; 739 pages = cmd->tvc_upages;
@@ -1051,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1051 if (data_direction != DMA_NONE) { 1056 if (data_direction != DMA_NONE) {
1052 ret = vhost_scsi_map_iov_to_sgl(cmd, 1057 ret = vhost_scsi_map_iov_to_sgl(cmd,
1053 &vq->iov[data_first], data_num, 1058 &vq->iov[data_first], data_num,
1054 data_direction == DMA_TO_DEVICE); 1059 data_direction == DMA_FROM_DEVICE);
1055 if (unlikely(ret)) { 1060 if (unlikely(ret)) {
1056 vq_err(vq, "Failed to map iov to sgl\n"); 1061 vq_err(vq, "Failed to map iov to sgl\n");
1057 goto err_free; 1062 goto err_free;
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
514 if (IS_ERR(ctrl->clk)) { 514 if (IS_ERR(ctrl->clk)) {
515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); 515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
516 ret = -ENOENT; 516 ret = -ENOENT;
517 goto failed_get_clk; 517 goto failed;
518 } 518 }
519 clk_prepare_enable(ctrl->clk); 519 clk_prepare_enable(ctrl->clk);
520 520
@@ -551,21 +551,8 @@ failed_path_init:
551 path_deinit(path_plat); 551 path_deinit(path_plat);
552 } 552 }
553 553
554 if (ctrl->clk) { 554 clk_disable_unprepare(ctrl->clk);
555 devm_clk_put(ctrl->dev, ctrl->clk);
556 clk_disable_unprepare(ctrl->clk);
557 }
558failed_get_clk:
559 devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
560failed: 555failed:
561 if (ctrl) {
562 if (ctrl->reg_base)
563 devm_iounmap(ctrl->dev, ctrl->reg_base);
564 devm_release_mem_region(ctrl->dev, res->start,
565 resource_size(res));
566 devm_kfree(ctrl->dev, ctrl);
567 }
568
569 dev_err(&pdev->dev, "device init failed\n"); 556 dev_err(&pdev->dev, "device init failed\n");
570 557
571 return ret; 558 return ret;
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
620 break; 620 break;
621 case 3: 621 case 3:
622 bits_per_pixel = 32; 622 bits_per_pixel = 32;
623 break;
623 case 1: 624 case 1:
624 default: 625 default:
625 return -EINVAL; 626 return -EINVAL;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, 2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
2076 info->monspecs.modedb, 16)) { 2076 info->monspecs.modedb, 16)) {
2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); 2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
2078 err = -EINVAL;
2078 goto err_map_video; 2079 goto err_map_video;
2079 } 2080 }
2080 2081
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2097 info->fix.smem_len >> 10, info->var.xres, 2098 info->fix.smem_len >> 10, info->var.xres,
2098 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); 2099 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
2099 2100
2100 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2101 err = fb_alloc_cmap(&info->cmap, 256, 0);
2102 if (err < 0)
2101 goto err_map_video; 2103 goto err_map_video;
2102 2104
2103 err = register_framebuffer(info); 2105 err = register_framebuffer(info);
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 timing_np = of_find_node_by_name(np, name); 123 timing_np = of_get_child_by_name(np, name);
124 if (!timing_np) { 124 if (!timing_np) {
125 pr_err("%s: could not find node '%s'\n", 125 pr_err("%s: could not find node '%s'\n",
126 of_node_full_name(np), name); 126 of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
143 struct display_timings *disp; 143 struct display_timings *disp;
144 144
145 if (!np) { 145 if (!np) {
146 pr_err("%s: no devicenode given\n", of_node_full_name(np)); 146 pr_err("%s: no device node given\n", of_node_full_name(np));
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 timings_np = of_find_node_by_name(np, "display-timings"); 150 timings_np = of_get_child_by_name(np, "display-timings");
151 if (!timings_np) { 151 if (!timings_np) {
152 pr_err("%s: could not find display-timings node\n", 152 pr_err("%s: could not find display-timings node\n",
153 of_node_full_name(np)); 153 of_node_full_name(np));
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
35 35
36config DISPLAY_PANEL_DSI_CM 36config DISPLAY_PANEL_DSI_CM
37 tristate "Generic DSI Command Mode Panel" 37 tristate "Generic DSI Command Mode Panel"
38 depends on BACKLIGHT_CLASS_DEVICE
38 help 39 help
39 Driver for generic DSI command mode panels. 40 Driver for generic DSI command mode panels.
40 41
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
191 in = omap_dss_find_output(pdata->source); 191 in = omap_dss_find_output(pdata->source);
192 if (in == NULL) { 192 if (in == NULL) {
193 dev_err(&pdev->dev, "Failed to find video source\n"); 193 dev_err(&pdev->dev, "Failed to find video source\n");
194 return -ENODEV; 194 return -EPROBE_DEFER;
195 } 195 }
196 196
197 ddata->in = in; 197 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 dev_err(&pdev->dev, "Failed to find video source\n"); 265 dev_err(&pdev->dev, "Failed to find video source\n");
266 return -ENODEV; 266 return -EPROBE_DEFER;
267 } 267 }
268 268
269 ddata->in = in; 269 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
290 in = omap_dss_find_output(pdata->source); 290 in = omap_dss_find_output(pdata->source);
291 if (in == NULL) { 291 if (in == NULL) {
292 dev_err(&pdev->dev, "Failed to find video source\n"); 292 dev_err(&pdev->dev, "Failed to find video source\n");
293 return -ENODEV; 293 return -EPROBE_DEFER;
294 } 294 }
295 295
296 ddata->in = in; 296 ddata->in = in;
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3691 } 3691 }
3692 3692
3693 pm_runtime_enable(&pdev->dev); 3693 pm_runtime_enable(&pdev->dev);
3694 pm_runtime_irq_safe(&pdev->dev);
3694 3695
3695 r = dispc_runtime_get(); 3696 r = dispc_runtime_get();
3696 if (r) 3697 if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 (info->var.bits_per_pixel * info->var.xres_virtual); 1336 (info->var.bits_per_pixel * info->var.xres_virtual);
1337 if (info->var.yres_virtual < info->var.yres) { 1337 if (info->var.yres_virtual < info->var.yres) {
1338 dev_err(info->device, "virtual vertical size smaller than real\n"); 1338 dev_err(info->device, "virtual vertical size smaller than real\n");
1339 goto err_find_mode; 1339 rc = -EINVAL;
1340 }
1341
1342 /* maximize virtual vertical size for fast scrolling */
1343 info->var.yres_virtual = info->fix.smem_len * 8 /
1344 (info->var.bits_per_pixel * info->var.xres_virtual);
1345 if (info->var.yres_virtual < info->var.yres) {
1346 dev_err(info->device, "virtual vertical size smaller than real\n");
1347 goto err_find_mode; 1340 goto err_find_mode;
1348 } 1341 }
1349 1342
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c7c64f18773d..fa932c2f7d97 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
613 sl = dev_to_w1_slave(dev); 613 sl = dev_to_w1_slave(dev);
614 fops = sl->family->fops; 614 fops = sl->family->fops;
615 615
616 if (!fops)
617 return 0;
618
616 switch (action) { 619 switch (action) {
617 case BUS_NOTIFY_ADD_DEVICE: 620 case BUS_NOTIFY_ADD_DEVICE:
618 /* if the family driver needs to initialize something... */ 621 /* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
713 atomic_set(&sl->refcnt, 0); 716 atomic_set(&sl->refcnt, 0);
714 init_completion(&sl->released); 717 init_completion(&sl->released);
715 718
719 /* slave modules need to be loaded in a context with unlocked mutex */
720 mutex_unlock(&dev->mutex);
716 request_module("w1-family-0x%0x", rn->family); 721 request_module("w1-family-0x%0x", rn->family);
722 mutex_lock(&dev->mutex);
717 723
718 spin_lock(&w1_flock); 724 spin_lock(&w1_flock);
719 f = w1_family_registered(rn->family); 725 f = w1_family_registered(rn->family);
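The w1.c hunk above drops dev->mutex around request_module() because loading a family module can block for a long time and can indirectly re-enter paths that also take the master mutex. The userspace sketch below (pthreads, hypothetical names) shows the general unlock-call-relock pattern and why state must be treated as stale once the lock has been dropped.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for request_module(): a call that may block for a long time
 * and whose side effects may themselves need dev_mutex. */
static void slow_blocking_call(void)
{
	sleep(1);
}

static void attach_device(void)
{
	pthread_mutex_lock(&dev_mutex);

	/* ... updates that genuinely need the lock ... */

	/* Drop the lock around the blocking call so other paths that take
	 * dev_mutex (or work triggered by the call) cannot deadlock. */
	pthread_mutex_unlock(&dev_mutex);
	slow_blocking_call();
	pthread_mutex_lock(&dev_mutex);

	/* ... re-validate shared state here: it may have changed while
	 * the lock was not held ... */

	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	attach_device();
	puts("done");
	return 0;
}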
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 5be5e3d14f79..19f3c3fc65f4 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev,
802 return -ENODEV; 802 return -ENODEV;
803 } 803 }
804 804
805 /*
806 * Ignore all auxiliary iLO devices with the following PCI ID
807 */
808 if (dev->subsystem_device == 0x1979)
809 return -ENODEV;
810
805 if (pci_enable_device(dev)) { 811 if (pci_enable_device(dev)) {
806 dev_warn(&dev->dev, 812 dev_warn(&dev->dev,
807 "Not possible to enable PCI Device: 0x%x:0x%x.\n", 813 "Not possible to enable PCI Device: 0x%x:0x%x.\n",
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 491419e0772a..5c3d4df63e68 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -35,7 +35,7 @@
35#define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4) 35#define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
36#define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x)) 36#define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
37#define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4) 37#define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
38#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4) 38#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
39#define STAGE_CFG_PRESCALER_MASK 0x30 39#define STAGE_CFG_PRESCALER_MASK 0x30
40#define STAGE_CFG_ACTION_MASK 0x7 40#define STAGE_CFG_ACTION_MASK 0x7
41#define STAGE_CFG_ASSERT (1 << 3) 41#define STAGE_CFG_ASSERT (1 << 3)
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 1f94b42764aa..f6caa77151c7 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = {
146 .set_timeout = sunxi_wdt_set_timeout, 146 .set_timeout = sunxi_wdt_set_timeout,
147}; 147};
148 148
149static int __init sunxi_wdt_probe(struct platform_device *pdev) 149static int sunxi_wdt_probe(struct platform_device *pdev)
150{ 150{
151 struct sunxi_wdt_dev *sunxi_wdt; 151 struct sunxi_wdt_dev *sunxi_wdt;
152 struct resource *res; 152 struct resource *res;
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev)
187 return 0; 187 return 0;
188} 188}
189 189
190static int __exit sunxi_wdt_remove(struct platform_device *pdev) 190static int sunxi_wdt_remove(struct platform_device *pdev)
191{ 191{
192 struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); 192 struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
193 193
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 42913f131dc2..c9b0c627fe7e 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
310 310
311 case WDIOC_GETSTATUS: 311 case WDIOC_GETSTATUS:
312 case WDIOC_GETBOOTSTATUS: 312 case WDIOC_GETBOOTSTATUS:
313 return put_user(0, p); 313 error = put_user(0, p);
314 break;
314 315
315 case WDIOC_KEEPALIVE: 316 case WDIOC_KEEPALIVE:
316 ts72xx_wdt_kick(wdt); 317 ts72xx_wdt_kick(wdt);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
398 if (nr_pages > ARRAY_SIZE(frame_list)) 398 if (nr_pages > ARRAY_SIZE(frame_list))
399 nr_pages = ARRAY_SIZE(frame_list); 399 nr_pages = ARRAY_SIZE(frame_list);
400 400
401 scratch_page = get_balloon_scratch_page();
402
403 for (i = 0; i < nr_pages; i++) { 401 for (i = 0; i < nr_pages; i++) {
404 page = alloc_page(gfp); 402 page = alloc_page(gfp);
405 if (page == NULL) { 403 if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
413 411
414 scrub_page(page); 412 scrub_page(page);
415 413
414 /*
415 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent.
418 */
419 scratch_page = get_balloon_scratch_page();
416#ifdef CONFIG_XEN_HAVE_PVMMU 420#ifdef CONFIG_XEN_HAVE_PVMMU
417 if (xen_pv_domain() && !PageHighMem(page)) { 421 if (xen_pv_domain() && !PageHighMem(page)) {
418 ret = HYPERVISOR_update_va_mapping( 422 ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
422 BUG_ON(ret); 426 BUG_ON(ret);
423 } 427 }
424#endif 428#endif
425 }
426
427 /* Ensure that ballooned highmem pages don't have kmaps. */
428 kmap_flush_unused();
429 flush_tlb_all();
430
431 /* No more mappings: invalidate P2M and add to balloon. */
432 for (i = 0; i < nr_pages; i++) {
433 pfn = mfn_to_pfn(frame_list[i]);
434 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 429 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
435 unsigned long p; 430 unsigned long p;
436 p = page_to_pfn(scratch_page); 431 p = page_to_pfn(scratch_page);
437 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 432 __set_phys_to_machine(pfn, pfn_to_mfn(p));
438 } 433 }
434 put_balloon_scratch_page();
435
439 balloon_append(pfn_to_page(pfn)); 436 balloon_append(pfn_to_page(pfn));
440 } 437 }
441 438
442 put_balloon_scratch_page(); 439 /* Ensure that ballooned highmem pages don't have kmaps. */
440 kmap_flush_unused();
441 flush_tlb_all();
443 442
444 set_xen_guest_handle(reservation.extent_start, frame_list); 443 set_xen_guest_handle(reservation.extent_start, frame_list);
445 reservation.nr_extents = nr_pages; 444 reservation.nr_extents = nr_pages;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 646337dc5201..529300327f45 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -600,9 +600,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
600 600
601 /* lock down the parent dentry so we can peer at it */ 601 /* lock down the parent dentry so we can peer at it */
602 parent = dget_parent(dentry); 602 parent = dget_parent(dentry);
603 if (!parent->d_inode)
604 goto out_bad;
605
606 dir = AFS_FS_I(parent->d_inode); 603 dir = AFS_FS_I(parent->d_inode);
607 604
608 /* validate the parent directory */ 605 /* validate the parent directory */
diff --git a/fs/aio.c b/fs/aio.c
index 6b868f0e0c4c..067e3d340c35 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -167,10 +167,25 @@ static int __init aio_setup(void)
167} 167}
168__initcall(aio_setup); 168__initcall(aio_setup);
169 169
170static void put_aio_ring_file(struct kioctx *ctx)
171{
172 struct file *aio_ring_file = ctx->aio_ring_file;
173 if (aio_ring_file) {
174 truncate_setsize(aio_ring_file->f_inode, 0);
175
176 /* Prevent further access to the kioctx from migratepages */
177 spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
178 aio_ring_file->f_inode->i_mapping->private_data = NULL;
179 ctx->aio_ring_file = NULL;
180 spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);
181
182 fput(aio_ring_file);
183 }
184}
185
170static void aio_free_ring(struct kioctx *ctx) 186static void aio_free_ring(struct kioctx *ctx)
171{ 187{
172 int i; 188 int i;
173 struct file *aio_ring_file = ctx->aio_ring_file;
174 189
175 for (i = 0; i < ctx->nr_pages; i++) { 190 for (i = 0; i < ctx->nr_pages; i++) {
176 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, 191 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -178,14 +193,10 @@ static void aio_free_ring(struct kioctx *ctx)
178 put_page(ctx->ring_pages[i]); 193 put_page(ctx->ring_pages[i]);
179 } 194 }
180 195
196 put_aio_ring_file(ctx);
197
181 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 198 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
182 kfree(ctx->ring_pages); 199 kfree(ctx->ring_pages);
183
184 if (aio_ring_file) {
185 truncate_setsize(aio_ring_file->f_inode, 0);
186 fput(aio_ring_file);
187 ctx->aio_ring_file = NULL;
188 }
189} 200}
190 201
191static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) 202static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
@@ -207,9 +218,8 @@ static int aio_set_page_dirty(struct page *page)
207static int aio_migratepage(struct address_space *mapping, struct page *new, 218static int aio_migratepage(struct address_space *mapping, struct page *new,
208 struct page *old, enum migrate_mode mode) 219 struct page *old, enum migrate_mode mode)
209{ 220{
210 struct kioctx *ctx = mapping->private_data; 221 struct kioctx *ctx;
211 unsigned long flags; 222 unsigned long flags;
212 unsigned idx = old->index;
213 int rc; 223 int rc;
214 224
215 /* Writeback must be complete */ 225 /* Writeback must be complete */
@@ -224,10 +234,23 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
224 234
225 get_page(new); 235 get_page(new);
226 236
227 spin_lock_irqsave(&ctx->completion_lock, flags); 237 /* We can potentially race against kioctx teardown here. Use the
228 migrate_page_copy(new, old); 238 * address_space's private data lock to protect the mapping's
229 ctx->ring_pages[idx] = new; 239 * private_data.
230 spin_unlock_irqrestore(&ctx->completion_lock, flags); 240 */
241 spin_lock(&mapping->private_lock);
242 ctx = mapping->private_data;
243 if (ctx) {
244 pgoff_t idx;
245 spin_lock_irqsave(&ctx->completion_lock, flags);
246 migrate_page_copy(new, old);
247 idx = old->index;
248 if (idx < (pgoff_t)ctx->nr_pages)
249 ctx->ring_pages[idx] = new;
250 spin_unlock_irqrestore(&ctx->completion_lock, flags);
251 } else
252 rc = -EBUSY;
253 spin_unlock(&mapping->private_lock);
231 254
232 return rc; 255 return rc;
233} 256}
@@ -617,8 +640,7 @@ out_freepcpu:
617out_freeref: 640out_freeref:
618 free_percpu(ctx->users.pcpu_count); 641 free_percpu(ctx->users.pcpu_count);
619out_freectx: 642out_freectx:
620 if (ctx->aio_ring_file) 643 put_aio_ring_file(ctx);
621 fput(ctx->aio_ring_file);
622 kmem_cache_free(kioctx_cachep, ctx); 644 kmem_cache_free(kioctx_cachep, ctx);
623 pr_debug("error allocating ioctx %d\n", err); 645 pr_debug("error allocating ioctx %d\n", err);
624 return ERR_PTR(err); 646 return ERR_PTR(err);
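The fs/aio.c hunk above closes a race between ring-page migration and kioctx teardown: put_aio_ring_file() clears mapping->private_data under the mapping's private_lock, and aio_migratepage() now re-reads that pointer under the same lock, backing off with -EBUSY once it is NULL. A compilable miniature of that "clear under lock / re-check under lock" pattern is shown below, with made-up names standing in for the address_space and kioctx.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the address_space <-> kioctx back-pointer. */
struct mapping {
	pthread_mutex_t private_lock;
	void *private_data;
};

static void teardown(struct mapping *m)
{
	pthread_mutex_lock(&m->private_lock);
	m->private_data = NULL;		/* later users must observe the clearing */
	pthread_mutex_unlock(&m->private_lock);
}

static int use_ctx(struct mapping *m)
{
	int ret;

	pthread_mutex_lock(&m->private_lock);
	if (m->private_data)
		ret = 1;	/* safe: teardown cannot clear it while we hold the lock */
	else
		ret = -1;	/* raced with teardown, back off */
	pthread_mutex_unlock(&m->private_lock);
	return ret;
}

int main(void)
{
	struct mapping m = { PTHREAD_MUTEX_INITIALIZER, &m };

	printf("before teardown: %d\n", use_ctx(&m));
	teardown(&m);
	printf("after teardown: %d\n", use_ctx(&m));
	return 0;
}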
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 100edcc5e312..4c94a79991bb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1413 * long file_ofs 1413 * long file_ofs
1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... 1414 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1415 */ 1415 */
1416static void fill_files_note(struct memelfnote *note) 1416static int fill_files_note(struct memelfnote *note)
1417{ 1417{
1418 struct vm_area_struct *vma; 1418 struct vm_area_struct *vma;
1419 unsigned count, size, names_ofs, remaining, n; 1419 unsigned count, size, names_ofs, remaining, n;
@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note)
1428 names_ofs = (2 + 3 * count) * sizeof(data[0]); 1428 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1429 alloc: 1429 alloc:
1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1430 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1431 goto err; 1431 return -EINVAL;
1432 size = round_up(size, PAGE_SIZE); 1432 size = round_up(size, PAGE_SIZE);
1433 data = vmalloc(size); 1433 data = vmalloc(size);
1434 if (!data) 1434 if (!data)
1435 goto err; 1435 return -ENOMEM;
1436 1436
1437 start_end_ofs = data + 2; 1437 start_end_ofs = data + 2;
1438 name_base = name_curpos = ((char *)data) + names_ofs; 1438 name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note)
1485 1485
1486 size = name_curpos - (char *)data; 1486 size = name_curpos - (char *)data;
1487 fill_note(note, "CORE", NT_FILE, size, data); 1487 fill_note(note, "CORE", NT_FILE, size, data);
1488 err: ; 1488 return 0;
1489} 1489}
1490 1490
1491#ifdef CORE_DUMP_USE_REGSET 1491#ifdef CORE_DUMP_USE_REGSET
@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1686 fill_auxv_note(&info->auxv, current->mm); 1686 fill_auxv_note(&info->auxv, current->mm);
1687 info->size += notesize(&info->auxv); 1687 info->size += notesize(&info->auxv);
1688 1688
1689 fill_files_note(&info->files); 1689 if (fill_files_note(&info->files) == 0)
1690 info->size += notesize(&info->files); 1690 info->size += notesize(&info->files);
1691 1691
1692 return 1; 1692 return 1;
1693} 1693}
@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info,
1719 return 0; 1719 return 0;
1720 if (first && !writenote(&info->auxv, file, foffset)) 1720 if (first && !writenote(&info->auxv, file, foffset))
1721 return 0; 1721 return 0;
1722 if (first && !writenote(&info->files, file, foffset)) 1722 if (first && info->files.data &&
1723 !writenote(&info->files, file, foffset))
1723 return 0; 1724 return 0;
1724 1725
1725 for (i = 1; i < info->thread_notes; ++i) 1726 for (i = 1; i < info->thread_notes; ++i)
@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1806 1807
1807struct elf_note_info { 1808struct elf_note_info {
1808 struct memelfnote *notes; 1809 struct memelfnote *notes;
1810 struct memelfnote *notes_files;
1809 struct elf_prstatus *prstatus; /* NT_PRSTATUS */ 1811 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1810 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 1812 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1811 struct list_head thread_list; 1813 struct list_head thread_list;
@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
1896 1898
1897 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); 1899 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
1898 fill_auxv_note(info->notes + 3, current->mm); 1900 fill_auxv_note(info->notes + 3, current->mm);
1899 fill_files_note(info->notes + 4); 1901 info->numnote = 4;
1900 1902
1901 info->numnote = 5; 1903 if (fill_files_note(info->notes + info->numnote) == 0) {
1904 info->notes_files = info->notes + info->numnote;
1905 info->numnote++;
1906 }
1902 1907
1903 /* Try to dump the FPU. */ 1908 /* Try to dump the FPU. */
1904 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, 1909 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info)
1960 kfree(list_entry(tmp, struct elf_thread_status, list)); 1965 kfree(list_entry(tmp, struct elf_thread_status, list));
1961 } 1966 }
1962 1967
1963 /* Free data allocated by fill_files_note(): */ 1968 /* Free data possibly allocated by fill_files_note(): */
1964 vfree(info->notes[4].data); 1969 if (info->notes_files)
1970 vfree(info->notes_files->data);
1965 1971
1966 kfree(info->prstatus); 1972 kfree(info->prstatus);
1967 kfree(info->psinfo); 1973 kfree(info->psinfo);
@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2044 struct vm_area_struct *vma, *gate_vma; 2050 struct vm_area_struct *vma, *gate_vma;
2045 struct elfhdr *elf = NULL; 2051 struct elfhdr *elf = NULL;
2046 loff_t offset = 0, dataoff, foffset; 2052 loff_t offset = 0, dataoff, foffset;
2047 struct elf_note_info info; 2053 struct elf_note_info info = { };
2048 struct elf_phdr *phdr4note = NULL; 2054 struct elf_phdr *phdr4note = NULL;
2049 struct elf_shdr *shdr4extnum = NULL; 2055 struct elf_shdr *shdr4extnum = NULL;
2050 Elf_Half e_phnum; 2056 Elf_Half e_phnum;
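
The fs/binfmt_elf.c hunks above turn fill_files_note() into a function that reports failure instead of silently emitting a bogus NT_FILE note, and both note-info layouts now track whether the note was actually built so write-out and cleanup can skip it. A minimal userspace sketch of that error-propagation pattern, using stand-in types (struct note, struct note_info) rather than the real memelfnote/elf_note_info definitions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct note { void *data; size_t size; };

struct note_info {
	struct note files;
	struct note *notes_files;	/* NULL when no NT_FILE note was built */
	size_t total;
};

/* stand-in for fill_files_note(): may fail instead of leaving a bogus note */
static int fill_files(struct note *n, size_t want)
{
	if (want > 1024 * 1024)		/* paranoia limit, like MAX_FILE_NOTE_SIZE */
		return -EINVAL;
	n->data = malloc(want);
	if (!n->data)
		return -ENOMEM;
	n->size = want;
	return 0;
}

static void build(struct note_info *info, size_t want)
{
	if (fill_files(&info->files, want) == 0) {
		info->notes_files = &info->files;	/* remember it for writing/freeing */
		info->total += info->files.size;
	}
}

static void cleanup(struct note_info *info)
{
	if (info->notes_files)			/* only free what was really allocated */
		free(info->notes_files->data);
}

int main(void)
{
	struct note_info info = { 0 };		/* mirrors the "info = { }" zero-init in elf_core_dump() */

	build(&info, 4096);
	printf("note bytes accounted: %zu\n", info.total);
	cleanup(&info);
	return 0;
}
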
diff --git a/fs/bio.c b/fs/bio.c
index b3b20ed9510e..ea5035da4d9a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
917 src_p = kmap_atomic(src_bv->bv_page); 917 src_p = kmap_atomic(src_bv->bv_page);
918 dst_p = kmap_atomic(dst_bv->bv_page); 918 dst_p = kmap_atomic(dst_bv->bv_page);
919 919
920 memcpy(dst_p + dst_bv->bv_offset, 920 memcpy(dst_p + dst_offset,
921 src_p + src_bv->bv_offset, 921 src_p + src_offset,
922 bytes); 922 bytes);
923 923
924 kunmap_atomic(dst_p); 924 kunmap_atomic(dst_p);
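
The single-line fs/bio.c fix replaces the segments' fixed bv_offset with the loop's running dst_offset/src_offset, which only differ once a bio_vec is consumed across several partial copies. A rough userspace model of that inner loop, with plain buffers standing in for mapped pages and the kmap/kunmap calls dropped:

#include <stdio.h>
#include <string.h>

struct seg { const char *buf; size_t len; };

/*
 * Copy a list of source segments into one destination buffer, keeping a
 * running offset into whichever segment is only partially consumed --
 * the analogue of src_offset/dst_offset in bio_copy_data().
 */
static void copy_segs(char *dst, size_t dst_len, const struct seg *src, int nsegs)
{
	size_t dst_off = 0, src_off = 0;
	int i = 0;

	while (i < nsegs && dst_off < dst_len) {
		size_t src_left = src[i].len - src_off;
		size_t dst_left = dst_len - dst_off;
		size_t bytes = src_left < dst_left ? src_left : dst_left;

		/* index by the running offsets, not by each segment's start */
		memcpy(dst + dst_off, src[i].buf + src_off, bytes);

		dst_off += bytes;
		src_off += bytes;
		if (src_off == src[i].len) {	/* segment exhausted, move on */
			i++;
			src_off = 0;
		}
	}
}

int main(void)
{
	struct seg src[] = { { "hello, ", 7 }, { "bio copy", 8 } };
	char dst[16] = { 0 };

	copy_segs(dst, sizeof(dst) - 1, src, 2);
	printf("%s\n", dst);
	return 0;
}
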
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 58b7d14b08ee..08cc08f037a6 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -107,7 +107,8 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
107 worker->idle = 1; 107 worker->idle = 1;
108 108
109 /* the list may be empty if the worker is just starting */ 109 /* the list may be empty if the worker is just starting */
110 if (!list_empty(&worker->worker_list)) { 110 if (!list_empty(&worker->worker_list) &&
111 !worker->workers->stopping) {
111 list_move(&worker->worker_list, 112 list_move(&worker->worker_list,
112 &worker->workers->idle_list); 113 &worker->workers->idle_list);
113 } 114 }
@@ -127,7 +128,8 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
127 spin_lock_irqsave(&worker->workers->lock, flags); 128 spin_lock_irqsave(&worker->workers->lock, flags);
128 worker->idle = 0; 129 worker->idle = 0;
129 130
130 if (!list_empty(&worker->worker_list)) { 131 if (!list_empty(&worker->worker_list) &&
132 !worker->workers->stopping) {
131 list_move_tail(&worker->worker_list, 133 list_move_tail(&worker->worker_list,
132 &worker->workers->worker_list); 134 &worker->workers->worker_list);
133 } 135 }
@@ -412,6 +414,7 @@ void btrfs_stop_workers(struct btrfs_workers *workers)
412 int can_stop; 414 int can_stop;
413 415
414 spin_lock_irq(&workers->lock); 416 spin_lock_irq(&workers->lock);
417 workers->stopping = 1;
415 list_splice_init(&workers->idle_list, &workers->worker_list); 418 list_splice_init(&workers->idle_list, &workers->worker_list);
416 while (!list_empty(&workers->worker_list)) { 419 while (!list_empty(&workers->worker_list)) {
417 cur = workers->worker_list.next; 420 cur = workers->worker_list.next;
@@ -455,6 +458,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
455 workers->ordered = 0; 458 workers->ordered = 0;
456 workers->atomic_start_pending = 0; 459 workers->atomic_start_pending = 0;
457 workers->atomic_worker_start = async_helper; 460 workers->atomic_worker_start = async_helper;
461 workers->stopping = 0;
458} 462}
459 463
460/* 464/*
@@ -480,15 +484,19 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
480 atomic_set(&worker->num_pending, 0); 484 atomic_set(&worker->num_pending, 0);
481 atomic_set(&worker->refs, 1); 485 atomic_set(&worker->refs, 1);
482 worker->workers = workers; 486 worker->workers = workers;
483 worker->task = kthread_run(worker_loop, worker, 487 worker->task = kthread_create(worker_loop, worker,
484 "btrfs-%s-%d", workers->name, 488 "btrfs-%s-%d", workers->name,
485 workers->num_workers + 1); 489 workers->num_workers + 1);
486 if (IS_ERR(worker->task)) { 490 if (IS_ERR(worker->task)) {
487 ret = PTR_ERR(worker->task); 491 ret = PTR_ERR(worker->task);
488 kfree(worker);
489 goto fail; 492 goto fail;
490 } 493 }
494
491 spin_lock_irq(&workers->lock); 495 spin_lock_irq(&workers->lock);
496 if (workers->stopping) {
497 spin_unlock_irq(&workers->lock);
498 goto fail_kthread;
499 }
492 list_add_tail(&worker->worker_list, &workers->idle_list); 500 list_add_tail(&worker->worker_list, &workers->idle_list);
493 worker->idle = 1; 501 worker->idle = 1;
494 workers->num_workers++; 502 workers->num_workers++;
@@ -496,8 +504,13 @@ static int __btrfs_start_workers(struct btrfs_workers *workers)
496 WARN_ON(workers->num_workers_starting < 0); 504 WARN_ON(workers->num_workers_starting < 0);
497 spin_unlock_irq(&workers->lock); 505 spin_unlock_irq(&workers->lock);
498 506
507 wake_up_process(worker->task);
499 return 0; 508 return 0;
509
510fail_kthread:
511 kthread_stop(worker->task);
500fail: 512fail:
513 kfree(worker);
501 spin_lock_irq(&workers->lock); 514 spin_lock_irq(&workers->lock);
502 workers->num_workers_starting--; 515 workers->num_workers_starting--;
503 spin_unlock_irq(&workers->lock); 516 spin_unlock_irq(&workers->lock);
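
The async-thread.c hunks close a race between starting workers and btrfs_stop_workers(): the task is created without running, the new stopping flag is checked under workers->lock before the worker is published, and only then is the thread woken. A pthread-based sketch of that ordering, where pthread_create() stands in for both kthread_create() and wake_up_process() and the pool bookkeeping is reduced to a counter:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t lock;
	int stopping;		/* set, under lock, when the pool shuts down */
	int num_workers;
};

static void *worker_loop(void *arg)
{
	/* real code would pull work items off the pool here */
	return arg;
}

/*
 * Start one worker: decide whether the pool is still alive *under the lock*,
 * publish the worker, and only let it run afterwards.  (The kernel version
 * creates the task first, so a failed creation is never visible to the pool.)
 */
static int start_worker(struct pool *p, pthread_t *tid)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	if (p->stopping) {			/* raced with btrfs_stop_workers() */
		pthread_mutex_unlock(&p->lock);
		return -EINVAL;
	}
	p->num_workers++;			/* publish while still holding the lock */
	pthread_mutex_unlock(&p->lock);

	ret = pthread_create(tid, NULL, worker_loop, p);	/* "wake it up" last */
	if (ret) {
		pthread_mutex_lock(&p->lock);
		p->num_workers--;		/* roll back the bookkeeping */
		pthread_mutex_unlock(&p->lock);
		return -ret;
	}
	return 0;
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	pthread_t tid;

	if (start_worker(&p, &tid) == 0) {
		pthread_join(tid, NULL);
		printf("workers started: %d\n", p.num_workers);
	}
	return 0;
}
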
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 063698b90ce2..1f26792683ed 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -107,6 +107,8 @@ struct btrfs_workers {
107 107
108 /* extra name for this worker, used for current->name */ 108 /* extra name for this worker, used for current->name */
109 char *name; 109 char *name;
110
111 int stopping;
110}; 112};
111 113
112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); 114void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 70681686e8dc..9efb94e95858 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -535,10 +535,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
535 list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list); 535 list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
536 536
537 btrfs_rm_dev_replace_srcdev(fs_info, src_device); 537 btrfs_rm_dev_replace_srcdev(fs_info, src_device);
538 if (src_device->bdev) { 538
539 /* zero out the old super */
540 btrfs_scratch_superblock(src_device);
541 }
542 /* 539 /*
543 * this is again a consistent state where no dev_replace procedure 540 * this is again a consistent state where no dev_replace procedure
544 * is running, the target device is part of the filesystem, the 541 * is running, the target device is part of the filesystem, the
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4ae17ed13b32..62176ad89846 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1561,8 +1561,9 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1561 return ret; 1561 return ret;
1562} 1562}
1563 1563
1564struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 1564struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1565 struct btrfs_key *location) 1565 struct btrfs_key *location,
1566 bool check_ref)
1566{ 1567{
1567 struct btrfs_root *root; 1568 struct btrfs_root *root;
1568 int ret; 1569 int ret;
@@ -1586,7 +1587,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1586again: 1587again:
1587 root = btrfs_lookup_fs_root(fs_info, location->objectid); 1588 root = btrfs_lookup_fs_root(fs_info, location->objectid);
1588 if (root) { 1589 if (root) {
1589 if (btrfs_root_refs(&root->root_item) == 0) 1590 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1590 return ERR_PTR(-ENOENT); 1591 return ERR_PTR(-ENOENT);
1591 return root; 1592 return root;
1592 } 1593 }
@@ -1595,7 +1596,7 @@ again:
1595 if (IS_ERR(root)) 1596 if (IS_ERR(root))
1596 return root; 1597 return root;
1597 1598
1598 if (btrfs_root_refs(&root->root_item) == 0) { 1599 if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1599 ret = -ENOENT; 1600 ret = -ENOENT;
1600 goto fail; 1601 goto fail;
1601 } 1602 }
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index b71acd6e1e5b..5ce2a7da8b11 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -68,8 +68,17 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
68int btrfs_init_fs_root(struct btrfs_root *root); 68int btrfs_init_fs_root(struct btrfs_root *root);
69int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, 69int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
70 struct btrfs_root *root); 70 struct btrfs_root *root);
71struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, 71
72 struct btrfs_key *location); 72struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
73 struct btrfs_key *key,
74 bool check_ref);
75static inline struct btrfs_root *
76btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
77 struct btrfs_key *location)
78{
79 return btrfs_get_fs_root(fs_info, location, true);
80}
81
73int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); 82int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
74void btrfs_btree_balance_dirty(struct btrfs_root *root); 83void btrfs_btree_balance_dirty(struct btrfs_root *root);
75void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root); 84void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
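
disk-io.h keeps the old btrfs_read_fs_root_no_name() name alive as a static inline over the new btrfs_get_fs_root(..., check_ref), so existing callers keep the zero-refs check while relocation can opt out. A compact illustration of that wrapper pattern, with simplified stand-in types in place of the btrfs structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct root { int refs; int objectid; };

/* stand-in for btrfs_get_fs_root(): the flag decides whether zero refs is fatal */
static struct root *get_root(struct root *r, bool check_ref, int *err)
{
	if (check_ref && r->refs == 0) {
		*err = -ENOENT;
		return NULL;
	}
	*err = 0;
	return r;
}

/* the old entry point survives as a thin wrapper with the historical behaviour */
static inline struct root *get_root_no_name(struct root *r, int *err)
{
	return get_root(r, true, err);
}

int main(void)
{
	struct root dead = { .refs = 0, .objectid = 257 };
	int err;

	/* normal callers still refuse roots with zero references ... */
	if (!get_root_no_name(&dead, &err))
		printf("wrapper: %d\n", err);

	/* ... while relocation-style callers opt out of the check */
	if (get_root(&dead, false, &err))
		printf("check_ref=false: objectid %d usable\n", dead.objectid);
	return 0;
}
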
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c09a40db53db..51731b76900d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -145,8 +145,16 @@ int __init extent_io_init(void)
145 offsetof(struct btrfs_io_bio, bio)); 145 offsetof(struct btrfs_io_bio, bio));
146 if (!btrfs_bioset) 146 if (!btrfs_bioset)
147 goto free_buffer_cache; 147 goto free_buffer_cache;
148
149 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
150 goto free_bioset;
151
148 return 0; 152 return 0;
149 153
154free_bioset:
155 bioset_free(btrfs_bioset);
156 btrfs_bioset = NULL;
157
150free_buffer_cache: 158free_buffer_cache:
151 kmem_cache_destroy(extent_buffer_cache); 159 kmem_cache_destroy(extent_buffer_cache);
152 extent_buffer_cache = NULL; 160 extent_buffer_cache = NULL;
@@ -1482,10 +1490,8 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1482 cur_start = state->end + 1; 1490 cur_start = state->end + 1;
1483 node = rb_next(node); 1491 node = rb_next(node);
1484 total_bytes += state->end - state->start + 1; 1492 total_bytes += state->end - state->start + 1;
1485 if (total_bytes >= max_bytes) { 1493 if (total_bytes >= max_bytes)
1486 *end = *start + max_bytes - 1;
1487 break; 1494 break;
1488 }
1489 if (!node) 1495 if (!node)
1490 break; 1496 break;
1491 } 1497 }
@@ -1614,7 +1620,7 @@ again:
1614 *start = delalloc_start; 1620 *start = delalloc_start;
1615 *end = delalloc_end; 1621 *end = delalloc_end;
1616 free_extent_state(cached_state); 1622 free_extent_state(cached_state);
1617 return found; 1623 return 0;
1618 } 1624 }
1619 1625
1620 /* 1626 /*
@@ -1627,10 +1633,9 @@ again:
1627 1633
1628 /* 1634 /*
1629 * make sure to limit the number of pages we try to lock down 1635 * make sure to limit the number of pages we try to lock down
1630 * if we're looping.
1631 */ 1636 */
1632 if (delalloc_end + 1 - delalloc_start > max_bytes && loops) 1637 if (delalloc_end + 1 - delalloc_start > max_bytes)
1633 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; 1638 delalloc_end = delalloc_start + max_bytes - 1;
1634 1639
1635 /* step two, lock all the pages after the page that has start */ 1640 /* step two, lock all the pages after the page that has start */
1636 ret = lock_delalloc_pages(inode, locked_page, 1641 ret = lock_delalloc_pages(inode, locked_page,
@@ -1641,8 +1646,7 @@ again:
1641 */ 1646 */
1642 free_extent_state(cached_state); 1647 free_extent_state(cached_state);
1643 if (!loops) { 1648 if (!loops) {
1644 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); 1649 max_bytes = PAGE_CACHE_SIZE;
1645 max_bytes = PAGE_CACHE_SIZE - offset;
1646 loops = 1; 1650 loops = 1;
1647 goto again; 1651 goto again;
1648 } else { 1652 } else {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 22ebc13b6c99..51e3afa78354 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6437,6 +6437,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
6437 6437
6438 if (btrfs_extent_readonly(root, disk_bytenr)) 6438 if (btrfs_extent_readonly(root, disk_bytenr))
6439 goto out; 6439 goto out;
6440 btrfs_release_path(path);
6440 6441
6441 /* 6442 /*
6442 * look for other files referencing this extent, if we 6443 * look for other files referencing this extent, if we
@@ -7986,7 +7987,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7986 7987
7987 7988
7988 /* check for collisions, even if the name isn't there */ 7989 /* check for collisions, even if the name isn't there */
7989 ret = btrfs_check_dir_item_collision(root, new_dir->i_ino, 7990 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
7990 new_dentry->d_name.name, 7991 new_dentry->d_name.name,
7991 new_dentry->d_name.len); 7992 new_dentry->d_name.len);
7992 7993
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a5a26320503f..4a355726151e 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -588,7 +588,7 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
588 else 588 else
589 key.offset = (u64)-1; 589 key.offset = (u64)-1;
590 590
591 return btrfs_read_fs_root_no_name(fs_info, &key); 591 return btrfs_get_fs_root(fs_info, &key, false);
592} 592}
593 593
594#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 594#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 0b1f4ef8db98..ec71ea44d2b4 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -299,11 +299,6 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
299 continue; 299 continue;
300 } 300 }
301 301
302 if (btrfs_root_refs(&root->root_item) == 0) {
303 btrfs_add_dead_root(root);
304 continue;
305 }
306
307 err = btrfs_init_fs_root(root); 302 err = btrfs_init_fs_root(root);
308 if (err) { 303 if (err) {
309 btrfs_free_fs_root(root); 304 btrfs_free_fs_root(root);
@@ -318,6 +313,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
318 btrfs_free_fs_root(root); 313 btrfs_free_fs_root(root);
319 break; 314 break;
320 } 315 }
316
317 if (btrfs_root_refs(&root->root_item) == 0)
318 btrfs_add_dead_root(root);
321 } 319 }
322 320
323 btrfs_free_path(path); 321 btrfs_free_path(path);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e7a95356df83..8c81bdc1ef9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1838,11 +1838,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1838 assert_qgroups_uptodate(trans); 1838 assert_qgroups_uptodate(trans);
1839 update_super_roots(root); 1839 update_super_roots(root);
1840 1840
1841 if (!root->fs_info->log_root_recovering) { 1841 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1842 btrfs_set_super_log_root(root->fs_info->super_copy, 0); 1842 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1843 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1844 }
1845
1846 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy, 1843 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1847 sizeof(*root->fs_info->super_copy)); 1844 sizeof(*root->fs_info->super_copy));
1848 1845
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a10645830223..043b215769c2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1716,6 +1716,7 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1716 struct btrfs_device *srcdev) 1716 struct btrfs_device *srcdev)
1717{ 1717{
1718 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex)); 1718 WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1719
1719 list_del_rcu(&srcdev->dev_list); 1720 list_del_rcu(&srcdev->dev_list);
1720 list_del_rcu(&srcdev->dev_alloc_list); 1721 list_del_rcu(&srcdev->dev_alloc_list);
1721 fs_info->fs_devices->num_devices--; 1722 fs_info->fs_devices->num_devices--;
@@ -1725,9 +1726,13 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1725 } 1726 }
1726 if (srcdev->can_discard) 1727 if (srcdev->can_discard)
1727 fs_info->fs_devices->num_can_discard--; 1728 fs_info->fs_devices->num_can_discard--;
1728 if (srcdev->bdev) 1729 if (srcdev->bdev) {
1729 fs_info->fs_devices->open_devices--; 1730 fs_info->fs_devices->open_devices--;
1730 1731
1732 /* zero out the old super */
1733 btrfs_scratch_superblock(srcdev);
1734 }
1735
1731 call_rcu(&srcdev->rcu, free_device); 1736 call_rcu(&srcdev->rcu, free_device);
1732} 1737}
1733 1738
diff --git a/fs/buffer.c b/fs/buffer.c
index 4d7433534f5c..6024877335ca 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1005,9 +1005,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
1005 struct buffer_head *bh; 1005 struct buffer_head *bh;
1006 sector_t end_block; 1006 sector_t end_block;
1007 int ret = 0; /* Will call free_more_memory() */ 1007 int ret = 0; /* Will call free_more_memory() */
1008 gfp_t gfp_mask;
1008 1009
1009 page = find_or_create_page(inode->i_mapping, index, 1010 gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
1010 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); 1011 gfp_mask |= __GFP_MOVABLE;
1012 /*
1013 * XXX: __getblk_slow() can not really deal with failure and
1014 * will endlessly loop on improvised global reclaim. Prefer
1015 * looping in the allocator rather than here, at least that
1016 * code knows what it's doing.
1017 */
1018 gfp_mask |= __GFP_NOFAIL;
1019
1020 page = find_or_create_page(inode->i_mapping, index, gfp_mask);
1011 if (!page) 1021 if (!page)
1012 return ret; 1022 return ret;
1013 1023
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a16b4e58bcc6..77fc5e181077 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -120,14 +120,16 @@ cifs_read_super(struct super_block *sb)
120{ 120{
121 struct inode *inode; 121 struct inode *inode;
122 struct cifs_sb_info *cifs_sb; 122 struct cifs_sb_info *cifs_sb;
123 struct cifs_tcon *tcon;
123 int rc = 0; 124 int rc = 0;
124 125
125 cifs_sb = CIFS_SB(sb); 126 cifs_sb = CIFS_SB(sb);
127 tcon = cifs_sb_master_tcon(cifs_sb);
126 128
127 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) 129 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
128 sb->s_flags |= MS_POSIXACL; 130 sb->s_flags |= MS_POSIXACL;
129 131
130 if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES) 132 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
131 sb->s_maxbytes = MAX_LFS_FILESIZE; 133 sb->s_maxbytes = MAX_LFS_FILESIZE;
132 else 134 else
133 sb->s_maxbytes = MAX_NON_LFS; 135 sb->s_maxbytes = MAX_NON_LFS;
@@ -147,7 +149,7 @@ cifs_read_super(struct super_block *sb)
147 goto out_no_root; 149 goto out_no_root;
148 } 150 }
149 151
150 if (cifs_sb_master_tcon(cifs_sb)->nocase) 152 if (tcon->nocase)
151 sb->s_d_op = &cifs_ci_dentry_ops; 153 sb->s_d_op = &cifs_ci_dentry_ops;
152 else 154 else
153 sb->s_d_op = &cifs_dentry_ops; 155 sb->s_d_op = &cifs_dentry_ops;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ea723a5e8226..6d0b07217ac9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -132,5 +132,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
132extern const struct export_operations cifs_export_ops; 132extern const struct export_operations cifs_export_ops;
133#endif /* CONFIG_CIFS_NFSD_EXPORT */ 133#endif /* CONFIG_CIFS_NFSD_EXPORT */
134 134
135#define CIFS_VERSION "2.01" 135#define CIFS_VERSION "2.02"
136#endif /* _CIFSFS_H */ 136#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index cfa14c80ef3b..52b6f6c26bfc 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -547,9 +547,6 @@ struct TCP_Server_Info {
547 unsigned int max_rw; /* maxRw specifies the maximum */ 547 unsigned int max_rw; /* maxRw specifies the maximum */
548 /* message size the server can send or receive for */ 548 /* message size the server can send or receive for */
549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ 549 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
550 unsigned int max_vcs; /* maximum number of smb sessions, at least
551 those that can be specified uniquely with
552 vcnumbers */
553 unsigned int capabilities; /* selective disabling of caps by smb sess */ 550 unsigned int capabilities; /* selective disabling of caps by smb sess */
554 int timeAdj; /* Adjust for difference in server time zone in sec */ 551 int timeAdj; /* Adjust for difference in server time zone in sec */
555 __u64 CurrentMid; /* multiplex id - rotating counter */ 552 __u64 CurrentMid; /* multiplex id - rotating counter */
@@ -715,7 +712,6 @@ struct cifs_ses {
715 enum statusEnum status; 712 enum statusEnum status;
716 unsigned overrideSecFlg; /* if non-zero override global sec flags */ 713 unsigned overrideSecFlg; /* if non-zero override global sec flags */
717 __u16 ipc_tid; /* special tid for connection to IPC share */ 714 __u16 ipc_tid; /* special tid for connection to IPC share */
718 __u16 vcnum;
719 char *serverOS; /* name of operating system underlying server */ 715 char *serverOS; /* name of operating system underlying server */
720 char *serverNOS; /* name of network operating system of server */ 716 char *serverNOS; /* name of network operating system of server */
721 char *serverDomain; /* security realm of server */ 717 char *serverDomain; /* security realm of server */
@@ -1272,6 +1268,7 @@ struct dfs_info3_param {
1272#define CIFS_FATTR_DELETE_PENDING 0x2 1268#define CIFS_FATTR_DELETE_PENDING 0x2
1273#define CIFS_FATTR_NEED_REVAL 0x4 1269#define CIFS_FATTR_NEED_REVAL 0x4
1274#define CIFS_FATTR_INO_COLLISION 0x8 1270#define CIFS_FATTR_INO_COLLISION 0x8
1271#define CIFS_FATTR_UNKNOWN_NLINK 0x10
1275 1272
1276struct cifs_fattr { 1273struct cifs_fattr {
1277 u32 cf_flags; 1274 u32 cf_flags;
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 948676db8e2e..08f9dfb1a894 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1491,15 +1491,30 @@ struct file_notify_information {
1491 __u8 FileName[0]; 1491 __u8 FileName[0];
1492} __attribute__((packed)); 1492} __attribute__((packed));
1493 1493
1494struct reparse_data { 1494/* For IO_REPARSE_TAG_SYMLINK */
1495 __u32 ReparseTag; 1495struct reparse_symlink_data {
1496 __u16 ReparseDataLength; 1496 __le32 ReparseTag;
1497 __le16 ReparseDataLength;
1497 __u16 Reserved; 1498 __u16 Reserved;
1498 __u16 SubstituteNameOffset; 1499 __le16 SubstituteNameOffset;
1499 __u16 SubstituteNameLength; 1500 __le16 SubstituteNameLength;
1500 __u16 PrintNameOffset; 1501 __le16 PrintNameOffset;
1501 __u16 PrintNameLength; 1502 __le16 PrintNameLength;
1502 __u32 Flags; 1503 __le32 Flags;
1504 char PathBuffer[0];
1505} __attribute__((packed));
1506
1507/* For IO_REPARSE_TAG_NFS */
1508#define NFS_SPECFILE_LNK 0x00000000014B4E4C
1509#define NFS_SPECFILE_CHR 0x0000000000524843
1510#define NFS_SPECFILE_BLK 0x00000000004B4C42
1511#define NFS_SPECFILE_FIFO 0x000000004F464946
1512#define NFS_SPECFILE_SOCK 0x000000004B434F53
1513struct reparse_posix_data {
1514 __le32 ReparseTag;
1515 __le16 ReparseDataLength;
1516 __u16 Reserved;
1517 __le64 InodeType; /* LNK, FIFO, CHR etc. */
1503 char PathBuffer[0]; 1518 char PathBuffer[0];
1504} __attribute__((packed)); 1519} __attribute__((packed));
1505 1520
@@ -2652,26 +2667,7 @@ typedef struct file_xattr_info {
2652} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info 2667} __attribute__((packed)) FILE_XATTR_INFO; /* extended attribute info
2653 level 0x205 */ 2668 level 0x205 */
2654 2669
 2655 2670/* flags for lsattr and chflags commands removed are in uapi/linux/fs.h */
2656/* flags for chattr command */
2657#define EXT_SECURE_DELETE 0x00000001 /* EXT3_SECRM_FL */
2658#define EXT_ENABLE_UNDELETE 0x00000002 /* EXT3_UNRM_FL */
2659/* Reserved for compress file 0x4 */
2660#define EXT_SYNCHRONOUS 0x00000008 /* EXT3_SYNC_FL */
2661#define EXT_IMMUTABLE_FL 0x00000010 /* EXT3_IMMUTABLE_FL */
2662#define EXT_OPEN_APPEND_ONLY 0x00000020 /* EXT3_APPEND_FL */
2663#define EXT_DO_NOT_BACKUP 0x00000040 /* EXT3_NODUMP_FL */
2664#define EXT_NO_UPDATE_ATIME 0x00000080 /* EXT3_NOATIME_FL */
2665/* 0x100 through 0x800 reserved for compression flags and are GET-ONLY */
2666#define EXT_HASH_TREE_INDEXED_DIR 0x00001000 /* GET-ONLY EXT3_INDEX_FL */
2667/* 0x2000 reserved for IMAGIC_FL */
2668#define EXT_JOURNAL_THIS_FILE 0x00004000 /* GET-ONLY EXT3_JOURNAL_DATA_FL */
2669/* 0x8000 reserved for EXT3_NOTAIL_FL */
2670#define EXT_SYNCHRONOUS_DIR 0x00010000 /* EXT3_DIRSYNC_FL */
2671#define EXT_TOPDIR 0x00020000 /* EXT3_TOPDIR_FL */
2672
2673#define EXT_SET_MASK 0x000300FF
2674#define EXT_GET_MASK 0x0003DFFF
2675 2671
2676typedef struct file_chattr_info { 2672typedef struct file_chattr_info {
2677 __le64 mask; /* list of all possible attribute bits */ 2673 __le64 mask; /* list of all possible attribute bits */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a3d74fea1623..ccd31ab815d4 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -463,7 +463,6 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
463 cifs_max_pending); 463 cifs_max_pending);
464 set_credits(server, server->maxReq); 464 set_credits(server, server->maxReq);
465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize); 465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
466 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
467 /* even though we do not use raw we might as well set this 466 /* even though we do not use raw we might as well set this
468 accurately, in case we ever find a need for it */ 467 accurately, in case we ever find a need for it */
469 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { 468 if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -3089,7 +3088,8 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
3089 bool is_unicode; 3088 bool is_unicode;
3090 unsigned int sub_len; 3089 unsigned int sub_len;
3091 char *sub_start; 3090 char *sub_start;
3092 struct reparse_data *reparse_buf; 3091 struct reparse_symlink_data *reparse_buf;
3092 struct reparse_posix_data *posix_buf;
3093 __u32 data_offset, data_count; 3093 __u32 data_offset, data_count;
3094 char *end_of_smb; 3094 char *end_of_smb;
3095 3095
@@ -3138,20 +3138,47 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
3138 goto qreparse_out; 3138 goto qreparse_out;
3139 } 3139 }
3140 end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount; 3140 end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
3141 reparse_buf = (struct reparse_data *) 3141 reparse_buf = (struct reparse_symlink_data *)
3142 ((char *)&pSMBr->hdr.Protocol + data_offset); 3142 ((char *)&pSMBr->hdr.Protocol + data_offset);
3143 if ((char *)reparse_buf >= end_of_smb) { 3143 if ((char *)reparse_buf >= end_of_smb) {
3144 rc = -EIO; 3144 rc = -EIO;
3145 goto qreparse_out; 3145 goto qreparse_out;
3146 } 3146 }
3147 if ((reparse_buf->PathBuffer + reparse_buf->PrintNameOffset + 3147 if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
3148 reparse_buf->PrintNameLength) > end_of_smb) { 3148 cifs_dbg(FYI, "NFS style reparse tag\n");
3149 posix_buf = (struct reparse_posix_data *)reparse_buf;
3150
3151 if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
3152 cifs_dbg(FYI, "unsupported file type 0x%llx\n",
3153 le64_to_cpu(posix_buf->InodeType));
3154 rc = -EOPNOTSUPP;
3155 goto qreparse_out;
3156 }
3157 is_unicode = true;
3158 sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
3159 if (posix_buf->PathBuffer + sub_len > end_of_smb) {
3160 cifs_dbg(FYI, "reparse buf beyond SMB\n");
3161 rc = -EIO;
3162 goto qreparse_out;
3163 }
3164 *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
3165 sub_len, is_unicode, nls_codepage);
3166 goto qreparse_out;
3167 } else if (reparse_buf->ReparseTag !=
3168 cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
3169 rc = -EOPNOTSUPP;
3170 goto qreparse_out;
3171 }
3172
3173 /* Reparse tag is NTFS symlink */
3174 sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
3175 reparse_buf->PathBuffer;
3176 sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
3177 if (sub_start + sub_len > end_of_smb) {
3149 cifs_dbg(FYI, "reparse buf beyond SMB\n"); 3178 cifs_dbg(FYI, "reparse buf beyond SMB\n");
3150 rc = -EIO; 3179 rc = -EIO;
3151 goto qreparse_out; 3180 goto qreparse_out;
3152 } 3181 }
3153 sub_start = reparse_buf->SubstituteNameOffset + reparse_buf->PathBuffer;
3154 sub_len = reparse_buf->SubstituteNameLength;
3155 if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) 3182 if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
3156 is_unicode = true; 3183 is_unicode = true;
3157 else 3184 else
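
CIFSSMBQuerySymLink() now distinguishes NFS-style reparse points (which carry an InodeType that must be NFS_SPECFILE_LNK) from NTFS symlinks, converts the little-endian header fields before using them, and checks every offset/length against the end of the SMB. A standalone sketch of that parsing logic; the byte-offset helpers stand in for the __le fields and le*_to_cpu conversions, and the offsets follow the struct layouts added in cifspdu.h above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_SYMLINK	 0xA000000Cu
#define TAG_NFS		 0x80000014u
#define NFS_SPECFILE_LNK 0x00000000014B4E4CULL

static uint16_t get_le16(const uint8_t *p) { return p[0] | p[1] << 8; }
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}
static uint64_t get_le64(const uint8_t *p)
{
	return (uint64_t)get_le32(p) | (uint64_t)get_le32(p + 4) << 32;
}

/*
 * NFS-style buffers: 16-byte header (tag, length, reserved, InodeType), path
 * right after.  NTFS symlinks: substitute/print name offsets and lengths,
 * path buffer at offset 20.  Every length is checked before it is trusted.
 */
static int parse_reparse(const uint8_t *buf, size_t len)
{
	uint32_t tag;
	uint16_t data_len;

	if (len < 16)
		return -1;
	tag = get_le32(buf);
	data_len = get_le16(buf + 4);

	if (tag == TAG_NFS) {
		if (get_le64(buf + 8) != NFS_SPECFILE_LNK)
			return -1;		/* FIFO/CHR/BLK/SOCK: unsupported here */
		if (16 + (size_t)data_len > len)
			return -1;		/* "reparse buf beyond SMB" */
		printf("NFS symlink, %u payload bytes\n", data_len);
		return 0;
	}
	if (tag == TAG_SYMLINK) {
		uint16_t sub_off = get_le16(buf + 8);
		uint16_t sub_len = get_le16(buf + 10);

		if (20 + (size_t)sub_off + sub_len > len)
			return -1;
		printf("NTFS symlink, name at +%u, %u bytes\n", sub_off, sub_len);
		return 0;
	}
	return -1;				/* other tags: not handled */
}

int main(void)
{
	uint8_t nfs[32] = { 0 };

	nfs[0] = 0x14; nfs[3] = 0x80;		/* TAG_NFS, little endian */
	nfs[4] = 12;				/* ReparseDataLength */
	memcpy(&nfs[8], "LNK\1", 4);		/* NFS_SPECFILE_LNK */
	return parse_reparse(nfs, sizeof(nfs)) ? 1 : 0;
}
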
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index eb955b525e55..7ddddf2e2504 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3254,6 +3254,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3254 /* 3254 /*
3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS 3255 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3256 * immediately if the cookie is negative 3256 * immediately if the cookie is negative
3257 *
3258 * After this point, every page in the list might have PG_fscache set,
3259 * so we will need to clean that up off of every page we don't use.
3257 */ 3260 */
3258 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, 3261 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3259 &num_pages); 3262 &num_pages);
@@ -3376,6 +3379,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3376 kref_put(&rdata->refcount, cifs_readdata_release); 3379 kref_put(&rdata->refcount, cifs_readdata_release);
3377 } 3380 }
3378 3381
3382 /* Any pages that have been shown to fscache but didn't get added to
3383 * the pagecache must be uncached before they get returned to the
3384 * allocator.
3385 */
3386 cifs_fscache_readpages_cancel(mapping->host, page_list);
3379 return rc; 3387 return rc;
3380} 3388}
3381 3389
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 2f4bc5a58054..b3258f35e88a 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -223,6 +223,13 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
223 fscache_uncache_page(CIFS_I(inode)->fscache, page); 223 fscache_uncache_page(CIFS_I(inode)->fscache, page);
224} 224}
225 225
226void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
227{
228 cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
229 __func__, CIFS_I(inode)->fscache, inode);
230 fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
231}
232
226void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) 233void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
227{ 234{
228 struct cifsInodeInfo *cifsi = CIFS_I(inode); 235 struct cifsInodeInfo *cifsi = CIFS_I(inode);
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 63539323e0b9..24794b6cd8ec 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -54,6 +54,7 @@ extern int __cifs_readpages_from_fscache(struct inode *,
54 struct address_space *, 54 struct address_space *,
55 struct list_head *, 55 struct list_head *,
56 unsigned *); 56 unsigned *);
57extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
57 58
58extern void __cifs_readpage_to_fscache(struct inode *, struct page *); 59extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
59 60
@@ -91,6 +92,13 @@ static inline void cifs_readpage_to_fscache(struct inode *inode,
91 __cifs_readpage_to_fscache(inode, page); 92 __cifs_readpage_to_fscache(inode, page);
92} 93}
93 94
95static inline void cifs_fscache_readpages_cancel(struct inode *inode,
96 struct list_head *pages)
97{
98 if (CIFS_I(inode)->fscache)
99 return __cifs_fscache_readpages_cancel(inode, pages);
100}
101
94#else /* CONFIG_CIFS_FSCACHE */ 102#else /* CONFIG_CIFS_FSCACHE */
95static inline int cifs_fscache_register(void) { return 0; } 103static inline int cifs_fscache_register(void) { return 0; }
96static inline void cifs_fscache_unregister(void) {} 104static inline void cifs_fscache_unregister(void) {}
@@ -131,6 +139,11 @@ static inline int cifs_readpages_from_fscache(struct inode *inode,
131static inline void cifs_readpage_to_fscache(struct inode *inode, 139static inline void cifs_readpage_to_fscache(struct inode *inode,
132 struct page *page) {} 140 struct page *page) {}
133 141
142static inline void cifs_fscache_readpages_cancel(struct inode *inode,
143 struct list_head *pages)
144{
145}
146
134#endif /* CONFIG_CIFS_FSCACHE */ 147#endif /* CONFIG_CIFS_FSCACHE */
135 148
136#endif /* _CIFS_FSCACHE_H */ 149#endif /* _CIFS_FSCACHE_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f9ff9c173f78..867b7cdc794a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -120,6 +120,33 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
120 cifs_i->invalid_mapping = true; 120 cifs_i->invalid_mapping = true;
121} 121}
122 122
123/*
124 * copy nlink to the inode, unless it wasn't provided. Provide
125 * sane values if we don't have an existing one and none was provided
126 */
127static void
128cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
129{
130 /*
131 * if we're in a situation where we can't trust what we
132 * got from the server (readdir, some non-unix cases)
133 * fake reasonable values
134 */
135 if (fattr->cf_flags & CIFS_FATTR_UNKNOWN_NLINK) {
136 /* only provide fake values on a new inode */
137 if (inode->i_state & I_NEW) {
138 if (fattr->cf_cifsattrs & ATTR_DIRECTORY)
139 set_nlink(inode, 2);
140 else
141 set_nlink(inode, 1);
142 }
143 return;
144 }
145
146 /* we trust the server, so update it */
147 set_nlink(inode, fattr->cf_nlink);
148}
149
123/* populate an inode with info from a cifs_fattr struct */ 150/* populate an inode with info from a cifs_fattr struct */
124void 151void
125cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) 152cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
@@ -134,7 +161,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
134 inode->i_mtime = fattr->cf_mtime; 161 inode->i_mtime = fattr->cf_mtime;
135 inode->i_ctime = fattr->cf_ctime; 162 inode->i_ctime = fattr->cf_ctime;
136 inode->i_rdev = fattr->cf_rdev; 163 inode->i_rdev = fattr->cf_rdev;
137 set_nlink(inode, fattr->cf_nlink); 164 cifs_nlink_fattr_to_inode(inode, fattr);
138 inode->i_uid = fattr->cf_uid; 165 inode->i_uid = fattr->cf_uid;
139 inode->i_gid = fattr->cf_gid; 166 inode->i_gid = fattr->cf_gid;
140 167
@@ -541,6 +568,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
541 fattr->cf_bytes = le64_to_cpu(info->AllocationSize); 568 fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
542 fattr->cf_createtime = le64_to_cpu(info->CreationTime); 569 fattr->cf_createtime = le64_to_cpu(info->CreationTime);
543 570
571 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
544 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { 572 if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
545 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; 573 fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
546 fattr->cf_dtype = DT_DIR; 574 fattr->cf_dtype = DT_DIR;
@@ -548,7 +576,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
548 * Server can return wrong NumberOfLinks value for directories 576 * Server can return wrong NumberOfLinks value for directories
549 * when Unix extensions are disabled - fake it. 577 * when Unix extensions are disabled - fake it.
550 */ 578 */
551 fattr->cf_nlink = 2; 579 if (!tcon->unix_ext)
580 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
552 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) { 581 } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
553 fattr->cf_mode = S_IFLNK; 582 fattr->cf_mode = S_IFLNK;
554 fattr->cf_dtype = DT_LNK; 583 fattr->cf_dtype = DT_LNK;
@@ -561,11 +590,15 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
561 if (fattr->cf_cifsattrs & ATTR_READONLY) 590 if (fattr->cf_cifsattrs & ATTR_READONLY)
562 fattr->cf_mode &= ~(S_IWUGO); 591 fattr->cf_mode &= ~(S_IWUGO);
563 592
564 fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); 593 /*
565 if (fattr->cf_nlink < 1) { 594 * Don't accept zero nlink from non-unix servers unless
566 cifs_dbg(1, "replacing bogus file nlink value %u\n", 595 * delete is pending. Instead mark it as unknown.
596 */
597 if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
598 !info->DeletePending) {
599 cifs_dbg(1, "bogus file nlink value %u\n",
567 fattr->cf_nlink); 600 fattr->cf_nlink);
568 fattr->cf_nlink = 1; 601 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
569 } 602 }
570 } 603 }
571 604
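
The cifs/inode.c change funnels all link-count updates through cifs_nlink_fattr_to_inode(): when the server value cannot be trusted (readdir, bogus zero counts from non-unix servers) a sane value is faked only for a brand-new inode, and an existing inode keeps the count it already has. A small model of that decision, with fake_inode/fake_fattr standing in for the kernel structures:

#include <stdbool.h>
#include <stdio.h>

#define FATTR_UNKNOWN_NLINK 0x10
#define ATTR_DIRECTORY	    0x10

struct fake_inode { bool is_new; unsigned int nlink; };
struct fake_fattr { unsigned int flags, cifsattrs, nlink; };

/*
 * When the server's link count can't be trusted, invent a sane value only
 * while the inode is brand new, and never clobber a count we already hold.
 */
static void apply_nlink(struct fake_inode *inode, const struct fake_fattr *fattr)
{
	if (fattr->flags & FATTR_UNKNOWN_NLINK) {
		if (inode->is_new)
			inode->nlink = (fattr->cifsattrs & ATTR_DIRECTORY) ? 2 : 1;
		return;			/* existing inode keeps whatever it had */
	}
	inode->nlink = fattr->nlink;	/* server value is trustworthy, use it */
}

int main(void)
{
	struct fake_inode cached = { .is_new = false, .nlink = 5 };
	struct fake_fattr from_readdir = { .flags = FATTR_UNKNOWN_NLINK };

	apply_nlink(&cached, &from_readdir);	/* readdir must not reset it to 1 */
	printf("nlink stays %u\n", cached.nlink);
	return 0;
}
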
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index af847e1cf1c1..651a5279607b 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -780,7 +780,9 @@ static const struct {
780 ERRDOS, ERRnoaccess, 0xc0000290}, { 780 ERRDOS, ERRnoaccess, 0xc0000290}, {
781 ERRDOS, ERRbadfunc, 0xc000029c}, { 781 ERRDOS, ERRbadfunc, 0xc000029c}, {
782 ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, { 782 ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
783 ERRDOS, ERRinvlevel, 0x007c0001}, }; 783 ERRDOS, ERRinvlevel, 0x007c0001}, {
784 0, 0, 0 }
785};
784 786
785/***************************************************************************** 787/*****************************************************************************
786 Print an error message from the status code 788 Print an error message from the status code
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 42ef03be089f..53a75f3d0179 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -180,6 +180,9 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
180 fattr->cf_dtype = DT_REG; 180 fattr->cf_dtype = DT_REG;
181 } 181 }
182 182
183 /* non-unix readdir doesn't provide nlink */
184 fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
185
183 if (fattr->cf_cifsattrs & ATTR_READONLY) 186 if (fattr->cf_cifsattrs & ATTR_READONLY)
184 fattr->cf_mode &= ~S_IWUGO; 187 fattr->cf_mode &= ~S_IWUGO;
185 188
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 5f99b7f19e78..e87387dbf39f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -32,88 +32,6 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include "cifs_spnego.h" 33#include "cifs_spnego.h"
34 34
35/*
36 * Checks if this is the first smb session to be reconnected after
37 * the socket has been reestablished (so we know whether to use vc 0).
38 * Called while holding the cifs_tcp_ses_lock, so do not block
39 */
40static bool is_first_ses_reconnect(struct cifs_ses *ses)
41{
42 struct list_head *tmp;
43 struct cifs_ses *tmp_ses;
44
45 list_for_each(tmp, &ses->server->smb_ses_list) {
46 tmp_ses = list_entry(tmp, struct cifs_ses,
47 smb_ses_list);
48 if (tmp_ses->need_reconnect == false)
49 return false;
50 }
51 /* could not find a session that was already connected,
52 this must be the first one we are reconnecting */
53 return true;
54}
55
56/*
57 * vc number 0 is treated specially by some servers, and should be the
58 * first one we request. After that we can use vcnumbers up to maxvcs,
59 * one for each smb session (some Windows versions set maxvcs incorrectly
60 * so maxvc=1 can be ignored). If we have too many vcs, we can reuse
61 * any vc but zero (some servers reset the connection on vcnum zero)
62 *
63 */
64static __le16 get_next_vcnum(struct cifs_ses *ses)
65{
66 __u16 vcnum = 0;
67 struct list_head *tmp;
68 struct cifs_ses *tmp_ses;
69 __u16 max_vcs = ses->server->max_vcs;
70 __u16 i;
71 int free_vc_found = 0;
72
73 /* Quoting the MS-SMB specification: "Windows-based SMB servers set this
74 field to one but do not enforce this limit, which allows an SMB client
75 to establish more virtual circuits than allowed by this value ... but
76 other server implementations can enforce this limit." */
77 if (max_vcs < 2)
78 max_vcs = 0xFFFF;
79
80 spin_lock(&cifs_tcp_ses_lock);
81 if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
82 goto get_vc_num_exit; /* vcnum will be zero */
83 for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
84 if (i == 0) /* this is the only connection, use vc 0 */
85 break;
86
87 free_vc_found = 1;
88
89 list_for_each(tmp, &ses->server->smb_ses_list) {
90 tmp_ses = list_entry(tmp, struct cifs_ses,
91 smb_ses_list);
92 if (tmp_ses->vcnum == i) {
93 free_vc_found = 0;
94 break; /* found duplicate, try next vcnum */
95 }
96 }
97 if (free_vc_found)
98 break; /* we found a vcnumber that will work - use it */
99 }
100
101 if (i == 0)
102 vcnum = 0; /* for most common case, ie if one smb session, use
103 vc zero. Also for case when no free vcnum, zero
104 is safest to send (some clients only send zero) */
105 else if (free_vc_found == 0)
106 vcnum = 1; /* we can not reuse vc=0 safely, since some servers
107 reset all uids on that, but 1 is ok. */
108 else
109 vcnum = i;
110 ses->vcnum = vcnum;
111get_vc_num_exit:
112 spin_unlock(&cifs_tcp_ses_lock);
113
114 return cpu_to_le16(vcnum);
115}
116
117static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) 35static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
118{ 36{
119 __u32 capabilities = 0; 37 __u32 capabilities = 0;
@@ -128,7 +46,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
128 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, 46 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
129 USHRT_MAX)); 47 USHRT_MAX));
130 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); 48 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
131 pSMB->req.VcNumber = get_next_vcnum(ses); 49 pSMB->req.VcNumber = __constant_cpu_to_le16(1);
132 50
133 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ 51 /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
134 52
@@ -582,9 +500,9 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
582 return NTLMv2; 500 return NTLMv2;
583 if (global_secflags & CIFSSEC_MAY_NTLM) 501 if (global_secflags & CIFSSEC_MAY_NTLM)
584 return NTLM; 502 return NTLM;
585 /* Fallthrough */
586 default: 503 default:
587 return Unspecified; 504 /* Fallthrough to attempt LANMAN authentication next */
505 break;
588 } 506 }
589 case CIFS_NEGFLAVOR_LANMAN: 507 case CIFS_NEGFLAVOR_LANMAN:
590 switch (requested) { 508 switch (requested) {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index eba0efde66d7..edccb5252462 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -687,6 +687,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
687 else 687 else
688 return -EIO; 688 return -EIO;
689 689
690 /* no need to send SMB logoff if uid already closed due to reconnect */
691 if (ses->need_reconnect)
692 goto smb2_session_already_dead;
693
690 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req); 694 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
691 if (rc) 695 if (rc)
692 return rc; 696 return rc;
@@ -701,6 +705,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
701 * No tcon so can't do 705 * No tcon so can't do
702 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]); 706 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
703 */ 707 */
708
709smb2_session_already_dead:
704 return rc; 710 return rc;
705} 711}
706 712
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index d952ee48f4dc..a4b2391fe66e 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -97,9 +97,23 @@
97#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */ 97#define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
98#define FSCTL_SRV_READ_HASH 0x001441BB /* BB add struct */ 98#define FSCTL_SRV_READ_HASH 0x001441BB /* BB add struct */
99 99
100/* See FSCC 2.1.2.5 */
100#define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003 101#define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003
101#define IO_REPARSE_TAG_HSM 0xC0000004 102#define IO_REPARSE_TAG_HSM 0xC0000004
102#define IO_REPARSE_TAG_SIS 0x80000007 103#define IO_REPARSE_TAG_SIS 0x80000007
104#define IO_REPARSE_TAG_HSM2 0x80000006
105#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
106/* Used by the DFS filter. See MS-DFSC */
107#define IO_REPARSE_TAG_DFS 0x8000000A
 108/* Used by the DFS filter. See MS-DFSC */
109#define IO_REPARSE_TAG_DFSR 0x80000012
110#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
111/* See section MS-FSCC 2.1.2.4 */
112#define IO_REPARSE_TAG_SYMLINK 0xA000000C
113#define IO_REPARSE_TAG_DEDUP 0x80000013
114#define IO_REPARSE_APPXSTREAM 0xC0000014
115/* NFS symlinks, Win 8/SMB3 and later */
116#define IO_REPARSE_TAG_NFS 0x80000014
103 117
104/* fsctl flags */ 118/* fsctl flags */
105/* If Flags is set to this value, the request is an FSCTL not ioctl request */ 119/* If Flags is set to this value, the request is an FSCTL not ioctl request */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 6fdcb1b4a106..800b938e4061 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -410,8 +410,13 @@ static int
410wait_for_free_request(struct TCP_Server_Info *server, const int timeout, 410wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
411 const int optype) 411 const int optype)
412{ 412{
413 return wait_for_free_credits(server, timeout, 413 int *val;
414 server->ops->get_credits_field(server, optype)); 414
415 val = server->ops->get_credits_field(server, optype);
416 /* Since an echo is already inflight, no need to wait to send another */
417 if (*val <= 0 && optype == CIFS_ECHO_OP)
418 return -EAGAIN;
419 return wait_for_free_credits(server, timeout, val);
415} 420}
416 421
417static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, 422static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
diff --git a/fs/dcache.c b/fs/dcache.c
index 41000305d716..20532cb0b06e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1331,14 +1331,6 @@ rename_retry:
1331 * list is non-empty and continue searching. 1331 * list is non-empty and continue searching.
1332 */ 1332 */
1333 1333
1334/**
1335 * have_submounts - check for mounts over a dentry
1336 * @parent: dentry to check.
1337 *
1338 * Return true if the parent or its subdirectories contain
1339 * a mount point
1340 */
1341
1342static enum d_walk_ret check_mount(void *data, struct dentry *dentry) 1334static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1343{ 1335{
1344 int *ret = data; 1336 int *ret = data;
@@ -1349,6 +1341,13 @@ static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1349 return D_WALK_CONTINUE; 1341 return D_WALK_CONTINUE;
1350} 1342}
1351 1343
1344/**
1345 * have_submounts - check for mounts over a dentry
1346 * @parent: dentry to check.
1347 *
1348 * Return true if the parent or its subdirectories contain
1349 * a mount point
1350 */
1352int have_submounts(struct dentry *parent) 1351int have_submounts(struct dentry *parent)
1353{ 1352{
1354 int ret = 0; 1353 int ret = 0;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c88e355f7635..000eae2782b6 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -408,7 +408,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
408 struct page *page) 408 struct page *page)
409{ 409{
410 return ecryptfs_lower_header_size(crypt_stat) + 410 return ecryptfs_lower_header_size(crypt_stat) +
411 (page->index << PAGE_CACHE_SHIFT); 411 ((loff_t)page->index << PAGE_CACHE_SHIFT);
412} 412}
413 413
414/** 414/**
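
The one-character ecryptfs fix is the usual 32-bit shift truncation: page->index is an unsigned long, so on 32-bit kernels the old expression computed the byte offset modulo 4 GiB before it was ever widened to loff_t. A standalone demonstration, assuming a 4 KiB page size (PAGE_SHIFT of 12):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* the old expression: the shift happens in 32 bits and wraps */
static int64_t offset_bad(uint32_t index)
{
	return (int64_t)(index << PAGE_SHIFT);
}

/* the fixed expression: widen first, then shift */
static int64_t offset_good(uint32_t index)
{
	return (int64_t)index << PAGE_SHIFT;
}

int main(void)
{
	uint32_t index = 0x00100000;	/* page 1M => byte offset 4 GiB */

	printf("truncated: %" PRId64 "\n", offset_bad(index));		/* 0 */
	printf("correct:   %" PRId64 "\n", offset_good(index));	/* 4294967296 */
	return 0;
}
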
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7d52806c2119..4725a07f003c 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1149 struct ecryptfs_msg_ctx *msg_ctx; 1149 struct ecryptfs_msg_ctx *msg_ctx;
1150 struct ecryptfs_message *msg = NULL; 1150 struct ecryptfs_message *msg = NULL;
1151 char *auth_tok_sig; 1151 char *auth_tok_sig;
1152 char *payload; 1152 char *payload = NULL;
1153 size_t payload_len = 0; 1153 size_t payload_len = 0;
1154 int rc; 1154 int rc;
1155 1155
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1203 } 1203 }
1204out: 1204out:
1205 kfree(msg); 1205 kfree(msg);
1206 kfree(payload);
1206 return rc; 1207 return rc;
1207} 1208}
1208 1209
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1194b1f0f839..f8cde46de9cd 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1783,7 +1783,7 @@ retry:
1783 d_tmpfile(dentry, inode); 1783 d_tmpfile(dentry, inode);
1784 err = ext3_orphan_add(handle, inode); 1784 err = ext3_orphan_add(handle, inode);
1785 if (err) 1785 if (err)
1786 goto err_drop_inode; 1786 goto err_unlock_inode;
1787 mark_inode_dirty(inode); 1787 mark_inode_dirty(inode);
1788 unlock_new_inode(inode); 1788 unlock_new_inode(inode);
1789 } 1789 }
@@ -1791,10 +1791,9 @@ retry:
1791 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) 1791 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
1792 goto retry; 1792 goto retry;
1793 return err; 1793 return err;
1794err_drop_inode: 1794err_unlock_inode:
1795 ext3_journal_stop(handle); 1795 ext3_journal_stop(handle);
1796 unlock_new_inode(inode); 1796 unlock_new_inode(inode);
1797 iput(inode);
1798 return err; 1797 return err;
1799} 1798}
1800 1799
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0d424d7ac02b..e274e9c1171f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2563,7 +2563,7 @@ retry:
2563 break; 2563 break;
2564 } 2564 }
2565 blk_finish_plug(&plug); 2565 blk_finish_plug(&plug);
2566 if (!ret && !cycled) { 2566 if (!ret && !cycled && wbc->nr_to_write > 0) {
2567 cycled = 1; 2567 cycled = 1;
2568 mpd.last_page = writeback_index - 1; 2568 mpd.last_page = writeback_index - 1;
2569 mpd.first_page = 0; 2569 mpd.first_page = 0;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 1bec5a5c1e45..5a0408d7b114 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2319,7 +2319,7 @@ retry:
2319 d_tmpfile(dentry, inode); 2319 d_tmpfile(dentry, inode);
2320 err = ext4_orphan_add(handle, inode); 2320 err = ext4_orphan_add(handle, inode);
2321 if (err) 2321 if (err)
2322 goto err_drop_inode; 2322 goto err_unlock_inode;
2323 mark_inode_dirty(inode); 2323 mark_inode_dirty(inode);
2324 unlock_new_inode(inode); 2324 unlock_new_inode(inode);
2325 } 2325 }
@@ -2328,10 +2328,9 @@ retry:
2328 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 2328 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2329 goto retry; 2329 goto retry;
2330 return err; 2330 return err;
2331err_drop_inode: 2331err_unlock_inode:
2332 ext4_journal_stop(handle); 2332 ext4_journal_stop(handle);
2333 unlock_new_inode(inode); 2333 unlock_new_inode(inode);
2334 iput(inode);
2335 return err; 2334 return err;
2336} 2335}
2337 2336
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c081e34f717f..03e9bebba198 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1350,6 +1350,8 @@ retry:
1350 s_min_extra_isize) { 1350 s_min_extra_isize) {
1351 tried_min_extra_isize++; 1351 tried_min_extra_isize++;
1352 new_extra_isize = s_min_extra_isize; 1352 new_extra_isize = s_min_extra_isize;
1353 kfree(is); is = NULL;
1354 kfree(bs); bs = NULL;
1353 goto retry; 1355 goto retry;
1354 } 1356 }
1355 error = -1; 1357 error = -1;
diff --git a/fs/file_table.c b/fs/file_table.c
index abdd15ad13c9..e900ca518635 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -297,7 +297,7 @@ void flush_delayed_fput(void)
297 delayed_fput(NULL); 297 delayed_fput(NULL);
298} 298}
299 299
300static DECLARE_WORK(delayed_fput_work, delayed_fput); 300static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
301 301
302void fput(struct file *file) 302void fput(struct file *file)
303{ 303{
@@ -317,7 +317,7 @@ void fput(struct file *file)
317 } 317 }
318 318
319 if (llist_add(&file->f_u.fu_llist, &delayed_fput_list)) 319 if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
320 schedule_work(&delayed_fput_work); 320 schedule_delayed_work(&delayed_fput_work, 1);
321 } 321 }
322} 322}
323 323
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 62b43b577bfc..b7989f2ab4c4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -182,6 +182,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
182 struct inode *inode; 182 struct inode *inode;
183 struct dentry *parent; 183 struct dentry *parent;
184 struct fuse_conn *fc; 184 struct fuse_conn *fc;
185 struct fuse_inode *fi;
185 int ret; 186 int ret;
186 187
187 inode = ACCESS_ONCE(entry->d_inode); 188 inode = ACCESS_ONCE(entry->d_inode);
@@ -228,7 +229,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
228 if (!err && !outarg.nodeid) 229 if (!err && !outarg.nodeid)
229 err = -ENOENT; 230 err = -ENOENT;
230 if (!err) { 231 if (!err) {
231 struct fuse_inode *fi = get_fuse_inode(inode); 232 fi = get_fuse_inode(inode);
232 if (outarg.nodeid != get_node_id(inode)) { 233 if (outarg.nodeid != get_node_id(inode)) {
233 fuse_queue_forget(fc, forget, outarg.nodeid, 1); 234 fuse_queue_forget(fc, forget, outarg.nodeid, 1);
234 goto invalid; 235 goto invalid;
@@ -246,8 +247,11 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
246 attr_version); 247 attr_version);
247 fuse_change_entry_timeout(entry, &outarg); 248 fuse_change_entry_timeout(entry, &outarg);
248 } else if (inode) { 249 } else if (inode) {
249 fc = get_fuse_conn(inode); 250 fi = get_fuse_inode(inode);
250 if (fc->readdirplus_auto) { 251 if (flags & LOOKUP_RCU) {
252 if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state))
253 return -ECHILD;
254 } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) {
251 parent = dget_parent(entry); 255 parent = dget_parent(entry);
252 fuse_advise_use_readdirplus(parent->d_inode); 256 fuse_advise_use_readdirplus(parent->d_inode);
253 dput(parent); 257 dput(parent);
@@ -259,7 +263,8 @@ out:
259 263
260invalid: 264invalid:
261 ret = 0; 265 ret = 0;
262 if (check_submounts_and_drop(entry) != 0) 266
267 if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
263 ret = 1; 268 ret = 1;
264 goto out; 269 goto out;
265} 270}
@@ -1063,6 +1068,8 @@ static int fuse_access(struct inode *inode, int mask)
1063 struct fuse_access_in inarg; 1068 struct fuse_access_in inarg;
1064 int err; 1069 int err;
1065 1070
1071 BUG_ON(mask & MAY_NOT_BLOCK);
1072
1066 if (fc->no_access) 1073 if (fc->no_access)
1067 return 0; 1074 return 0;
1068 1075
@@ -1150,9 +1157,6 @@ static int fuse_permission(struct inode *inode, int mask)
1150 noticed immediately, only after the attribute 1157 noticed immediately, only after the attribute
1151 timeout has expired */ 1158 timeout has expired */
1152 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { 1159 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1153 if (mask & MAY_NOT_BLOCK)
1154 return -ECHILD;
1155
1156 err = fuse_access(inode, mask); 1160 err = fuse_access(inode, mask);
1157 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { 1161 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1158 if (!(inode->i_mode & S_IXUGO)) { 1162 if (!(inode->i_mode & S_IXUGO)) {
@@ -1291,6 +1295,8 @@ static int fuse_direntplus_link(struct file *file,
1291 } 1295 }
1292 1296
1293found: 1297found:
1298 if (fc->readdirplus_auto)
1299 set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
1294 fuse_change_entry_timeout(dentry, o); 1300 fuse_change_entry_timeout(dentry, o);
1295 1301
1296 err = 0; 1302 err = 0;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d409deafc67b..4598345ab87d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2467,6 +2467,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2467{ 2467{
2468 struct fuse_file *ff = file->private_data; 2468 struct fuse_file *ff = file->private_data;
2469 struct inode *inode = file->f_inode; 2469 struct inode *inode = file->f_inode;
2470 struct fuse_inode *fi = get_fuse_inode(inode);
2470 struct fuse_conn *fc = ff->fc; 2471 struct fuse_conn *fc = ff->fc;
2471 struct fuse_req *req; 2472 struct fuse_req *req;
2472 struct fuse_fallocate_in inarg = { 2473 struct fuse_fallocate_in inarg = {
@@ -2484,10 +2485,20 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2484 2485
2485 if (lock_inode) { 2486 if (lock_inode) {
2486 mutex_lock(&inode->i_mutex); 2487 mutex_lock(&inode->i_mutex);
2487 if (mode & FALLOC_FL_PUNCH_HOLE) 2488 if (mode & FALLOC_FL_PUNCH_HOLE) {
2488 fuse_set_nowrite(inode); 2489 loff_t endbyte = offset + length - 1;
2490 err = filemap_write_and_wait_range(inode->i_mapping,
2491 offset, endbyte);
2492 if (err)
2493 goto out;
2494
2495 fuse_sync_writes(inode);
2496 }
2489 } 2497 }
2490 2498
2499 if (!(mode & FALLOC_FL_KEEP_SIZE))
2500 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2501
2491 req = fuse_get_req_nopages(fc); 2502 req = fuse_get_req_nopages(fc);
2492 if (IS_ERR(req)) { 2503 if (IS_ERR(req)) {
2493 err = PTR_ERR(req); 2504 err = PTR_ERR(req);
@@ -2520,11 +2531,11 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2520 fuse_invalidate_attr(inode); 2531 fuse_invalidate_attr(inode);
2521 2532
2522out: 2533out:
2523 if (lock_inode) { 2534 if (!(mode & FALLOC_FL_KEEP_SIZE))
2524 if (mode & FALLOC_FL_PUNCH_HOLE) 2535 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2525 fuse_release_nowrite(inode); 2536
2537 if (lock_inode)
2526 mutex_unlock(&inode->i_mutex); 2538 mutex_unlock(&inode->i_mutex);
2527 }
2528 2539
2529 return err; 2540 return err;
2530} 2541}
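
[Editor's note] The fallocate hunks above add two safeguards: dirty page cache in the affected range is written back and waited on before a hole punch, so in-flight writeback cannot repopulate the hole afterwards, and FUSE_I_SIZE_UNSTABLE is held across size-changing operations. A hedged userspace analogue of the flush-then-punch ordering; the syscalls and flags are real, the helper name is made up:

#define _GNU_SOURCE
#include <fcntl.h>

/* Flush and wait on the byte range, then punch the hole, mirroring the
 * filemap_write_and_wait_range() + fuse_sync_writes() ordering above. */
static int punch_hole_synced(int fd, off_t offset, off_t len)
{
        if (sync_file_range(fd, offset, len,
                            SYNC_FILE_RANGE_WAIT_BEFORE |
                            SYNC_FILE_RANGE_WRITE |
                            SYNC_FILE_RANGE_WAIT_AFTER) != 0)
                return -1;
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         offset, len);
}
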
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5ced199b50bb..5b9e6f3b6aef 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -115,6 +115,8 @@ struct fuse_inode {
115enum { 115enum {
116 /** Advise readdirplus */ 116 /** Advise readdirplus */
117 FUSE_I_ADVISE_RDPLUS, 117 FUSE_I_ADVISE_RDPLUS,
118 /** Initialized with readdirplus */
119 FUSE_I_INIT_RDPLUS,
118 /** An operation changing file size is in progress */ 120 /** An operation changing file size is in progress */
119 FUSE_I_SIZE_UNSTABLE, 121 FUSE_I_SIZE_UNSTABLE,
120}; 122};
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index c1a3e603279c..7f464c513ba0 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
95 95
96 if (insert_inode_locked(inode) < 0) { 96 if (insert_inode_locked(inode) < 0) {
97 rc = -EINVAL; 97 rc = -EINVAL;
98 goto fail_unlock; 98 goto fail_put;
99 } 99 }
100 100
101 inode_init_owner(inode, parent, mode); 101 inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
156fail_drop: 156fail_drop:
157 dquot_drop(inode); 157 dquot_drop(inode);
158 inode->i_flags |= S_NOQUOTA; 158 inode->i_flags |= S_NOQUOTA;
159fail_unlock:
160 clear_nlink(inode); 159 clear_nlink(inode);
161 unlock_new_inode(inode); 160 unlock_new_inode(inode);
162fail_put: 161fail_put:
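
[Editor's note] The jfs hunk retargets the insert_inode_locked() failure from fail_unlock to fail_put: an inode that was never successfully marked locked must not go through unlock_new_inode(). The general rule is that each error label should undo only the steps that actually completed. A generic sketch of that goto-unwinding discipline; the struct and stub functions are hypothetical:

#include <stdlib.h>

struct thing { int locked; };

static int insert_into_index(struct thing *t) { (void)t; return 0; }   /* stub */
static int finish_setup(struct thing *t)      { (void)t; return 0; }   /* stub */

static struct thing *make_thing(void)
{
        struct thing *t = calloc(1, sizeof(*t));
        if (!t)
                return NULL;

        if (insert_into_index(t) < 0)
                goto fail_put;           /* nothing locked yet: just free */

        t->locked = 1;
        if (finish_setup(t) < 0)
                goto fail_unlock;        /* locked: must unlock before freeing */

        return t;

fail_unlock:
        t->locked = 0;
fail_put:
        free(t);
        return NULL;
}
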
diff --git a/fs/namei.c b/fs/namei.c
index 645268f23eb6..caa28051e197 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2294,10 +2294,11 @@ out:
2294 * path_mountpoint - look up a path to be umounted 2294 * path_mountpoint - look up a path to be umounted
2295 * @dfd: directory file descriptor to start walk from 2295 * @dfd: directory file descriptor to start walk from
2296 * @name: full pathname to walk 2296 * @name: full pathname to walk
2297 * @path: pointer to container for result
2297 * @flags: lookup flags 2298 * @flags: lookup flags
2298 * 2299 *
2299 * Look up the given name, but don't attempt to revalidate the last component. 2300 * Look up the given name, but don't attempt to revalidate the last component.
2300 * Returns 0 and "path" will be valid on success; Retuns error otherwise. 2301 * Returns 0 and "path" will be valid on success; Returns error otherwise.
2301 */ 2302 */
2302static int 2303static int
2303path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) 2304path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 854a8f05a610..02b0df769e2d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1458,7 +1458,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1458 1458
1459 trace_nfs_atomic_open_enter(dir, ctx, open_flags); 1459 trace_nfs_atomic_open_enter(dir, ctx, open_flags);
1460 nfs_block_sillyrename(dentry->d_parent); 1460 nfs_block_sillyrename(dentry->d_parent);
1461 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr); 1461 inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
1462 nfs_unblock_sillyrename(dentry->d_parent); 1462 nfs_unblock_sillyrename(dentry->d_parent);
1463 if (IS_ERR(inode)) { 1463 if (IS_ERR(inode)) {
1464 err = PTR_ERR(inode); 1464 err = PTR_ERR(inode);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index e5b804dd944c..77efaf15ec90 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -19,6 +19,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
19 struct inode *dir; 19 struct inode *dir;
20 unsigned openflags = filp->f_flags; 20 unsigned openflags = filp->f_flags;
21 struct iattr attr; 21 struct iattr attr;
22 int opened = 0;
22 int err; 23 int err;
23 24
24 /* 25 /*
@@ -55,7 +56,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
55 nfs_wb_all(inode); 56 nfs_wb_all(inode);
56 } 57 }
57 58
58 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); 59 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
59 if (IS_ERR(inode)) { 60 if (IS_ERR(inode)) {
60 err = PTR_ERR(inode); 61 err = PTR_ERR(inode);
61 switch (err) { 62 switch (err) {
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 95604f64cab8..c7c295e556ed 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -185,6 +185,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
185 if (status) 185 if (status)
186 goto out_put; 186 goto out_put;
187 187
188 smp_wmb();
188 ds->ds_clp = clp; 189 ds->ds_clp = clp;
189 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); 190 dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
190out: 191out:
@@ -801,34 +802,35 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
801 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; 802 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
802 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; 803 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
803 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); 804 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
804 805 struct nfs4_pnfs_ds *ret = ds;
805 if (filelayout_test_devid_unavailable(devid))
806 return NULL;
807 806
808 if (ds == NULL) { 807 if (ds == NULL) {
809 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 808 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
810 __func__, ds_idx); 809 __func__, ds_idx);
811 filelayout_mark_devid_invalid(devid); 810 filelayout_mark_devid_invalid(devid);
812 return NULL; 811 goto out;
813 } 812 }
813 smp_rmb();
814 if (ds->ds_clp) 814 if (ds->ds_clp)
815 return ds; 815 goto out_test_devid;
816 816
817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { 817 if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 818 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
819 int err; 819 int err;
820 820
821 err = nfs4_ds_connect(s, ds); 821 err = nfs4_ds_connect(s, ds);
822 if (err) { 822 if (err)
823 nfs4_mark_deviceid_unavailable(devid); 823 nfs4_mark_deviceid_unavailable(devid);
824 ds = NULL;
825 }
826 nfs4_clear_ds_conn_bit(ds); 824 nfs4_clear_ds_conn_bit(ds);
827 } else { 825 } else {
828 /* Either ds is connected, or ds is NULL */ 826 /* Either ds is connected, or ds is NULL */
829 nfs4_wait_ds_connect(ds); 827 nfs4_wait_ds_connect(ds);
830 } 828 }
831 return ds; 829out_test_devid:
830 if (filelayout_test_devid_unavailable(devid))
831 ret = NULL;
832out:
833 return ret;
832} 834}
833 835
834module_param(dataserver_retrans, uint, 0644); 836module_param(dataserver_retrans, uint, 0644);
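
[Editor's note] The smp_wmb() added before the ds->ds_clp assignment pairs with the smp_rmb() added before the ds->ds_clp test in nfs4_fl_prepare_ds(): the connect path publishes the client pointer only after the structure it refers to is fully set up, and readers must not reorder their dereference ahead of the pointer check. A C11 release/acquire analogue of the same publish/consume pattern (these are not the kernel primitives):

#include <stdatomic.h>

struct conn { int ready; };

static _Atomic(struct conn *) published;

/* Writer: initialise fields first, then publish the pointer (release,
 * the analogue of smp_wmb() before the store). */
static void publish(struct conn *c)
{
        c->ready = 1;
        atomic_store_explicit(&published, c, memory_order_release);
}

/* Reader: acquire load (the analogue of smp_rmb() before the check);
 * if the result is non-NULL, c->ready is guaranteed to be visible. */
static struct conn *lookup(void)
{
        return atomic_load_explicit(&published, memory_order_acquire);
}
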
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 989bb9d3074d..d53d6785cba2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -912,6 +912,7 @@ struct nfs4_opendata {
912 struct iattr attrs; 912 struct iattr attrs;
913 unsigned long timestamp; 913 unsigned long timestamp;
914 unsigned int rpc_done : 1; 914 unsigned int rpc_done : 1;
915 unsigned int file_created : 1;
915 unsigned int is_recover : 1; 916 unsigned int is_recover : 1;
916 int rpc_status; 917 int rpc_status;
917 int cancelled; 918 int cancelled;
@@ -1946,8 +1947,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1946 1947
1947 nfs_fattr_map_and_free_names(server, &data->f_attr); 1948 nfs_fattr_map_and_free_names(server, &data->f_attr);
1948 1949
1949 if (o_arg->open_flags & O_CREAT) 1950 if (o_arg->open_flags & O_CREAT) {
1950 update_changeattr(dir, &o_res->cinfo); 1951 update_changeattr(dir, &o_res->cinfo);
1952 if (o_arg->open_flags & O_EXCL)
1953 data->file_created = 1;
1954 else if (o_res->cinfo.before != o_res->cinfo.after)
1955 data->file_created = 1;
1956 }
1951 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1957 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1952 server->caps &= ~NFS_CAP_POSIX_LOCK; 1958 server->caps &= ~NFS_CAP_POSIX_LOCK;
1953 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1959 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -2191,7 +2197,8 @@ static int _nfs4_do_open(struct inode *dir,
2191 struct nfs_open_context *ctx, 2197 struct nfs_open_context *ctx,
2192 int flags, 2198 int flags,
2193 struct iattr *sattr, 2199 struct iattr *sattr,
2194 struct nfs4_label *label) 2200 struct nfs4_label *label,
2201 int *opened)
2195{ 2202{
2196 struct nfs4_state_owner *sp; 2203 struct nfs4_state_owner *sp;
2197 struct nfs4_state *state = NULL; 2204 struct nfs4_state *state = NULL;
@@ -2261,6 +2268,8 @@ static int _nfs4_do_open(struct inode *dir,
2261 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2268 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2262 } 2269 }
2263 } 2270 }
2271 if (opendata->file_created)
2272 *opened |= FILE_CREATED;
2264 2273
2265 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2274 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2266 *ctx_th = opendata->f_attr.mdsthreshold; 2275 *ctx_th = opendata->f_attr.mdsthreshold;
@@ -2289,7 +2298,8 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2289 struct nfs_open_context *ctx, 2298 struct nfs_open_context *ctx,
2290 int flags, 2299 int flags,
2291 struct iattr *sattr, 2300 struct iattr *sattr,
2292 struct nfs4_label *label) 2301 struct nfs4_label *label,
2302 int *opened)
2293{ 2303{
2294 struct nfs_server *server = NFS_SERVER(dir); 2304 struct nfs_server *server = NFS_SERVER(dir);
2295 struct nfs4_exception exception = { }; 2305 struct nfs4_exception exception = { };
@@ -2297,7 +2307,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
2297 int status; 2307 int status;
2298 2308
2299 do { 2309 do {
2300 status = _nfs4_do_open(dir, ctx, flags, sattr, label); 2310 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2301 res = ctx->state; 2311 res = ctx->state;
2302 trace_nfs4_open_file(ctx, flags, status); 2312 trace_nfs4_open_file(ctx, flags, status);
2303 if (status == 0) 2313 if (status == 0)
@@ -2659,7 +2669,8 @@ out:
2659} 2669}
2660 2670
2661static struct inode * 2671static struct inode *
2662nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2672nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2673 int open_flags, struct iattr *attr, int *opened)
2663{ 2674{
2664 struct nfs4_state *state; 2675 struct nfs4_state *state;
2665 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2676 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
@@ -2667,7 +2678,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
2667 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2678 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2668 2679
2669 /* Protect against concurrent sillydeletes */ 2680 /* Protect against concurrent sillydeletes */
2670 state = nfs4_do_open(dir, ctx, open_flags, attr, label); 2681 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2671 2682
2672 nfs4_label_release_security(label); 2683 nfs4_label_release_security(label);
2673 2684
@@ -3332,6 +3343,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3332 struct nfs4_label l, *ilabel = NULL; 3343 struct nfs4_label l, *ilabel = NULL;
3333 struct nfs_open_context *ctx; 3344 struct nfs_open_context *ctx;
3334 struct nfs4_state *state; 3345 struct nfs4_state *state;
3346 int opened = 0;
3335 int status = 0; 3347 int status = 0;
3336 3348
3337 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3349 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3341,7 +3353,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3341 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3353 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3342 3354
3343 sattr->ia_mode &= ~current_umask(); 3355 sattr->ia_mode &= ~current_umask();
3344 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel); 3356 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3345 if (IS_ERR(state)) { 3357 if (IS_ERR(state)) {
3346 status = PTR_ERR(state); 3358 status = PTR_ERR(state);
3347 goto out; 3359 goto out;
@@ -7564,8 +7576,10 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7564{ 7576{
7565 int err; 7577 int err;
7566 struct page *page; 7578 struct page *page;
7567 rpc_authflavor_t flavor; 7579 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
7568 struct nfs4_secinfo_flavors *flavors; 7580 struct nfs4_secinfo_flavors *flavors;
7581 struct nfs4_secinfo4 *secinfo;
7582 int i;
7569 7583
7570 page = alloc_page(GFP_KERNEL); 7584 page = alloc_page(GFP_KERNEL);
7571 if (!page) { 7585 if (!page) {
@@ -7587,9 +7601,31 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
7587 if (err) 7601 if (err)
7588 goto out_freepage; 7602 goto out_freepage;
7589 7603
7590 flavor = nfs_find_best_sec(flavors); 7604 for (i = 0; i < flavors->num_flavors; i++) {
7591 if (err == 0) 7605 secinfo = &flavors->flavors[i];
7592 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 7606
7607 switch (secinfo->flavor) {
7608 case RPC_AUTH_NULL:
7609 case RPC_AUTH_UNIX:
7610 case RPC_AUTH_GSS:
7611 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
7612 &secinfo->flavor_info);
7613 break;
7614 default:
7615 flavor = RPC_AUTH_MAXFLAVOR;
7616 break;
7617 }
7618
7619 if (flavor != RPC_AUTH_MAXFLAVOR) {
7620 err = nfs4_lookup_root_sec(server, fhandle,
7621 info, flavor);
7622 if (!err)
7623 break;
7624 }
7625 }
7626
7627 if (flavor == RPC_AUTH_MAXFLAVOR)
7628 err = -EPERM;
7593 7629
7594out_freepage: 7630out_freepage:
7595 put_page(page); 7631 put_page(page);
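
[Editor's note] The nfs41_find_root_sec() rework above replaces a single "best flavor" guess with a loop that walks the server-advertised security flavors in order and keeps the first one the root lookup actually accepts, falling back to -EPERM when none is usable. A generic sketch of that selection loop; the flavor names and the lookup callback are stand-ins, not the NFS API:

#include <errno.h>
#include <stddef.h>

enum flavor { FLAVOR_UNSUPPORTED = -1, FLAVOR_NULL, FLAVOR_UNIX, FLAVOR_GSS };

/* try_lookup is supplied by the caller and returns 0 when the flavor works. */
static int find_root_sec(const enum flavor *advertised, size_t n,
                         int (*try_lookup)(enum flavor))
{
        int err = -EPERM;                /* nothing usable unless a try succeeds */

        for (size_t i = 0; i < n; i++) {
                if (advertised[i] == FLAVOR_UNSUPPORTED)
                        continue;        /* skip flavors we cannot map */
                err = try_lookup(advertised[i]);
                if (err == 0)
                        break;           /* first working flavor wins */
        }
        return err;
}
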
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0ba679866e50..da276640f776 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
94 clear_buffer_nilfs_volatile(bh); 94 clear_buffer_nilfs_volatile(bh);
95 clear_buffer_nilfs_checked(bh); 95 clear_buffer_nilfs_checked(bh);
96 clear_buffer_nilfs_redirected(bh); 96 clear_buffer_nilfs_redirected(bh);
97 clear_buffer_async_write(bh);
97 clear_buffer_dirty(bh); 98 clear_buffer_dirty(bh);
98 if (nilfs_page_buffers_clean(page)) 99 if (nilfs_page_buffers_clean(page))
99 __nilfs_clear_page_dirty(page); 100 __nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
429 "discard block %llu, size %zu", 430 "discard block %llu, size %zu",
430 (u64)bh->b_blocknr, bh->b_size); 431 (u64)bh->b_blocknr, bh->b_size);
431 } 432 }
433 clear_buffer_async_write(bh);
432 clear_buffer_dirty(bh); 434 clear_buffer_dirty(bh);
433 clear_buffer_nilfs_volatile(bh); 435 clear_buffer_nilfs_volatile(bh);
434 clear_buffer_nilfs_checked(bh); 436 clear_buffer_nilfs_checked(bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bd88a7461063..9f6b486b6c01 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
665 665
666 bh = head = page_buffers(page); 666 bh = head = page_buffers(page);
667 do { 667 do {
668 if (!buffer_dirty(bh)) 668 if (!buffer_dirty(bh) || buffer_async_write(bh))
669 continue; 669 continue;
670 get_bh(bh); 670 get_bh(bh);
671 list_add_tail(&bh->b_assoc_buffers, listp); 671 list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
699 for (i = 0; i < pagevec_count(&pvec); i++) { 699 for (i = 0; i < pagevec_count(&pvec); i++) {
700 bh = head = page_buffers(pvec.pages[i]); 700 bh = head = page_buffers(pvec.pages[i]);
701 do { 701 do {
702 if (buffer_dirty(bh)) { 702 if (buffer_dirty(bh) &&
703 !buffer_async_write(bh)) {
703 get_bh(bh); 704 get_bh(bh);
704 list_add_tail(&bh->b_assoc_buffers, 705 list_add_tail(&bh->b_assoc_buffers,
705 listp); 706 listp);
@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1579 1580
1580 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1581 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1581 b_assoc_buffers) { 1582 b_assoc_buffers) {
1583 set_buffer_async_write(bh);
1582 if (bh->b_page != bd_page) { 1584 if (bh->b_page != bd_page) {
1583 if (bd_page) { 1585 if (bd_page) {
1584 lock_page(bd_page); 1586 lock_page(bd_page);
@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1592 1594
1593 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1595 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1594 b_assoc_buffers) { 1596 b_assoc_buffers) {
1597 set_buffer_async_write(bh);
1595 if (bh == segbuf->sb_super_root) { 1598 if (bh == segbuf->sb_super_root) {
1596 if (bh->b_page != bd_page) { 1599 if (bh->b_page != bd_page) {
1597 lock_page(bd_page); 1600 lock_page(bd_page);
@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1677 list_for_each_entry(segbuf, logs, sb_list) { 1680 list_for_each_entry(segbuf, logs, sb_list) {
1678 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1681 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1679 b_assoc_buffers) { 1682 b_assoc_buffers) {
1683 clear_buffer_async_write(bh);
1680 if (bh->b_page != bd_page) { 1684 if (bh->b_page != bd_page) {
1681 if (bd_page) 1685 if (bd_page)
1682 end_page_writeback(bd_page); 1686 end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
1686 1690
1687 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1691 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1688 b_assoc_buffers) { 1692 b_assoc_buffers) {
1693 clear_buffer_async_write(bh);
1689 if (bh == segbuf->sb_super_root) { 1694 if (bh == segbuf->sb_super_root) {
1690 if (bh->b_page != bd_page) { 1695 if (bh->b_page != bd_page) {
1691 end_page_writeback(bd_page); 1696 end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1755 b_assoc_buffers) { 1760 b_assoc_buffers) {
1756 set_buffer_uptodate(bh); 1761 set_buffer_uptodate(bh);
1757 clear_buffer_dirty(bh); 1762 clear_buffer_dirty(bh);
1763 clear_buffer_async_write(bh);
1758 if (bh->b_page != bd_page) { 1764 if (bh->b_page != bd_page) {
1759 if (bd_page) 1765 if (bd_page)
1760 end_page_writeback(bd_page); 1766 end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1776 b_assoc_buffers) { 1782 b_assoc_buffers) {
1777 set_buffer_uptodate(bh); 1783 set_buffer_uptodate(bh);
1778 clear_buffer_dirty(bh); 1784 clear_buffer_dirty(bh);
1785 clear_buffer_async_write(bh);
1779 clear_buffer_delay(bh); 1786 clear_buffer_delay(bh);
1780 clear_buffer_nilfs_volatile(bh); 1787 clear_buffer_nilfs_volatile(bh);
1781 clear_buffer_nilfs_redirected(bh); 1788 clear_buffer_nilfs_redirected(bh);
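
[Editor's note] The nilfs2 hunks above tag buffers with the async_write buffer flag while they are queued for segment I/O, so the dirty-buffer collectors skip anything already in flight and the flag is cleared on completion or abort. A generic userspace analogue of "mark in-flight so collectors skip it" (this is not the kernel buffer_head API):

#include <stdbool.h>

enum { BUF_DIRTY = 1 << 0, BUF_ASYNC_WRITE = 1 << 1 };

struct buf { unsigned int flags; };

/* Collector: pick up dirty buffers, but skip ones already queued for I/O. */
static bool should_collect(const struct buf *b)
{
        return (b->flags & BUF_DIRTY) && !(b->flags & BUF_ASYNC_WRITE);
}

static void start_write(struct buf *b) { b->flags |= BUF_ASYNC_WRITE; }
static void end_write(struct buf *b)   { b->flags &= ~(BUF_ASYNC_WRITE | BUF_DIRTY); }
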
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index ef999729e274..0d3a97d2d5f6 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -70,9 +70,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
70 */ 70 */
71 if (inode == NULL) { 71 if (inode == NULL) {
72 unsigned long gen = (unsigned long) dentry->d_fsdata; 72 unsigned long gen = (unsigned long) dentry->d_fsdata;
73 unsigned long pgen = 73 unsigned long pgen;
74 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; 74 spin_lock(&dentry->d_lock);
75 75 pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
76 spin_unlock(&dentry->d_lock);
76 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, 77 trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
77 dentry->d_name.name, 78 dentry->d_name.name,
78 pgen, gen); 79 pgen, gen);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 121da2dc3be8..d4e81e4a9b04 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1924{ 1924{
1925 int tmp, hangup_needed = 0; 1925 int tmp, hangup_needed = 0;
1926 struct ocfs2_super *osb = NULL; 1926 struct ocfs2_super *osb = NULL;
1927 char nodestr[8]; 1927 char nodestr[12];
1928 1928
1929 trace_ocfs2_dismount_volume(sb); 1929 trace_ocfs2_dismount_volume(sb);
1930 1930
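
[Editor's note] The ocfs2 one-liner above grows nodestr[] from 8 to 12 bytes: the node number printed into it is a 32-bit value, which can need up to 10 decimal digits plus the terminating NUL, so 8 bytes can truncate. A sizing sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* A uint32_t needs at most 10 digits plus NUL when printed in decimal. */
static void format_node(uint32_t node, char *buf, size_t len)
{
        snprintf(buf, len, "%u", node);
}

/* char nodestr[12]; format_node(UINT32_MAX, nodestr, sizeof(nodestr)); */
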
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 9f8ef9b7674d..8eaa1ba793fc 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -288,10 +288,14 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
288static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) 288static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
289{ 289{
290 struct proc_dir_entry *pde = PDE(file_inode(file)); 290 struct proc_dir_entry *pde = PDE(file_inode(file));
291 int rv = -EIO; 291 unsigned long rv = -EIO;
292 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 292 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
293 if (use_pde(pde)) { 293 if (use_pde(pde)) {
294 get_unmapped_area = pde->proc_fops->get_unmapped_area; 294#ifdef CONFIG_MMU
295 get_unmapped_area = current->mm->get_unmapped_area;
296#endif
297 if (pde->proc_fops->get_unmapped_area)
298 get_unmapped_area = pde->proc_fops->get_unmapped_area;
295 if (get_unmapped_area) 299 if (get_unmapped_area)
296 rv = get_unmapped_area(file, orig_addr, len, pgoff, flags); 300 rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
297 unuse_pde(pde); 301 unuse_pde(pde);
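
[Editor's note] Two things change in proc_reg_get_unmapped_area() above: rv becomes unsigned long because this interface returns either a mapping address or a negated errno packed into the same unsigned long (an int would mangle large addresses), and on MMU kernels the code now falls back to current->mm->get_unmapped_area when the proc entry supplies none. A userspace-style sketch of the error-in-address encoding; the macro mirrors the kernel's IS_ERR_VALUE idea but is written here for illustration:

#include <errno.h>

#define MAX_ERRNO 4095UL
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* Returns a page-aligned "address", or a negated errno in the same word. */
static unsigned long pick_area(unsigned long hint)
{
        if (hint == 0)
                return (unsigned long)-EINVAL;
        return hint & ~0xfffUL;
}

/* unsigned long a = pick_area(0); if (IS_ERR_VALUE(a)) handle (long)a as -errno */
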
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7366e9d63cee..390bdab01c3c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -941,6 +941,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
941 frame = pte_pfn(pte); 941 frame = pte_pfn(pte);
942 flags = PM_PRESENT; 942 flags = PM_PRESENT;
943 page = vm_normal_page(vma, addr, pte); 943 page = vm_normal_page(vma, addr, pte);
944 if (pte_soft_dirty(pte))
945 flags2 |= __PM_SOFT_DIRTY;
944 } else if (is_swap_pte(pte)) { 946 } else if (is_swap_pte(pte)) {
945 swp_entry_t entry; 947 swp_entry_t entry;
946 if (pte_swp_soft_dirty(pte)) 948 if (pte_swp_soft_dirty(pte))
@@ -960,7 +962,7 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
960 962
961 if (page && !PageAnon(page)) 963 if (page && !PageAnon(page))
962 flags |= PM_FILE; 964 flags |= PM_FILE;
963 if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte)) 965 if ((vma->vm_flags & VM_SOFTDIRTY))
964 flags2 |= __PM_SOFT_DIRTY; 966 flags2 |= __PM_SOFT_DIRTY;
965 967
966 *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); 968 *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 73feacc49b2e..fd777032c2ba 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1163 return NULL; 1163 return NULL;
1164} 1164}
1165 1165
1166static int newer_jl_done(struct reiserfs_journal_cnode *cn)
1167{
1168 struct super_block *sb = cn->sb;
1169 b_blocknr_t blocknr = cn->blocknr;
1170
1171 cn = cn->hprev;
1172 while (cn) {
1173 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
1174 atomic_read(&cn->jlist->j_commit_left) != 0)
1175 return 0;
1176 cn = cn->hprev;
1177 }
1178 return 1;
1179}
1180
1181static void remove_journal_hash(struct super_block *, 1166static void remove_journal_hash(struct super_block *,
1182 struct reiserfs_journal_cnode **, 1167 struct reiserfs_journal_cnode **,
1183 struct reiserfs_journal_list *, unsigned long, 1168 struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
1353 reiserfs_warning(s, "clm-2048", "called with wcount %d", 1338 reiserfs_warning(s, "clm-2048", "called with wcount %d",
1354 atomic_read(&journal->j_wcount)); 1339 atomic_read(&journal->j_wcount));
1355 } 1340 }
1356 BUG_ON(jl->j_trans_id == 0);
1357 1341
1358 /* if flushall == 0, the lock is already held */ 1342 /* if flushall == 0, the lock is already held */
1359 if (flushall) { 1343 if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
1593 return err; 1577 return err;
1594} 1578}
1595 1579
1596static int test_transaction(struct super_block *s,
1597 struct reiserfs_journal_list *jl)
1598{
1599 struct reiserfs_journal_cnode *cn;
1600
1601 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
1602 return 1;
1603
1604 cn = jl->j_realblock;
1605 while (cn) {
1606 /* if the blocknr == 0, this has been cleared from the hash,
1607 ** skip it
1608 */
1609 if (cn->blocknr == 0) {
1610 goto next;
1611 }
1612 if (cn->bh && !newer_jl_done(cn))
1613 return 0;
1614 next:
1615 cn = cn->next;
1616 cond_resched();
1617 }
1618 return 0;
1619}
1620
1621static int write_one_transaction(struct super_block *s, 1580static int write_one_transaction(struct super_block *s,
1622 struct reiserfs_journal_list *jl, 1581 struct reiserfs_journal_list *jl,
1623 struct buffer_chunk *chunk) 1582 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
1805 break; 1764 break;
1806 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next); 1765 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1807 } 1766 }
1767 get_journal_list(jl);
1768 get_journal_list(flush_jl);
1808 /* try to find a group of blocks we can flush across all the 1769 /* try to find a group of blocks we can flush across all the
1809 ** transactions, but only bother if we've actually spanned 1770 ** transactions, but only bother if we've actually spanned
1810 ** across multiple lists 1771 ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
1813 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i); 1774 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1814 } 1775 }
1815 flush_journal_list(s, flush_jl, 1); 1776 flush_journal_list(s, flush_jl, 1);
1777 put_journal_list(s, flush_jl);
1778 put_journal_list(s, jl);
1816 return 0; 1779 return 0;
1817} 1780}
1818 1781
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
3868 return 1; 3831 return 1;
3869} 3832}
3870 3833
3871static void flush_old_journal_lists(struct super_block *s)
3872{
3873 struct reiserfs_journal *journal = SB_JOURNAL(s);
3874 struct reiserfs_journal_list *jl;
3875 struct list_head *entry;
3876 time_t now = get_seconds();
3877
3878 while (!list_empty(&journal->j_journal_list)) {
3879 entry = journal->j_journal_list.next;
3880 jl = JOURNAL_LIST_ENTRY(entry);
3881 /* this check should always be run, to send old lists to disk */
3882 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3883 atomic_read(&jl->j_commit_left) == 0 &&
3884 test_transaction(s, jl)) {
3885 flush_used_journal_lists(s, jl);
3886 } else {
3887 break;
3888 }
3889 }
3890}
3891
3892/* 3834/*
3893** long and ugly. If flush, will not return until all commit 3835** long and ugly. If flush, will not return until all commit
3894** blocks and all real buffers in the trans are on disk. 3836** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4232 } 4174 }
4233 } 4175 }
4234 } 4176 }
4235 flush_old_journal_lists(sb);
4236 4177
4237 journal->j_current_jl->j_list_bitmap = 4178 journal->j_current_jl->j_list_bitmap =
4238 get_list_bitmap(sb, journal->j_current_jl); 4179 get_list_bitmap(sb, journal->j_current_jl);
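
[Editor's note] flush_used_journal_lists() above now takes a reference on both journal lists before working on them and drops it afterwards, so a concurrent commit cannot free a list that is still being flushed; the now-unused newer_jl_done(), test_transaction() and flush_old_journal_lists() helpers go away with the old scheme. The get/put shape, as a generic C11 refcount sketch (names are not the reiserfs API):

#include <stdatomic.h>
#include <stdlib.h>

struct jlist { atomic_int refs; };

static struct jlist *alloc_jlist(void)
{
        struct jlist *jl = calloc(1, sizeof(*jl));
        if (jl)
                atomic_store(&jl->refs, 1);    /* creator holds the first reference */
        return jl;
}

static void get_jlist(struct jlist *jl)
{
        atomic_fetch_add_explicit(&jl->refs, 1, memory_order_relaxed);
}

static void put_jlist(struct jlist *jl)
{
        if (atomic_fetch_sub_explicit(&jl->refs, 1, memory_order_acq_rel) == 1)
                free(jl);                      /* last reference gone: safe to free */
}
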
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 3135c2525c76..a290157265ef 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
328 m->read_pos = offset; 328 m->read_pos = offset;
329 retval = file->f_pos = offset; 329 retval = file->f_pos = offset;
330 } 330 }
331 } else {
332 file->f_pos = offset;
331 } 333 }
332 } 334 }
333 file->f_version = m->version; 335 file->f_version = m->version;
diff --git a/fs/statfs.c b/fs/statfs.c
index c219e733f553..083dc0ac9140 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -94,7 +94,7 @@ retry:
94 94
95int fd_statfs(int fd, struct kstatfs *st) 95int fd_statfs(int fd, struct kstatfs *st)
96{ 96{
97 struct fd f = fdget(fd); 97 struct fd f = fdget_raw(fd);
98 int error = -EBADF; 98 int error = -EBADF;
99 if (f.file) { 99 if (f.file) {
100 error = vfs_statfs(&f.file->f_path, st); 100 error = vfs_statfs(&f.file->f_path, st);
diff --git a/fs/super.c b/fs/super.c
index 3a96c9783a8b..0225c20f8770 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -264,6 +264,8 @@ out_free_sb:
264 */ 264 */
265static inline void destroy_super(struct super_block *s) 265static inline void destroy_super(struct super_block *s)
266{ 266{
267 list_lru_destroy(&s->s_dentry_lru);
268 list_lru_destroy(&s->s_inode_lru);
267#ifdef CONFIG_SMP 269#ifdef CONFIG_SMP
268 free_percpu(s->s_files); 270 free_percpu(s->s_files);
269#endif 271#endif
@@ -323,8 +325,6 @@ void deactivate_locked_super(struct super_block *s)
323 325
324 /* caches are now gone, we can safely kill the shrinker now */ 326 /* caches are now gone, we can safely kill the shrinker now */
325 unregister_shrinker(&s->s_shrink); 327 unregister_shrinker(&s->s_shrink);
326 list_lru_destroy(&s->s_dentry_lru);
327 list_lru_destroy(&s->s_inode_lru);
328 328
329 put_filesystem(fs); 329 put_filesystem(fs);
330 put_super(s); 330 put_super(s);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index d0c6a007ce83..eda10959714f 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -487,6 +487,7 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
487 sbi->s_sb = sb; 487 sbi->s_sb = sb;
488 sbi->s_block_base = 0; 488 sbi->s_block_base = 0;
489 sbi->s_type = FSTYPE_V7; 489 sbi->s_type = FSTYPE_V7;
490 mutex_init(&sbi->s_lock);
490 sb->s_fs_info = sbi; 491 sb->s_fs_info = sbi;
491 492
492 sb_set_blocksize(sb, 512); 493 sb_set_blocksize(sb, 512);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 7e5aae4bf46f..6eaf5edf1ea1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
30{ 30{
31 struct super_block *sb = inode->i_sb; 31 struct super_block *sb = inode->i_sb;
32 struct udf_sb_info *sbi = UDF_SB(sb); 32 struct udf_sb_info *sbi = UDF_SB(sb);
33 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
33 34
34 mutex_lock(&sbi->s_alloc_mutex); 35 if (lvidiu) {
35 if (sbi->s_lvid_bh) { 36 mutex_lock(&sbi->s_alloc_mutex);
36 struct logicalVolIntegrityDescImpUse *lvidiu =
37 udf_sb_lvidiu(sbi);
38 if (S_ISDIR(inode->i_mode)) 37 if (S_ISDIR(inode->i_mode))
39 le32_add_cpu(&lvidiu->numDirs, -1); 38 le32_add_cpu(&lvidiu->numDirs, -1);
40 else 39 else
41 le32_add_cpu(&lvidiu->numFiles, -1); 40 le32_add_cpu(&lvidiu->numFiles, -1);
42 udf_updated_lvid(sb); 41 udf_updated_lvid(sb);
42 mutex_unlock(&sbi->s_alloc_mutex);
43 } 43 }
44 mutex_unlock(&sbi->s_alloc_mutex);
45 44
46 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); 45 udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
47} 46}
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
55 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 54 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
56 struct udf_inode_info *iinfo; 55 struct udf_inode_info *iinfo;
57 struct udf_inode_info *dinfo = UDF_I(dir); 56 struct udf_inode_info *dinfo = UDF_I(dir);
57 struct logicalVolIntegrityDescImpUse *lvidiu;
58 58
59 inode = new_inode(sb); 59 inode = new_inode(sb);
60 60
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
92 return NULL; 92 return NULL;
93 } 93 }
94 94
95 if (sbi->s_lvid_bh) { 95 lvidiu = udf_sb_lvidiu(sb);
96 struct logicalVolIntegrityDescImpUse *lvidiu; 96 if (lvidiu) {
97
98 iinfo->i_unique = lvid_get_unique_id(sb); 97 iinfo->i_unique = lvid_get_unique_id(sb);
99 mutex_lock(&sbi->s_alloc_mutex); 98 mutex_lock(&sbi->s_alloc_mutex);
100 lvidiu = udf_sb_lvidiu(sbi);
101 if (S_ISDIR(mode)) 99 if (S_ISDIR(mode))
102 le32_add_cpu(&lvidiu->numDirs, 1); 100 le32_add_cpu(&lvidiu->numDirs, 1);
103 else 101 else
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 839a2bad7f45..91219385691d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
94static int udf_statfs(struct dentry *, struct kstatfs *); 94static int udf_statfs(struct dentry *, struct kstatfs *);
95static int udf_show_options(struct seq_file *, struct dentry *); 95static int udf_show_options(struct seq_file *, struct dentry *);
96 96
97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) 97struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
98{ 98{
99 struct logicalVolIntegrityDesc *lvid = 99 struct logicalVolIntegrityDesc *lvid;
100 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; 100 unsigned int partnum;
101 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); 101 unsigned int offset;
102 __u32 offset = number_of_partitions * 2 * 102
103 sizeof(uint32_t)/sizeof(uint8_t); 103 if (!UDF_SB(sb)->s_lvid_bh)
104 return NULL;
105 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
106 partnum = le32_to_cpu(lvid->numOfPartitions);
107 if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
108 offsetof(struct logicalVolIntegrityDesc, impUse)) /
109 (2 * sizeof(uint32_t)) < partnum) {
110 udf_err(sb, "Logical volume integrity descriptor corrupted "
111 "(numOfPartitions = %u)!\n", partnum);
112 return NULL;
113 }
114 /* The offset is to skip freeSpaceTable and sizeTable arrays */
115 offset = partnum * 2 * sizeof(uint32_t);
104 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); 116 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
105} 117}
106 118
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
629 struct udf_options uopt; 641 struct udf_options uopt;
630 struct udf_sb_info *sbi = UDF_SB(sb); 642 struct udf_sb_info *sbi = UDF_SB(sb);
631 int error = 0; 643 int error = 0;
644 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
632 645
633 if (sbi->s_lvid_bh) { 646 if (lvidiu) {
634 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); 647 int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
635 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) 648 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
636 return -EACCES; 649 return -EACCES;
637 } 650 }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
1905 1918
1906 if (!bh) 1919 if (!bh)
1907 return; 1920 return;
1908
1909 mutex_lock(&sbi->s_alloc_mutex);
1910 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 1921 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1911 lvidiu = udf_sb_lvidiu(sbi); 1922 lvidiu = udf_sb_lvidiu(sb);
1923 if (!lvidiu)
1924 return;
1912 1925
1926 mutex_lock(&sbi->s_alloc_mutex);
1913 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1927 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1914 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1928 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1915 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, 1929 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
1937 1951
1938 if (!bh) 1952 if (!bh)
1939 return; 1953 return;
1954 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1955 lvidiu = udf_sb_lvidiu(sb);
1956 if (!lvidiu)
1957 return;
1940 1958
1941 mutex_lock(&sbi->s_alloc_mutex); 1959 mutex_lock(&sbi->s_alloc_mutex);
1942 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1943 lvidiu = udf_sb_lvidiu(sbi);
1944 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 1960 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1945 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 1961 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1946 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME); 1962 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
2093 2109
2094 if (sbi->s_lvid_bh) { 2110 if (sbi->s_lvid_bh) {
2095 struct logicalVolIntegrityDescImpUse *lvidiu = 2111 struct logicalVolIntegrityDescImpUse *lvidiu =
2096 udf_sb_lvidiu(sbi); 2112 udf_sb_lvidiu(sb);
2097 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); 2113 uint16_t minUDFReadRev;
2098 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); 2114 uint16_t minUDFWriteRev;
2099 /* uint16_t maxUDFWriteRev =
2100 le16_to_cpu(lvidiu->maxUDFWriteRev); */
2101 2115
2116 if (!lvidiu) {
2117 ret = -EINVAL;
2118 goto error_out;
2119 }
2120 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2121 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2102 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 2122 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2103 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2123 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2104 le16_to_cpu(lvidiu->minUDFReadRev), 2124 minUDFReadRev,
2105 UDF_MAX_READ_VERSION); 2125 UDF_MAX_READ_VERSION);
2106 ret = -EINVAL; 2126 ret = -EINVAL;
2107 goto error_out; 2127 goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2265 struct logicalVolIntegrityDescImpUse *lvidiu; 2285 struct logicalVolIntegrityDescImpUse *lvidiu;
2266 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2286 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2267 2287
2268 if (sbi->s_lvid_bh != NULL) 2288 lvidiu = udf_sb_lvidiu(sb);
2269 lvidiu = udf_sb_lvidiu(sbi);
2270 else
2271 lvidiu = NULL;
2272
2273 buf->f_type = UDF_SUPER_MAGIC; 2289 buf->f_type = UDF_SUPER_MAGIC;
2274 buf->f_bsize = sb->s_blocksize; 2290 buf->f_bsize = sb->s_blocksize;
2275 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; 2291 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
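
[Editor's note] udf_sb_lvidiu() above now takes the super_block, returns NULL when there is no LVID buffer, and validates the on-disk numOfPartitions against the space actually available in the descriptor before using it to compute the impUse offset; every caller is converted to cope with NULL. The validate-before-indexing shape, with made-up sizes standing in for the real descriptor layout:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_SIZE  2048u        /* hypothetical descriptor block size */
#define HDR_SIZE      64u        /* hypothetical header before the tables */
#define IMPUSE_SIZE   32u        /* hypothetical trailing impUse structure */

/* Two uint32_t tables (free-space and size) per partition precede impUse. */
static const void *impuse_ptr(const uint8_t *desc, uint32_t nparts)
{
        if ((BLOCK_SIZE - IMPUSE_SIZE - HDR_SIZE) / (2 * sizeof(uint32_t)) < nparts)
                return NULL;     /* corrupted count: refuse to compute the offset */
        return desc + HDR_SIZE + (size_t)nparts * 2 * sizeof(uint32_t);
}
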
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index ed401e94aa8c..1f32c7bd9f57 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
162 return sb->s_fs_info; 162 return sb->s_fs_info;
163} 163}
164 164
165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); 165struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
166 166
167int udf_compute_nr_groups(struct super_block *sb, u32 partition); 167int udf_compute_nr_groups(struct super_block *sb, u32 partition);
168 168
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 88c5ea75ebf6..f1d85cfc0a54 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
628 else if (aborted) { 628 else if (aborted) {
629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); 629 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
630 if (lip->li_flags & XFS_LI_IN_AIL) { 630 if (lip->li_flags & XFS_LI_IN_AIL) {
631 spin_lock(&lip->li_ailp->xa_lock);
631 xfs_trans_ail_delete(lip->li_ailp, lip, 632 xfs_trans_ail_delete(lip->li_ailp, lip,
632 SHUTDOWN_LOG_IO_ERROR); 633 SHUTDOWN_LOG_IO_ERROR);
633 } 634 }
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 069537c845e5..20bf8e8002d6 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
1224 /* start with smaller blk num */ 1224 /* start with smaller blk num */
1225 forward = nodehdr.forw < nodehdr.back; 1225 forward = nodehdr.forw < nodehdr.back;
1226 for (i = 0; i < 2; forward = !forward, i++) { 1226 for (i = 0; i < 2; forward = !forward, i++) {
1227 struct xfs_da3_icnode_hdr thdr;
1227 if (forward) 1228 if (forward)
1228 blkno = nodehdr.forw; 1229 blkno = nodehdr.forw;
1229 else 1230 else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
1236 return(error); 1237 return(error);
1237 1238
1238 node = bp->b_addr; 1239 node = bp->b_addr;
1239 xfs_da3_node_hdr_from_disk(&nodehdr, node); 1240 xfs_da3_node_hdr_from_disk(&thdr, node);
1240 xfs_trans_brelse(state->args->trans, bp); 1241 xfs_trans_brelse(state->args->trans, bp);
1241 1242
1242 if (count - nodehdr.count >= 0) 1243 if (count - thdr.count >= 0)
1243 break; /* fits with at least 25% to spare */ 1244 break; /* fits with at least 25% to spare */
1244 } 1245 }
1245 if (i >= 2) { 1246 if (i >= 2) {
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 0957aa98b6c0..12dad188939d 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -1158,7 +1158,7 @@ xfs_dir2_sf_to_block(
1158 /* 1158 /*
1159 * Create entry for . 1159 * Create entry for .
1160 */ 1160 */
1161 dep = xfs_dir3_data_dot_entry_p(hdr); 1161 dep = xfs_dir3_data_dot_entry_p(mp, hdr);
1162 dep->inumber = cpu_to_be64(dp->i_ino); 1162 dep->inumber = cpu_to_be64(dp->i_ino);
1163 dep->namelen = 1; 1163 dep->namelen = 1;
1164 dep->name[0] = '.'; 1164 dep->name[0] = '.';
@@ -1172,7 +1172,7 @@ xfs_dir2_sf_to_block(
1172 /* 1172 /*
1173 * Create entry for .. 1173 * Create entry for ..
1174 */ 1174 */
1175 dep = xfs_dir3_data_dotdot_entry_p(hdr); 1175 dep = xfs_dir3_data_dotdot_entry_p(mp, hdr);
1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp)); 1176 dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
1177 dep->namelen = 2; 1177 dep->namelen = 2;
1178 dep->name[0] = dep->name[1] = '.'; 1178 dep->name[0] = dep->name[1] = '.';
@@ -1183,7 +1183,7 @@ xfs_dir2_sf_to_block(
1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); 1183 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1184 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
1185 (char *)dep - (char *)hdr)); 1185 (char *)dep - (char *)hdr));
1186 offset = xfs_dir3_data_first_offset(hdr); 1186 offset = xfs_dir3_data_first_offset(mp);
1187 /* 1187 /*
1188 * Loop over existing entries, stuff them in. 1188 * Loop over existing entries, stuff them in.
1189 */ 1189 */
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index a0961a61ac1a..9cf67381adf6 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -497,69 +497,58 @@ xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
497/* 497/*
498 * Offsets of . and .. in data space (always block 0) 498 * Offsets of . and .. in data space (always block 0)
499 * 499 *
500 * The macros are used for shortform directories as they have no headers to read
501 * the magic number out of. Shortform directories need to know the size of the
502 * data block header because the sfe embeds the block offset of the entry into
503 * it so that it doesn't change when format conversion occurs. Bad Things Happen
504 * if we don't follow this rule.
505 *
506 * XXX: there is scope for significant optimisation of the logic here. Right 500 * XXX: there is scope for significant optimisation of the logic here. Right
507 * now we are checking for "dir3 format" over and over again. Ideally we should 501 * now we are checking for "dir3 format" over and over again. Ideally we should
508 * only do it once for each operation. 502 * only do it once for each operation.
509 */ 503 */
510#define XFS_DIR3_DATA_DOT_OFFSET(mp) \
511 xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&(mp)->m_sb))
512#define XFS_DIR3_DATA_DOTDOT_OFFSET(mp) \
513 (XFS_DIR3_DATA_DOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 1))
514#define XFS_DIR3_DATA_FIRST_OFFSET(mp) \
515 (XFS_DIR3_DATA_DOTDOT_OFFSET(mp) + xfs_dir3_data_entsize(mp, 2))
516
517static inline xfs_dir2_data_aoff_t 504static inline xfs_dir2_data_aoff_t
518xfs_dir3_data_dot_offset(struct xfs_dir2_data_hdr *hdr) 505xfs_dir3_data_dot_offset(struct xfs_mount *mp)
519{ 506{
520 return xfs_dir3_data_entry_offset(hdr); 507 return xfs_dir3_data_hdr_size(xfs_sb_version_hascrc(&mp->m_sb));
521} 508}
522 509
523static inline xfs_dir2_data_aoff_t 510static inline xfs_dir2_data_aoff_t
524xfs_dir3_data_dotdot_offset(struct xfs_dir2_data_hdr *hdr) 511xfs_dir3_data_dotdot_offset(struct xfs_mount *mp)
525{ 512{
526 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 513 return xfs_dir3_data_dot_offset(mp) +
527 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 514 xfs_dir3_data_entsize(mp, 1);
528 return xfs_dir3_data_dot_offset(hdr) +
529 __xfs_dir3_data_entsize(dir3, 1);
530} 515}
531 516
532static inline xfs_dir2_data_aoff_t 517static inline xfs_dir2_data_aoff_t
533xfs_dir3_data_first_offset(struct xfs_dir2_data_hdr *hdr) 518xfs_dir3_data_first_offset(struct xfs_mount *mp)
534{ 519{
535 bool dir3 = hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) || 520 return xfs_dir3_data_dotdot_offset(mp) +
536 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC); 521 xfs_dir3_data_entsize(mp, 2);
537 return xfs_dir3_data_dotdot_offset(hdr) +
538 __xfs_dir3_data_entsize(dir3, 2);
539} 522}
540 523
541/* 524/*
542 * location of . and .. in data space (always block 0) 525 * location of . and .. in data space (always block 0)
543 */ 526 */
544static inline struct xfs_dir2_data_entry * 527static inline struct xfs_dir2_data_entry *
545xfs_dir3_data_dot_entry_p(struct xfs_dir2_data_hdr *hdr) 528xfs_dir3_data_dot_entry_p(
529 struct xfs_mount *mp,
530 struct xfs_dir2_data_hdr *hdr)
546{ 531{
547 return (struct xfs_dir2_data_entry *) 532 return (struct xfs_dir2_data_entry *)
548 ((char *)hdr + xfs_dir3_data_dot_offset(hdr)); 533 ((char *)hdr + xfs_dir3_data_dot_offset(mp));
549} 534}
550 535
551static inline struct xfs_dir2_data_entry * 536static inline struct xfs_dir2_data_entry *
552xfs_dir3_data_dotdot_entry_p(struct xfs_dir2_data_hdr *hdr) 537xfs_dir3_data_dotdot_entry_p(
538 struct xfs_mount *mp,
539 struct xfs_dir2_data_hdr *hdr)
553{ 540{
554 return (struct xfs_dir2_data_entry *) 541 return (struct xfs_dir2_data_entry *)
555 ((char *)hdr + xfs_dir3_data_dotdot_offset(hdr)); 542 ((char *)hdr + xfs_dir3_data_dotdot_offset(mp));
556} 543}
557 544
558static inline struct xfs_dir2_data_entry * 545static inline struct xfs_dir2_data_entry *
559xfs_dir3_data_first_entry_p(struct xfs_dir2_data_hdr *hdr) 546xfs_dir3_data_first_entry_p(
547 struct xfs_mount *mp,
548 struct xfs_dir2_data_hdr *hdr)
560{ 549{
561 return (struct xfs_dir2_data_entry *) 550 return (struct xfs_dir2_data_entry *)
562 ((char *)hdr + xfs_dir3_data_first_offset(hdr)); 551 ((char *)hdr + xfs_dir3_data_first_offset(mp));
563} 552}
564 553
565/* 554/*
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8993ec17452c..8f84153e98a8 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -119,9 +119,9 @@ xfs_dir2_sf_getdents(
119 * mp->m_dirdatablk. 119 * mp->m_dirdatablk.
120 */ 120 */
121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 121 dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
122 XFS_DIR3_DATA_DOT_OFFSET(mp)); 122 xfs_dir3_data_dot_offset(mp));
123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 123 dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
124 XFS_DIR3_DATA_DOTDOT_OFFSET(mp)); 124 xfs_dir3_data_dotdot_offset(mp));
125 125
126 /* 126 /*
127 * Put . entry unless we're starting past it. 127 * Put . entry unless we're starting past it.
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index bb6e2848f473..3ef6d402084c 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -557,7 +557,7 @@ xfs_dir2_sf_addname_hard(
557 * to insert the new entry. 557 * to insert the new entry.
558 * If it's going to end up at the end then oldsfep will point there. 558 * If it's going to end up at the end then oldsfep will point there.
559 */ 559 */
560 for (offset = XFS_DIR3_DATA_FIRST_OFFSET(mp), 560 for (offset = xfs_dir3_data_first_offset(mp),
561 oldsfep = xfs_dir2_sf_firstentry(oldsfp), 561 oldsfep = xfs_dir2_sf_firstentry(oldsfp),
562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen), 562 add_datasize = xfs_dir3_data_entsize(mp, args->namelen),
563 eof = (char *)oldsfep == &buf[old_isize]; 563 eof = (char *)oldsfep == &buf[old_isize];
@@ -640,7 +640,7 @@ xfs_dir2_sf_addname_pick(
640 640
641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 641 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
642 size = xfs_dir3_data_entsize(mp, args->namelen); 642 size = xfs_dir3_data_entsize(mp, args->namelen);
643 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 643 offset = xfs_dir3_data_first_offset(mp);
644 sfep = xfs_dir2_sf_firstentry(sfp); 644 sfep = xfs_dir2_sf_firstentry(sfp);
645 holefit = 0; 645 holefit = 0;
646 /* 646 /*
@@ -713,7 +713,7 @@ xfs_dir2_sf_check(
713 mp = dp->i_mount; 713 mp = dp->i_mount;
714 714
715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 715 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
716 offset = XFS_DIR3_DATA_FIRST_OFFSET(mp); 716 offset = xfs_dir3_data_first_offset(mp);
717 ino = xfs_dir2_sf_get_parent_ino(sfp); 717 ino = xfs_dir2_sf_get_parent_ino(sfp);
718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 718 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
719 719
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 71520e6e5d65..1ee776d477c3 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -64,7 +64,8 @@ int xfs_dqerror_mod = 33;
64struct kmem_zone *xfs_qm_dqtrxzone; 64struct kmem_zone *xfs_qm_dqtrxzone;
65static struct kmem_zone *xfs_qm_dqzone; 65static struct kmem_zone *xfs_qm_dqzone;
66 66
67static struct lock_class_key xfs_dquot_other_class; 67static struct lock_class_key xfs_dquot_group_class;
68static struct lock_class_key xfs_dquot_project_class;
68 69
69/* 70/*
70 * This is called to free all the memory associated with a dquot 71 * This is called to free all the memory associated with a dquot
@@ -703,8 +704,20 @@ xfs_qm_dqread(
703 * Make sure group quotas have a different lock class than user 704 * Make sure group quotas have a different lock class than user
704 * quotas. 705 * quotas.
705 */ 706 */
706 if (!(type & XFS_DQ_USER)) 707 switch (type) {
707 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); 708 case XFS_DQ_USER:
709 /* uses the default lock class */
710 break;
711 case XFS_DQ_GROUP:
712 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
713 break;
714 case XFS_DQ_PROJ:
715 lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
716 break;
717 default:
718 ASSERT(0);
719 break;
720 }
708 721
709 XFS_STATS_INC(xs_qm_dquot); 722 XFS_STATS_INC(xs_qm_dquot);
710 723
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 1edb5cc3e5f4..18272c766a50 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */ 515/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap) 516#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64) 517#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks) 518#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
519 519
520/* 520/*
521 * ioctl commands that replace IRIX syssgi()'s 521 * ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 193206ba4358..474807a401c8 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -119,11 +119,6 @@ xfs_inode_free(
119 ip->i_itemp = NULL; 119 ip->i_itemp = NULL;
120 } 120 }
121 121
122 /* asserts to verify all state is correct here */
123 ASSERT(atomic_read(&ip->i_pincount) == 0);
124 ASSERT(!spin_is_locked(&ip->i_flags_lock));
125 ASSERT(!xfs_isiflocked(ip));
126
127 /* 122 /*
128 * Because we use RCU freeing we need to ensure the inode always 123 * Because we use RCU freeing we need to ensure the inode always
129 * appears to be reclaimed with an invalid inode number when in the 124 * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
135 ip->i_ino = 0; 130 ip->i_ino = 0;
136 spin_unlock(&ip->i_flags_lock); 131 spin_unlock(&ip->i_flags_lock);
137 132
133 /* asserts to verify all state is correct here */
134 ASSERT(atomic_read(&ip->i_pincount) == 0);
135 ASSERT(!xfs_isiflocked(ip));
136
138 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 137 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
139} 138}
140 139
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dabda9521b4b..39797490a1f1 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1585,6 +1585,7 @@ xlog_recover_add_to_trans(
1585 "bad number of regions (%d) in inode log format", 1585 "bad number of regions (%d) in inode log format",
1586 in_f->ilf_size); 1586 in_f->ilf_size);
1587 ASSERT(0); 1587 ASSERT(0);
1588 kmem_free(ptr);
1588 return XFS_ERROR(EIO); 1589 return XFS_ERROR(EIO);
1589 } 1590 }
1590 1591
@@ -1970,6 +1971,13 @@ xlog_recover_do_inode_buffer(
1970 * magic number. If we don't recognise the magic number in the buffer, then 1971 * magic number. If we don't recognise the magic number in the buffer, then
1971 * return a LSN of -1 so that the caller knows it was an unrecognised block and 1972 * return a LSN of -1 so that the caller knows it was an unrecognised block and
1972 * so can recover the buffer. 1973 * so can recover the buffer.
1974 *
1975 * Note: we cannot rely solely on magic number matches to determine that the
1976 * buffer has a valid LSN - we also need to verify that it belongs to this
1977 * filesystem, so we need to extract the object's LSN and compare it to that
1978 * which we read from the superblock. If the UUIDs don't match, then we've got a
1979 * stale metadata block from an old filesystem instance that we need to recover
1980 * over the top of.
1973 */ 1981 */
1974static xfs_lsn_t 1982static xfs_lsn_t
1975xlog_recover_get_buf_lsn( 1983xlog_recover_get_buf_lsn(
@@ -1980,6 +1988,8 @@ xlog_recover_get_buf_lsn(
1980 __uint16_t magic16; 1988 __uint16_t magic16;
1981 __uint16_t magicda; 1989 __uint16_t magicda;
1982 void *blk = bp->b_addr; 1990 void *blk = bp->b_addr;
1991 uuid_t *uuid;
1992 xfs_lsn_t lsn = -1;
1983 1993
1984 /* v4 filesystems always recover immediately */ 1994 /* v4 filesystems always recover immediately */
1985 if (!xfs_sb_version_hascrc(&mp->m_sb)) 1995 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2002,79 @@ xlog_recover_get_buf_lsn(
1992 case XFS_ABTB_MAGIC: 2002 case XFS_ABTB_MAGIC:
1993 case XFS_ABTC_MAGIC: 2003 case XFS_ABTC_MAGIC:
1994 case XFS_IBT_CRC_MAGIC: 2004 case XFS_IBT_CRC_MAGIC:
1995 case XFS_IBT_MAGIC: 2005 case XFS_IBT_MAGIC: {
1996 return be64_to_cpu( 2006 struct xfs_btree_block *btb = blk;
1997 ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn); 2007
2008 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2009 uuid = &btb->bb_u.s.bb_uuid;
2010 break;
2011 }
1998 case XFS_BMAP_CRC_MAGIC: 2012 case XFS_BMAP_CRC_MAGIC:
1999 case XFS_BMAP_MAGIC: 2013 case XFS_BMAP_MAGIC: {
2000 return be64_to_cpu( 2014 struct xfs_btree_block *btb = blk;
2001 ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn); 2015
2016 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2017 uuid = &btb->bb_u.l.bb_uuid;
2018 break;
2019 }
2002 case XFS_AGF_MAGIC: 2020 case XFS_AGF_MAGIC:
2003 return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); 2021 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2022 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2023 break;
2004 case XFS_AGFL_MAGIC: 2024 case XFS_AGFL_MAGIC:
2005 return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); 2025 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2026 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2027 break;
2006 case XFS_AGI_MAGIC: 2028 case XFS_AGI_MAGIC:
2007 return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); 2029 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2030 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2031 break;
2008 case XFS_SYMLINK_MAGIC: 2032 case XFS_SYMLINK_MAGIC:
2009 return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); 2033 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2034 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2035 break;
2010 case XFS_DIR3_BLOCK_MAGIC: 2036 case XFS_DIR3_BLOCK_MAGIC:
2011 case XFS_DIR3_DATA_MAGIC: 2037 case XFS_DIR3_DATA_MAGIC:
2012 case XFS_DIR3_FREE_MAGIC: 2038 case XFS_DIR3_FREE_MAGIC:
2013 return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); 2039 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2040 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2041 break;
2014 case XFS_ATTR3_RMT_MAGIC: 2042 case XFS_ATTR3_RMT_MAGIC:
2015 return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 2043 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2044 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2045 break;
2016 case XFS_SB_MAGIC: 2046 case XFS_SB_MAGIC:
2017 return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 2047 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2048 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2049 break;
2018 default: 2050 default:
2019 break; 2051 break;
2020 } 2052 }
2021 2053
2054 if (lsn != (xfs_lsn_t)-1) {
2055 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2056 goto recover_immediately;
2057 return lsn;
2058 }
2059
2022 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic); 2060 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2023 switch (magicda) { 2061 switch (magicda) {
2024 case XFS_DIR3_LEAF1_MAGIC: 2062 case XFS_DIR3_LEAF1_MAGIC:
2025 case XFS_DIR3_LEAFN_MAGIC: 2063 case XFS_DIR3_LEAFN_MAGIC:
2026 case XFS_DA3_NODE_MAGIC: 2064 case XFS_DA3_NODE_MAGIC:
2027 return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); 2065 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2066 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2067 break;
2028 default: 2068 default:
2029 break; 2069 break;
2030 } 2070 }
2031 2071
2072 if (lsn != (xfs_lsn_t)-1) {
2073 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2074 goto recover_immediately;
2075 return lsn;
2076 }
2077
2032 /* 2078 /*
2033 * We do individual object checks on dquot and inode buffers as they 2079 * We do individual object checks on dquot and inode buffers as they
2034 * have their own individual LSN records. Also, we could have a stale 2080 * have their own individual LSN records. Also, we could have a stale
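[Editor's sketch] The hunk above changes xlog_recover_get_buf_lsn() from returning each object's LSN directly to also extracting the object's UUID and checking it against the mounted superblock before the LSN is trusted. A minimal illustration of that decision, using a hypothetical helper name (check_lsn_with_uuid is not in the patch):

	static xfs_lsn_t check_lsn_with_uuid(struct xfs_mount *mp, xfs_lsn_t lsn,
					     uuid_t *uuid)
	{
		/* Unrecognised magic: no trustworthy LSN, recover immediately. */
		if (lsn == (xfs_lsn_t)-1)
			return (xfs_lsn_t)-1;
		/* Stale block from an old filesystem instance: recover over it. */
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			return (xfs_lsn_t)-1;
		return lsn;
	}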
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 02e113bb8b7d..d9019821aa60 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -311,7 +311,6 @@ struct acpi_device {
311 unsigned int physical_node_count; 311 unsigned int physical_node_count;
312 struct list_head physical_node_list; 312 struct list_head physical_node_list;
313 struct mutex physical_node_lock; 313 struct mutex physical_node_lock;
314 struct list_head power_dependent;
315 void (*remove)(struct acpi_device *); 314 void (*remove)(struct acpi_device *);
316}; 315};
317 316
@@ -456,8 +455,6 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
456acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, 455acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
457 acpi_notify_handler handler); 456 acpi_notify_handler handler);
458int acpi_pm_device_sleep_state(struct device *, int *, int); 457int acpi_pm_device_sleep_state(struct device *, int *, int);
459void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
460void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
461#else 458#else
462static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, 459static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
463 acpi_notify_handler handler, 460 acpi_notify_handler handler,
@@ -478,10 +475,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
478 return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? 475 return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
479 m : ACPI_STATE_D0; 476 m : ACPI_STATE_D0;
480} 477}
481static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
482 struct device *depdev) {}
483static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
484 struct device *depdev) {}
485#endif 478#endif
486 479
487#ifdef CONFIG_PM_RUNTIME 480#ifdef CONFIG_PM_RUNTIME
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d06079c774a0..99b490b4d05a 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -6,12 +6,12 @@ static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
6 return mk_pte(page, pgprot); 6 return mk_pte(page, pgprot);
7} 7}
8 8
9static inline int huge_pte_write(pte_t pte) 9static inline unsigned long huge_pte_write(pte_t pte)
10{ 10{
11 return pte_write(pte); 11 return pte_write(pte);
12} 12}
13 13
14static inline int huge_pte_dirty(pte_t pte) 14static inline unsigned long huge_pte_dirty(pte_t pte)
15{ 15{
16 return pte_dirty(pte); 16 return pte_dirty(pte);
17} 17}
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
index e69de29bb2d1..b1a49677fe25 100644
--- a/include/asm-generic/vtime.h
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
/* no content, but patch(1) dislikes empty files */
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index edbd250809cb..bed35e36fd27 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -23,7 +23,7 @@
23#define PULL_UP (1 << 4) 23#define PULL_UP (1 << 4)
24#define ALTELECTRICALSEL (1 << 5) 24#define ALTELECTRICALSEL (1 << 5)
25 25
26/* 34xx specific mux bit defines */ 26/* omap3/4/5 specific mux bit defines */
27#define INPUT_EN (1 << 8) 27#define INPUT_EN (1 << 8)
28#define OFF_EN (1 << 9) 28#define OFF_EN (1 << 9)
29#define OFFOUT_EN (1 << 10) 29#define OFFOUT_EN (1 << 10)
@@ -31,8 +31,6 @@
31#define OFF_PULL_EN (1 << 12) 31#define OFF_PULL_EN (1 << 12)
32#define OFF_PULL_UP (1 << 13) 32#define OFF_PULL_UP (1 << 13)
33#define WAKEUP_EN (1 << 14) 33#define WAKEUP_EN (1 << 14)
34
35/* 44xx specific mux bit defines */
36#define WAKEUP_EVENT (1 << 15) 34#define WAKEUP_EVENT (1 << 15)
37 35
38/* Active pin states */ 36/* Active pin states */
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f7f1d7169b11..089743ade734 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -159,6 +159,26 @@ static inline bool balloon_page_movable(struct page *page)
159} 159}
160 160
161/* 161/*
162 * isolated_balloon_page - identify an isolated balloon page on private
163 * compaction/migration page lists.
164 *
165 * After a compaction thread isolates a balloon page for migration, it raises
166 * the page refcount to prevent concurrent compaction threads from re-isolating
167 * the same page. For that reason putback_movable_pages(), or other routines
168 * that need to identify isolated balloon pages on private pagelists, cannot
169 * rely on balloon_page_movable() to accomplish the task.
170 */
171static inline bool isolated_balloon_page(struct page *page)
172{
173 /* Already isolated balloon pages, by default, have a raised refcount */
174 if (page_flags_cleared(page) && !page_mapped(page) &&
175 page_count(page) >= 2)
176 return __is_movable_balloon_page(page);
177
178 return false;
179}
180
181/*
162 * balloon_page_insert - insert a page into the balloon's page list and make 182 * balloon_page_insert - insert a page into the balloon's page list and make
163 * the page->mapping assignment accordingly. 183 * the page->mapping assignment accordingly.
164 * @page : page to be assigned as a 'balloon page' 184 * @page : page to be assigned as a 'balloon page'
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
243 return false; 263 return false;
244} 264}
245 265
266static inline bool isolated_balloon_page(struct page *page)
267{
268 return false;
269}
270
246static inline bool balloon_page_isolate(struct page *page) 271static inline bool balloon_page_isolate(struct page *page)
247{ 272{
248 return false; 273 return false;
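[Editor's sketch] The new isolated_balloon_page() helper exists for callers that walk private lists of already-isolated pages, where the raised refcount breaks the assumptions behind balloon_page_movable(). A hedged sketch of such a caller; putback_isolated_pages is an illustrative name, and balloon_page_putback()/putback_lru_page() are assumed to be available to the caller:

	static void putback_isolated_pages(struct list_head *pages)
	{
		struct page *page, *next;

		list_for_each_entry_safe(page, next, pages, lru) {
			list_del(&page->lru);
			if (isolated_balloon_page(page))
				balloon_page_putback(page);	/* assumed available */
			else
				putback_lru_page(page);		/* assumed available */
		}
	}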
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index d66033f418c9..0333e605ea0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
242 struct bcma_device *core, bool enable); 242 struct bcma_device *core, bool enable);
243extern void bcma_core_pci_up(struct bcma_bus *bus); 243extern void bcma_core_pci_up(struct bcma_bus *bus);
244extern void bcma_core_pci_down(struct bcma_bus *bus); 244extern void bcma_core_pci_down(struct bcma_bus *bus);
245extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
245 246
246extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 247extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
247extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); 248extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 842de225055f..ded429966c1f 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -65,6 +65,21 @@
65#define __visible __attribute__((externally_visible)) 65#define __visible __attribute__((externally_visible))
66#endif 66#endif
67 67
68/*
69 * GCC 'asm goto' miscompiles certain code sequences:
70 *
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 *
 73 * Work around it via a compiler barrier quirk suggested by Jakub Jelinek.
74 * Fixed in GCC 4.8.2 and later versions.
75 *
76 * (asm goto is automatically volatile - the naming reflects this.)
77 */
78#if GCC_VERSION <= 40801
79# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
80#else
81# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
82#endif
68 83
69#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP 84#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
70#if GCC_VERSION >= 40400 85#if GCC_VERSION >= 40400
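[Editor's sketch] The asm_volatile_goto() wrapper is meant to be a drop-in for plain 'asm goto' at every call site, with the trailing empty asm acting as the barrier on affected GCC versions. A minimal, hypothetical caller (hypothetical_branch_hint is not part of this patch) would use it exactly like asm goto:

	static __always_inline bool hypothetical_branch_hint(void)
	{
		asm_volatile_goto("1: nop\n\t"
				  /* section/patching bookkeeping omitted */
				  : : : : l_yes);
		return false;
	l_yes:
		return true;
	}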
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 653073de09e3..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
406union map_info *dm_get_mapinfo(struct bio *bio); 406union map_info *dm_get_mapinfo(struct bio *bio);
407union map_info *dm_get_rq_mapinfo(struct request *rq); 407union map_info *dm_get_rq_mapinfo(struct request *rq);
408 408
409struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
410
409/* 411/*
410 * Geometry functions. 412 * Geometry functions.
411 */ 413 */
412int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); 414int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
413int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); 415int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
414 416
415
416/*----------------------------------------------------------------- 417/*-----------------------------------------------------------------
417 * Functions for manipulating device-mapper tables. 418 * Functions for manipulating device-mapper tables.
418 *---------------------------------------------------------------*/ 419 *---------------------------------------------------------------*/
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a6ac84871d6d..ff4e40cd45b1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
6 6
7#include <linux/atomic.h> 7#include <linux/atomic.h>
8#include <linux/compat.h> 8#include <linux/compat.h>
9#include <linux/workqueue.h>
9#include <uapi/linux/filter.h> 10#include <uapi/linux/filter.h>
10 11
11#ifdef CONFIG_COMPAT 12#ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
25{ 26{
26 atomic_t refcnt; 27 atomic_t refcnt;
27 unsigned int len; /* Number of filter blocks */ 28 unsigned int len; /* Number of filter blocks */
29 struct rcu_head rcu;
28 unsigned int (*bpf_func)(const struct sk_buff *skb, 30 unsigned int (*bpf_func)(const struct sk_buff *skb,
29 const struct sock_filter *filter); 31 const struct sock_filter *filter);
30 struct rcu_head rcu; 32 union {
31 struct sock_filter insns[0]; 33 struct sock_filter insns[0];
34 struct work_struct work;
35 };
32}; 36};
33 37
34static inline unsigned int sk_filter_len(const struct sk_filter *fp) 38static inline unsigned int sk_filter_size(unsigned int proglen)
35{ 39{
36 return fp->len * sizeof(struct sock_filter) + sizeof(*fp); 40 return max(sizeof(struct sk_filter),
41 offsetof(struct sk_filter, insns[proglen]));
37} 42}
38 43
39extern int sk_filter(struct sock *sk, struct sk_buff *skb); 44extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
67} 72}
68#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) 73#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
69#else 74#else
75#include <linux/slab.h>
70static inline void bpf_jit_compile(struct sk_filter *fp) 76static inline void bpf_jit_compile(struct sk_filter *fp)
71{ 77{
72} 78}
73static inline void bpf_jit_free(struct sk_filter *fp) 79static inline void bpf_jit_free(struct sk_filter *fp)
74{ 80{
81 kfree(fp);
75} 82}
76#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns) 83#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
77#endif 84#endif
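[Editor's sketch] sk_filter_len() is replaced by sk_filter_size(), which takes the instruction count rather than a filter pointer and clamps the result with max() because insns[] now shares a union with the work_struct used for deferred freeing: a very short program must still leave room for the work item. A hedged allocation sketch under that rule (alloc_filter is an illustrative name, not from the patch):

	static struct sk_filter *alloc_filter(unsigned int flen)
	{
		/* Room for the header plus flen instructions, and never smaller
		 * than sizeof(struct sk_filter) so the embedded work_struct
		 * always fits. */
		return kzalloc(sk_filter_size(flen), GFP_KERNEL);
	}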
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..d98503bde7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
30/* 30/*
31 * Framework version for util services. 31 * Framework version for util services.
32 */ 32 */
33#define UTIL_FW_MINOR 0
34
35#define UTIL_WS2K8_FW_MAJOR 1
36#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
33 37
34#define UTIL_FW_MAJOR 3 38#define UTIL_FW_MAJOR 3
35#define UTIL_FW_MINOR 0 39#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
36#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
37 40
38 41
39/* 42/*
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 78e2ada50cd5..d380c5e68008 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -55,7 +55,7 @@
55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ 55#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ 56#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ 57#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
58#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ 58#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ 59#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
60 60
61#define OFFSET_STRIDE (9) 61#define OFFSET_STRIDE (9)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 482ad2d84a32..672ddc4de4af 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
439 return buf; 439 return buf;
440} 440}
441 441
442extern const char hex_asc_upper[];
443#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
444#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
445
446static inline char *hex_byte_pack_upper(char *buf, u8 byte)
447{
448 *buf++ = hex_asc_upper_hi(byte);
449 *buf++ = hex_asc_upper_lo(byte);
450 return buf;
451}
452
442static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) 453static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
443{ 454{
444 return hex_byte_pack(buf, byte); 455 return hex_byte_pack(buf, byte);
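[Editor's sketch] hex_byte_pack_upper() mirrors hex_byte_pack() but emits upper-case digits via the new hex_asc_upper[] table. A minimal usage sketch; bytes_to_hex_upper is a hypothetical helper, not part of the patch:

	static void bytes_to_hex_upper(char *dst, const u8 *src, size_t len)
	{
		while (len--)
			dst = hex_byte_pack_upper(dst, *src++);
		*dst = '\0';
	}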
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 60e95872da29..b3e7a667e03c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
53 unsigned int generation; 53 unsigned int generation;
54}; 54};
55 55
56enum mem_cgroup_filter_t {
57 VISIT, /* visit current node */
58 SKIP, /* skip the current node and continue traversal */
59 SKIP_TREE, /* skip the whole subtree and continue traversal */
60};
61
62/*
63 * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
64 * iterate through the hierarchy tree. Each tree element is checked by the
65 * predicate before it is returned by the iterator. If a filter returns
66 * SKIP or SKIP_TREE then the iterator code continues traversal (with the
67 * next node down the hierarchy or the next node that doesn't belong under the
68 * memcg's subtree).
69 */
70typedef enum mem_cgroup_filter_t
71(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
72
73#ifdef CONFIG_MEMCG 56#ifdef CONFIG_MEMCG
74/* 57/*
75 * All "charge" functions with gfp_mask should use GFP_KERNEL or 58 * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
137extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, 120extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
138 struct page *oldpage, struct page *newpage, bool migration_ok); 121 struct page *oldpage, struct page *newpage, bool migration_ok);
139 122
140struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 123struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
141 struct mem_cgroup *prev, 124 struct mem_cgroup *,
142 struct mem_cgroup_reclaim_cookie *reclaim, 125 struct mem_cgroup_reclaim_cookie *);
143 mem_cgroup_iter_filter cond);
144
145static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
146 struct mem_cgroup *prev,
147 struct mem_cgroup_reclaim_cookie *reclaim)
148{
149 return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
150}
151
152void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 126void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
153 127
154/* 128/*
@@ -163,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
163extern void mem_cgroup_replace_page_cache(struct page *oldpage, 137extern void mem_cgroup_replace_page_cache(struct page *oldpage,
164 struct page *newpage); 138 struct page *newpage);
165 139
166/** 140static inline void mem_cgroup_oom_enable(void)
167 * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
168 * @new: true to enable, false to disable
169 *
170 * Toggle whether a failed memcg charge should invoke the OOM killer
171 * or just return -ENOMEM. Returns the previous toggle state.
172 *
173 * NOTE: Any path that enables the OOM killer before charging must
174 * call mem_cgroup_oom_synchronize() afterward to finalize the
175 * OOM handling and clean up.
176 */
177static inline bool mem_cgroup_toggle_oom(bool new)
178{ 141{
179 bool old; 142 WARN_ON(current->memcg_oom.may_oom);
180 143 current->memcg_oom.may_oom = 1;
181 old = current->memcg_oom.may_oom;
182 current->memcg_oom.may_oom = new;
183
184 return old;
185} 144}
186 145
187static inline void mem_cgroup_enable_oom(void) 146static inline void mem_cgroup_oom_disable(void)
188{ 147{
189 bool old = mem_cgroup_toggle_oom(true); 148 WARN_ON(!current->memcg_oom.may_oom);
190 149 current->memcg_oom.may_oom = 0;
191 WARN_ON(old == true);
192}
193
194static inline void mem_cgroup_disable_oom(void)
195{
196 bool old = mem_cgroup_toggle_oom(false);
197
198 WARN_ON(old == false);
199} 150}
200 151
201static inline bool task_in_memcg_oom(struct task_struct *p) 152static inline bool task_in_memcg_oom(struct task_struct *p)
202{ 153{
203 return p->memcg_oom.in_memcg_oom; 154 return p->memcg_oom.memcg;
204} 155}
205 156
206bool mem_cgroup_oom_synchronize(void); 157bool mem_cgroup_oom_synchronize(bool wait);
207 158
208#ifdef CONFIG_MEMCG_SWAP 159#ifdef CONFIG_MEMCG_SWAP
209extern int do_swap_account; 160extern int do_swap_account;
@@ -260,9 +211,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
260 mem_cgroup_update_page_stat(page, idx, -1); 211 mem_cgroup_update_page_stat(page, idx, -1);
261} 212}
262 213
263enum mem_cgroup_filter_t 214unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
264mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 215 gfp_t gfp_mask,
265 struct mem_cgroup *root); 216 unsigned long *total_scanned);
266 217
267void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); 218void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
268static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 219static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +327,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
376 struct page *oldpage, struct page *newpage, bool migration_ok) 327 struct page *oldpage, struct page *newpage, bool migration_ok)
377{ 328{
378} 329}
379static inline struct mem_cgroup *
380mem_cgroup_iter_cond(struct mem_cgroup *root,
381 struct mem_cgroup *prev,
382 struct mem_cgroup_reclaim_cookie *reclaim,
383 mem_cgroup_iter_filter cond)
384{
385 /* first call must return non-NULL, second return NULL */
386 return (struct mem_cgroup *)(unsigned long)!prev;
387}
388 330
389static inline struct mem_cgroup * 331static inline struct mem_cgroup *
390mem_cgroup_iter(struct mem_cgroup *root, 332mem_cgroup_iter(struct mem_cgroup *root,
@@ -437,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
437{ 379{
438} 380}
439 381
440static inline bool mem_cgroup_toggle_oom(bool new) 382static inline void mem_cgroup_oom_enable(void)
441{ 383{
442 return false;
443} 384}
444 385
445static inline void mem_cgroup_enable_oom(void) 386static inline void mem_cgroup_oom_disable(void)
446{
447}
448
449static inline void mem_cgroup_disable_oom(void)
450{ 387{
451} 388}
452 389
@@ -455,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
455 return false; 392 return false;
456} 393}
457 394
458static inline bool mem_cgroup_oom_synchronize(void) 395static inline bool mem_cgroup_oom_synchronize(bool wait)
459{ 396{
460 return false; 397 return false;
461} 398}
@@ -471,11 +408,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
471} 408}
472 409
473static inline 410static inline
474enum mem_cgroup_filter_t 411unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
475mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg, 412 gfp_t gfp_mask,
476 struct mem_cgroup *root) 413 unsigned long *total_scanned)
477{ 414{
478 return VISIT; 415 return 0;
479} 416}
480 417
481static inline void mem_cgroup_split_huge_fixup(struct page *head) 418static inline void mem_cgroup_split_huge_fixup(struct page *head)
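[Editor's sketch] The memcg OOM interface drops the toggle-style helpers in favour of mem_cgroup_oom_enable()/mem_cgroup_oom_disable() plus mem_cgroup_oom_synchronize(bool), which now takes an explicit wait argument. A hedged sketch of the intended call pattern around a userspace fault; handle_user_fault is an illustrative wrapper, and the real arch fault handlers differ in detail:

	static int handle_user_fault(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long address, unsigned int flags)
	{
		int fault;

		mem_cgroup_oom_enable();
		fault = handle_mm_fault(mm, vma, address, flags);
		mem_cgroup_oom_disable();

		/* If the charge failed, finish the memcg OOM handling here,
		 * outside of any locks the fault path may still hold. */
		if ((fault & VM_FAULT_OOM) && task_in_memcg_oom(current))
			mem_cgroup_oom_synchronize(true);

		return fault;
	}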
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 09c2300ddb37..cb358355ef43 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -45,6 +45,7 @@
45#define MAPPER_CTRL_MINOR 236 45#define MAPPER_CTRL_MINOR 236
46#define LOOP_CTRL_MINOR 237 46#define LOOP_CTRL_MINOR 237
47#define VHOST_NET_MINOR 238 47#define VHOST_NET_MINOR 238
48#define UHID_MINOR 239
48#define MISC_DYNAMIC_MINOR 255 49#define MISC_DYNAMIC_MINOR 255
49 50
50struct device; 51struct device;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 68029b30c3dc..5eb4e31af22b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -181,7 +181,7 @@ enum {
181 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, 181 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
182 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, 182 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
183 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, 183 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
184 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, 184 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
185}; 185};
186 186
187enum { 187enum {
@@ -417,7 +417,7 @@ struct mlx5_init_seg {
417 struct health_buffer health; 417 struct health_buffer health;
418 __be32 rsvd2[884]; 418 __be32 rsvd2[884];
419 __be32 health_counter; 419 __be32 health_counter;
420 __be32 rsvd3[1023]; 420 __be32 rsvd3[1019];
421 __be64 ieee1588_clk; 421 __be64 ieee1588_clk;
422 __be32 ieee1588_clk_type; 422 __be32 ieee1588_clk_type;
423 __be32 clr_intx; 423 __be32 clr_intx;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 8888381fc150..6b8c496572c8 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -82,7 +82,7 @@ enum {
82}; 82};
83 83
84enum { 84enum {
85 MLX5_MAX_EQ_NAME = 20 85 MLX5_MAX_EQ_NAME = 32
86}; 86};
87 87
88enum { 88enum {
@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
747 747
748enum { 748enum {
749 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, 749 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
750 MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, 750 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
751 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
752}; 751};
753 752
754enum { 753enum {
@@ -758,7 +757,6 @@ enum {
758struct mlx5_profile { 757struct mlx5_profile {
759 u64 mask; 758 u64 mask;
760 u32 log_max_qp; 759 u32 log_max_qp;
761 int cmdif_csum;
762 struct { 760 struct {
763 int size; 761 int size;
764 int limit; 762 int limit;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..bab49da8a0f0 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
15#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18
19#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h>
20 20
21/* 21/*
22 * Simple, straightforward mutexes with strict semantics: 22 * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
175 175
176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 176extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
177 177
178#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX 178#ifndef arch_mutex_cpu_relax
179#define arch_mutex_cpu_relax() cpu_relax() 179# define arch_mutex_cpu_relax() cpu_relax()
180#endif 180#endif
181 181
182#endif 182#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3de49aca4519..25f5d2d11e7c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2264,11 +2264,12 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2264} 2264}
2265 2265
2266#ifdef CONFIG_XPS 2266#ifdef CONFIG_XPS
2267extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, 2267extern int netif_set_xps_queue(struct net_device *dev,
2268 const struct cpumask *mask,
2268 u16 index); 2269 u16 index);
2269#else 2270#else
2270static inline int netif_set_xps_queue(struct net_device *dev, 2271static inline int netif_set_xps_queue(struct net_device *dev,
2271 struct cpumask *mask, 2272 const struct cpumask *mask,
2272 u16 index) 2273 u16 index)
2273{ 2274{
2274 return 0; 2275 return 0;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 01fd84b566f7..49f52c8f4422 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
1455 struct inode * (*open_context) (struct inode *dir, 1455 struct inode * (*open_context) (struct inode *dir,
1456 struct nfs_open_context *ctx, 1456 struct nfs_open_context *ctx,
1457 int open_flags, 1457 int open_flags,
1458 struct iattr *iattr); 1458 struct iattr *iattr,
1459 int *);
1459 int (*have_delegation)(struct inode *, fmode_t); 1460 int (*have_delegation)(struct inode *, fmode_t);
1460 int (*return_delegation)(struct inode *); 1461 int (*return_delegation)(struct inode *);
1461 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); 1462 struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..fcd63baee5f2 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
1#ifndef __OF_IRQ_H 1#ifndef __OF_IRQ_H
2#define __OF_IRQ_H 2#define __OF_IRQ_H
3 3
4#if defined(CONFIG_OF)
5struct of_irq;
6#include <linux/types.h> 4#include <linux/types.h>
7#include <linux/errno.h> 5#include <linux/errno.h>
8#include <linux/irq.h> 6#include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
10#include <linux/ioport.h> 8#include <linux/ioport.h>
11#include <linux/of.h> 9#include <linux/of.h>
12 10
13/*
14 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
15 * implements it differently. However, the prototype is the same for all,
16 * so declare it here regardless of the CONFIG_OF_IRQ setting.
17 */
18extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
19
20#if defined(CONFIG_OF_IRQ)
21/** 11/**
22 * of_irq - container for device_node/irq_specifier pair for an irq controller 12 * of_irq - container for device_node/irq_specifier pair for an irq controller
23 * @controller: pointer to interrupt controller device tree node 13 * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
71extern int of_irq_count(struct device_node *dev); 61extern int of_irq_count(struct device_node *dev);
72extern int of_irq_to_resource_table(struct device_node *dev, 62extern int of_irq_to_resource_table(struct device_node *dev,
73 struct resource *res, int nr_irqs); 63 struct resource *res, int nr_irqs);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
75 64
76extern void of_irq_init(const struct of_device_id *matches); 65extern void of_irq_init(const struct of_device_id *matches);
77 66
78#endif /* CONFIG_OF_IRQ */ 67#if defined(CONFIG_OF)
68/*
69 * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
70 * implements it differently. However, the prototype is the same for all,
71 * so declare it here regardless of the CONFIG_OF_IRQ setting.
72 */
73extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
74extern struct device_node *of_irq_find_parent(struct device_node *child);
79 75
80#else /* !CONFIG_OF */ 76#else /* !CONFIG_OF */
81static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 77static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
deleted file mode 100644
index c84128255814..000000000000
--- a/include/linux/of_reserved_mem.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef __OF_RESERVED_MEM_H
2#define __OF_RESERVED_MEM_H
3
4#ifdef CONFIG_OF_RESERVED_MEM
5void of_reserved_mem_device_init(struct device *dev);
6void of_reserved_mem_device_release(struct device *dev);
7void early_init_dt_scan_reserved_mem(void);
8#else
9static inline void of_reserved_mem_device_init(struct device *dev) { }
10static inline void of_reserved_mem_device_release(struct device *dev) { }
11static inline void early_init_dt_scan_reserved_mem(void) { }
12#endif
13
14#endif /* __OF_RESERVED_MEM_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 866e85c5eb94..c8ba627c1d60 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -294,9 +294,31 @@ struct ring_buffer;
294 */ 294 */
295struct perf_event { 295struct perf_event {
296#ifdef CONFIG_PERF_EVENTS 296#ifdef CONFIG_PERF_EVENTS
297 struct list_head group_entry; 297 /*
298 * entry onto perf_event_context::event_list;
299 * modifications require ctx->lock
300 * RCU safe iterations.
301 */
298 struct list_head event_entry; 302 struct list_head event_entry;
303
304 /*
305 * XXX: group_entry and sibling_list should be mutually exclusive;
 306 * either you're a sibling in a group, or you're the group leader.
307 * Rework the code to always use the same list element.
308 *
309 * Locked for modification by both ctx->mutex and ctx->lock; holding
 310 * either suffices for read.
311 */
312 struct list_head group_entry;
299 struct list_head sibling_list; 313 struct list_head sibling_list;
314
315 /*
316 * We need storage to track the entries in perf_pmu_migrate_context; we
317 * cannot use the event_entry because of RCU and we want to keep the
 318 * group intact, which avoids using the other two entries.
319 */
320 struct list_head migrate_entry;
321
300 struct hlist_node hlist_entry; 322 struct hlist_node hlist_entry;
301 int nr_siblings; 323 int nr_siblings;
302 int group_flags; 324 int group_flags;
diff --git a/include/linux/random.h b/include/linux/random.h
index 3b9377d6b7a5..6312dd9ba449 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
17extern void get_random_bytes(void *buf, int nbytes); 17extern void get_random_bytes(void *buf, int nbytes);
18extern void get_random_bytes_arch(void *buf, int nbytes); 18extern void get_random_bytes_arch(void *buf, int nbytes);
19void generate_random_uuid(unsigned char uuid_out[16]); 19void generate_random_uuid(unsigned char uuid_out[16]);
20extern int random_int_secret_init(void);
20 21
21#ifndef MODULE 22#ifndef MODULE
22extern const struct file_operations random_fops, urandom_fops; 23extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 67e13aa5a478..9bdad43ad228 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,8 @@ enum regulator_status {
40}; 40};
41 41
42/** 42/**
43 * struct regulator_linear_range - specify linear voltage ranges
44 *
 43 * Specify a range of voltages for regulator_map_linear_range() and 45 * Specify a range of voltages for regulator_map_linear_range() and
44 * regulator_list_linear_range(). 46 * regulator_list_linear_range().
45 * 47 *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6682da36b293..e27baeeda3f4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1394,11 +1394,10 @@ struct task_struct {
1394 } memcg_batch; 1394 } memcg_batch;
1395 unsigned int memcg_kmem_skip_account; 1395 unsigned int memcg_kmem_skip_account;
1396 struct memcg_oom_info { 1396 struct memcg_oom_info {
1397 struct mem_cgroup *memcg;
1398 gfp_t gfp_mask;
1399 int order;
1397 unsigned int may_oom:1; 1400 unsigned int may_oom:1;
1398 unsigned int in_memcg_oom:1;
1399 unsigned int oom_locked:1;
1400 int wakeups;
1401 struct mem_cgroup *wait_on_memcg;
1402 } memcg_oom; 1401 } memcg_oom;
1403#endif 1402#endif
1404#ifdef CONFIG_UPROBES 1403#ifdef CONFIG_UPROBES
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2ddb48d9312c..c2d89335f637 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -498,7 +498,7 @@ struct sk_buff {
498 * headers if needed 498 * headers if needed
499 */ 499 */
500 __u8 encapsulation:1; 500 __u8 encapsulation:1;
501 /* 7/9 bit hole (depending on ndisc_nodetype presence) */ 501 /* 6/8 bit hole (depending on ndisc_nodetype presence) */
502 kmemcheck_bitfield_end(flags2); 502 kmemcheck_bitfield_end(flags2);
503 503
504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL 504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
diff --git a/include/linux/smp.h b/include/linux/smp.h
index cfb7ca094b38..731f5237d5f4 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
155 155
156static inline void kick_all_cpus_sync(void) { } 156static inline void kick_all_cpus_sync(void) { }
157 157
158static inline void __smp_call_function_single(int cpuid,
159 struct call_single_data *data, int wait)
160{
161 on_each_cpu(data->func, data->info, wait);
162}
163
158#endif /* !SMP */ 164#endif /* !SMP */
159 165
160/* 166/*
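[Editor's sketch] With the !SMP stub added above, __smp_call_function_single() becomes usable unconditionally; on uniprocessor builds it simply runs the callback locally via on_each_cpu(). A hedged caller sketch (poke_cpu and my_csd are illustrative names only):

	static struct call_single_data my_csd;

	static void poke_cpu(int cpu, smp_call_func_t fn, void *info)
	{
		my_csd.func = fn;
		my_csd.info = info;
		__smp_call_function_single(cpu, &my_csd, 0);	/* no wait */
	}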
diff --git a/include/linux/timex.h b/include/linux/timex.h
index dd3edd7dfc94..9d3f1a5b6178 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -64,6 +64,20 @@
64 64
65#include <asm/timex.h> 65#include <asm/timex.h>
66 66
67#ifndef random_get_entropy
68/*
69 * The random_get_entropy() function is used by the /dev/random driver
70 * in order to extract entropy via the relative unpredictability of
 71 * when an interrupt takes place versus a high-speed, fine-grained
 72 * timing source or cycle counter. Since it is called on every
73 * single interrupt, it must have a very low cost/overhead.
74 *
75 * By default we use get_cycles() for this purpose, but individual
76 * architectures may override this in their asm/timex.h header file.
77 */
78#define random_get_entropy() get_cycles()
79#endif
80
67/* 81/*
68 * SHIFT_PLL is used as a dampening factor to define how much we 82 * SHIFT_PLL is used as a dampening factor to define how much we
69 * adjust the frequency correction for a given offset in PLL mode. 83 * adjust the frequency correction for a given offset in PLL mode.
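[Editor's sketch] random_get_entropy() defaults to get_cycles(), but an architecture whose cycle counter is absent or too slow for interrupt context can supply its own definition in asm/timex.h before this fallback is seen. A purely illustrative override; my_read_fast_counter() is hypothetical:

	/* in <asm/timex.h>, ahead of <linux/timex.h> picking the default */
	#define random_get_entropy()	((unsigned long)my_read_fast_counter())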
diff --git a/include/linux/usb/usb_phy_gen_xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h
index f9a7e7bc925b..11d85b9c1b08 100644
--- a/include/linux/usb/usb_phy_gen_xceiv.h
+++ b/include/linux/usb/usb_phy_gen_xceiv.h
@@ -12,7 +12,7 @@ struct usb_phy_gen_xceiv_platform_data {
12 unsigned int needs_reset:1; 12 unsigned int needs_reset:1;
13}; 13};
14 14
15#if IS_ENABLED(CONFIG_NOP_USB_XCEIV) 15#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
16/* sometimes transceivers are accessed only through e.g. ULPI */ 16/* sometimes transceivers are accessed only through e.g. ULPI */
17extern void usb_nop_xceiv_register(void); 17extern void usb_nop_xceiv_register(void);
18extern void usb_nop_xceiv_unregister(void); 18extern void usb_nop_xceiv_unregister(void);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9cb2fe8ca944..e303eef94dd5 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,6 +42,7 @@ struct usbnet {
42 struct usb_host_endpoint *status; 42 struct usb_host_endpoint *status;
43 unsigned maxpacket; 43 unsigned maxpacket;
44 struct timer_list delay; 44 struct timer_list delay;
45 const char *padding_pkt;
45 46
46 /* protocol/interface state */ 47 /* protocol/interface state */
47 struct net_device *net; 48 struct net_device *net;
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index bf99cd01be20..630356866030 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -66,7 +66,9 @@
66 US_FLAG(INITIAL_READ10, 0x00100000) \ 66 US_FLAG(INITIAL_READ10, 0x00100000) \
67 /* Initial READ(10) (and others) must be retried */ \ 67 /* Initial READ(10) (and others) must be retried */ \
68 US_FLAG(WRITE_CACHE, 0x00200000) \ 68 US_FLAG(WRITE_CACHE, 0x00200000) \
69 /* Write Cache status is not available */ 69 /* Write Cache status is not available */ \
70 US_FLAG(NEEDS_CAP16, 0x00400000)
71 /* cannot handle READ_CAPACITY_10 */
70 72
71#define US_FLAG(name, value) US_FL_##name = value , 73#define US_FLAG(name, value) US_FL_##name = value ,
72enum { US_DO_ALL_FLAGS }; 74enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 80cf8173a65b..2c02f3a8d2ba 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -65,15 +65,8 @@ struct pci_dev;
 65 * out of the arbitration process (and can safely take 65 * out of the arbitration process (and can safely take
 66 * interrupts at any time). 66 * interrupts at any time).
67 */ 67 */
68#if defined(CONFIG_VGA_ARB)
69extern void vga_set_legacy_decoding(struct pci_dev *pdev, 68extern void vga_set_legacy_decoding(struct pci_dev *pdev,
70 unsigned int decodes); 69 unsigned int decodes);
71#else
72static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
73 unsigned int decodes)
74{
75}
76#endif
77 70
78/** 71/**
79 * vga_get - acquire & locks VGA resources 72 * vga_get - acquire & locks VGA resources
diff --git a/include/linux/yam.h b/include/linux/yam.h
index 7fe28228b274..512cdc2fb80f 100644
--- a/include/linux/yam.h
+++ b/include/linux/yam.h
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
77 77
78struct yamdrv_ioctl_mcs { 78struct yamdrv_ioctl_mcs {
79 int cmd; 79 int cmd;
80 int bitrate; 80 unsigned int bitrate;
81 unsigned char bits[YAM_FPGA_SIZE]; 81 unsigned char bits[YAM_FPGA_SIZE];
82}; 82};
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index fb314de2b61b..86505bfa5d2c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); 67int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
68#endif 68#endif
69 69
70bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
71 const unsigned int prefix_len,
72 struct net_device *dev);
73
70int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); 74int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
71 75
72struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, 76struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index aaeaf0938ec0..15f10841e2b5 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -104,6 +104,7 @@ enum {
104enum { 104enum {
105 HCI_SETUP, 105 HCI_SETUP,
106 HCI_AUTO_OFF, 106 HCI_AUTO_OFF,
107 HCI_RFKILLED,
107 HCI_MGMT, 108 HCI_MGMT,
108 HCI_PAIRABLE, 109 HCI_PAIRABLE,
109 HCI_SERVICE_CACHE, 110 HCI_SERVICE_CACHE,
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a7a683e30b64..a8c2ef6d3b93 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
290 unsigned char err_offset = 0; 290 unsigned char err_offset = 0;
291 u8 opt_len = opt[1]; 291 u8 opt_len = opt[1];
292 u8 opt_iter; 292 u8 opt_iter;
293 u8 tag_len;
293 294
294 if (opt_len < 8) { 295 if (opt_len < 8) {
295 err_offset = 1; 296 err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
302 } 303 }
303 304
304 for (opt_iter = 6; opt_iter < opt_len;) { 305 for (opt_iter = 6; opt_iter < opt_len;) {
305 if (opt[opt_iter + 1] > (opt_len - opt_iter)) { 306 tag_len = opt[opt_iter + 1];
307 if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
306 err_offset = opt_iter + 1; 308 err_offset = opt_iter + 1;
307 goto out; 309 goto out;
308 } 310 }
309 opt_iter += opt[opt_iter + 1]; 311 opt_iter += tag_len;
310 } 312 }
311 313
312out: 314out:
diff --git a/include/net/dst.h b/include/net/dst.h
index 3bc4865f8267..3c4c944096c9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -479,10 +479,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
479{ 479{
480 return dst_orig; 480 return dst_orig;
481} 481}
482
483static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
484{
485 return NULL;
486}
487
482#else 488#else
483extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 489extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
484 const struct flowi *fl, struct sock *sk, 490 const struct flowi *fl, struct sock *sk,
485 int flags); 491 int flags);
492
 493/* An skb attached to this dst needs transformation if dst->xfrm is valid */
494static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
495{
496 return dst->xfrm;
497}
486#endif 498#endif
487 499
488#endif /* _NET_DST_H */ 500#endif /* _NET_DST_H */
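[Editor's sketch] dst_xfrm() gives callers one accessor that returns the attached xfrm_state on XFRM-enabled builds and NULL otherwise, so the same test compiles either way. A minimal, hypothetical caller:

	static bool skb_needs_xfrm_transform(const struct sk_buff *skb)
	{
		return dst_xfrm(skb_dst(skb)) != NULL;
	}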
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f525e7038cca..2b786b7e3585 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -194,11 +194,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
194 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 194 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
195} 195}
196 196
197static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest) 197static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
198{ 198{
199 if (rt->rt6i_flags & RTF_GATEWAY) 199 return &rt->rt6i_gateway;
200 return &rt->rt6i_gateway;
201 return dest;
202} 200}
203 201
204#endif 202#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index f0d70f066f3d..9c4d37ec45a1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
723 struct rcu_head rcu_head; 723 struct rcu_head rcu_head;
724}; 724};
725 725
726/* In grace period after removing */
727#define IP_VS_DEST_STATE_REMOVING 0x01
728/* 726/*
729 * The real server destination forwarding entry 727 * The real server destination forwarding entry
730 * with ip address, port number, and so on. 728 * with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
742 740
743 atomic_t refcnt; /* reference counter */ 741 atomic_t refcnt; /* reference counter */
744 struct ip_vs_stats stats; /* statistics */ 742 struct ip_vs_stats stats; /* statistics */
745 unsigned long state; /* state flags */ 743 unsigned long idle_start; /* start time, jiffies */
746 744
747 /* connection counters and thresholds */ 745 /* connection counters and thresholds */
748 atomic_t activeconns; /* active connections */ 746 atomic_t activeconns; /* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
756 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ 754 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
757 755
758 /* for virtual service */ 756 /* for virtual service */
759 struct ip_vs_service *svc; /* service it belongs to */ 757 struct ip_vs_service __rcu *svc; /* service it belongs to */
760 __u16 protocol; /* which protocol (TCP/UDP) */ 758 __u16 protocol; /* which protocol (TCP/UDP) */
761 __be16 vport; /* virtual port number */ 759 __be16 vport; /* virtual port number */
762 union nf_inet_addr vaddr; /* virtual IP address */ 760 union nf_inet_addr vaddr; /* virtual IP address */
763 __u32 vfwmark; /* firewall mark of service */ 761 __u32 vfwmark; /* firewall mark of service */
764 762
765 struct list_head t_list; /* in dest_trash */ 763 struct list_head t_list; /* in dest_trash */
766 struct rcu_head rcu_head;
767 unsigned int in_rs_table:1; /* we are in rs_table */ 764 unsigned int in_rs_table:1; /* we are in rs_table */
768}; 765};
769 766
@@ -1649,7 +1646,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1649/* CONFIG_IP_VS_NFCT */ 1646/* CONFIG_IP_VS_NFCT */
1650#endif 1647#endif
1651 1648
1652static inline unsigned int 1649static inline int
1653ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) 1650ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1654{ 1651{
1655 /* 1652 /*
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index d0d11df9cba1..807d6b7a943f 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -133,7 +133,7 @@ struct ieee802154_ops {
133 133
134/* Basic interface to register ieee802154 device */ 134/* Basic interface to register ieee802154 device */
135struct ieee802154_dev * 135struct ieee802154_dev *
136ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops); 136ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
137void ieee802154_free_device(struct ieee802154_dev *dev); 137void ieee802154_free_device(struct ieee802154_dev *dev);
138int ieee802154_register_device(struct ieee802154_dev *dev); 138int ieee802154_register_device(struct ieee802154_dev *dev);
139void ieee802154_unregister_device(struct ieee802154_dev *dev); 139void ieee802154_unregister_device(struct ieee802154_dev *dev);
diff --git a/include/net/mrp.h b/include/net/mrp.h
index 4fbf02aa2ec1..0f7558b638ae 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -112,6 +112,7 @@ struct mrp_applicant {
112 struct mrp_application *app; 112 struct mrp_application *app;
113 struct net_device *dev; 113 struct net_device *dev;
114 struct timer_list join_timer; 114 struct timer_list join_timer;
115 struct timer_list periodic_timer;
115 116
116 spinlock_t lock; 117 spinlock_t lock;
117 struct sk_buff_head queue; 118 struct sk_buff_head queue;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1313456a0994..9d22f08896c6 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -74,6 +74,7 @@ struct net {
74 struct hlist_head *dev_index_head; 74 struct hlist_head *dev_index_head;
75 unsigned int dev_base_seq; /* protected by rtnl_mutex */ 75 unsigned int dev_base_seq; /* protected by rtnl_mutex */
76 int ifindex; 76 int ifindex;
77 unsigned int dev_unreg_count;
77 78
78 /* core fib_rules */ 79 /* core fib_rules */
79 struct list_head rules_ops; 80 struct list_head rules_ops;
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 806f54a290d6..f572f313d6f1 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -56,7 +56,7 @@ struct synproxy_options {
56 56
57struct tcphdr; 57struct tcphdr;
58struct xt_synproxy_info; 58struct xt_synproxy_info;
59extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 59extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
60 const struct tcphdr *th, 60 const struct tcphdr *th,
61 struct synproxy_options *opts); 61 struct synproxy_options *opts);
62extern unsigned int synproxy_options_size(const struct synproxy_options *opts); 62extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 6ca975bebd37..c2e542b27a5a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6extern void net_secret_init(void);
7extern __u32 secure_ip_id(__be32 daddr); 6extern __u32 secure_ip_id(__be32 daddr);
8extern __u32 secure_ipv6_id(const __be32 daddr[4]); 7extern __u32 secure_ipv6_id(const __be32 daddr[4]);
9extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 8extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
diff --git a/include/net/sock.h b/include/net/sock.h
index 6ba2e7b0e2b1..808cbc2ec6c1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -409,6 +409,11 @@ struct sock {
409 void (*sk_destruct)(struct sock *sk); 409 void (*sk_destruct)(struct sock *sk);
410}; 410};
411 411
412#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
413
414#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
415#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
416
412/* 417/*
413 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK 418 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
414 * or not whether his port will be reused by someone else. SK_FORCE_REUSE 419 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
@@ -1625,16 +1630,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
1625 1630
1626static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) 1631static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
1627{ 1632{
1628 unsigned int size = sk_filter_len(fp); 1633 atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1629
1630 atomic_sub(size, &sk->sk_omem_alloc);
1631 sk_filter_release(fp); 1634 sk_filter_release(fp);
1632} 1635}
1633 1636
1634static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp) 1637static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
1635{ 1638{
1636 atomic_inc(&fp->refcnt); 1639 atomic_inc(&fp->refcnt);
1637 atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc); 1640 atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
1638} 1641}
1639 1642
1640/* 1643/*
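[Editor's sketch] The __sk_user_data()/rcu_dereference_sk_user_data()/rcu_assign_sk_user_data() macros let protocol code treat sk_user_data as an RCU-protected pointer instead of open-coding casts. A hedged usage sketch; struct my_priv, publish_private(), use_private() and consume() are all hypothetical:

	static void publish_private(struct sock *sk, struct my_priv *priv)
	{
		rcu_assign_sk_user_data(sk, priv);
	}

	static void use_private(struct sock *sk)
	{
		struct my_priv *priv;

		rcu_read_lock();
		priv = rcu_dereference_sk_user_data(sk);
		if (priv)
			consume(priv);		/* hypothetical consumer */
		rcu_read_unlock();
	}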
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index fe66533e9b7a..fb0a312bcb81 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -68,6 +68,7 @@ struct rsnd_scu_platform_info {
68 * 68 *
69 * A : generation 69 * A : generation
70 */ 70 */
71#define RSND_GEN_MASK (0xF << 0)
71#define RSND_GEN1 (1 << 0) /* fixme */ 72#define RSND_GEN1 (1 << 0) /* fixme */
72#define RSND_GEN2 (2 << 0) /* fixme */ 73#define RSND_GEN2 (2 << 0) /* fixme */
73 74
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index aef8fc354025..da9cc0f05c93 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
144 ), 144 ),
145 145
146 TP_fast_assign( 146 TP_fast_assign(
147 __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 147 __entry->unpacked_lun = cmd->orig_fe_lun;
148 __entry->opcode = cmd->t_task_cdb[0]; 148 __entry->opcode = cmd->t_task_cdb[0];
149 __entry->data_length = cmd->data_length; 149 __entry->data_length = cmd->data_length;
150 __entry->task_attribute = cmd->sam_task_attr; 150 __entry->task_attribute = cmd->sam_task_attr;
@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
182 ), 182 ),
183 183
184 TP_fast_assign( 184 TP_fast_assign(
185 __entry->unpacked_lun = cmd->se_lun->unpacked_lun; 185 __entry->unpacked_lun = cmd->orig_fe_lun;
186 __entry->opcode = cmd->t_task_cdb[0]; 186 __entry->opcode = cmd->t_task_cdb[0];
187 __entry->data_length = cmd->data_length; 187 __entry->data_length = cmd->data_length;
188 __entry->task_attribute = cmd->sam_task_attr; 188 __entry->task_attribute = cmd->sam_task_attr;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 550811712f78..28acbaf4a81e 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -223,6 +223,8 @@ struct drm_mode_get_connector {
223 __u32 connection; 223 __u32 connection;
224 __u32 mm_width, mm_height; /**< HxW in millimeters */ 224 __u32 mm_width, mm_height; /**< HxW in millimeters */
225 __u32 subpixel; 225 __u32 subpixel;
226
227 __u32 pad;
226}; 228};
227 229
228#define DRM_MODE_PROP_PENDING (1<<0) 230#define DRM_MODE_PROP_PENDING (1<<0)
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index fa8b3adf9ffb..46d41e8b0dcc 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1007#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1008#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1009 1009
1010#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1011
1010#endif 1012#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 40a1fb807396..009a655a5d35 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
380 union { 380 union {
381 __u64 capabilities; 381 __u64 capabilities;
382 struct { 382 struct {
383 __u64 cap_usr_time : 1, 383 __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
384 cap_usr_rdpmc : 1, 384 cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
385 cap_usr_time_zero : 1, 385
386 cap_____res : 61; 386 cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
387 cap_user_time : 1, /* The time_* fields are used */
388 cap_user_time_zero : 1, /* The time_zero field is used */
389 cap_____res : 59;
387 }; 390 };
388 }; 391 };
389 392
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
442 * ((rem * time_mult) >> time_shift); 445 * ((rem * time_mult) >> time_shift);
443 */ 446 */
444 __u64 time_zero; 447 __u64 time_zero;
448 __u32 size; /* Header size up to __reserved[] fields. */
445 449
446 /* 450 /*
447 * Hole for extension of the self monitor capabilities 451 * Hole for extension of the self monitor capabilities
448 */ 452 */
449 453
450 __u64 __reserved[119]; /* align to 1k */ 454 __u8 __reserved[118*8+4]; /* align to 1k. */
451 455
452 /* 456 /*
453 * Control data for the mmap() data buffer. 457 * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
528 * u64 len; 532 * u64 len;
529 * u64 pgoff; 533 * u64 pgoff;
530 * char filename[]; 534 * char filename[];
535 * struct sample_id sample_id;
531 * }; 536 * };
532 */ 537 */
533 PERF_RECORD_MMAP = 1, 538 PERF_RECORD_MMAP = 1,
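Note on the perf_event_mmap_page hunk above: the capability bits are rearranged so that new userspace can detect the fixed layout, since bit 0 is now always 0 and bit 1 always 1. A sketch of how a reader might probe this, assuming only the field names from the hunk; the struct is a trimmed stand-in, not the full perf_event_mmap_page:

#include <stdint.h>

/* Trimmed stand-in for the capability union in perf_event_mmap_page; the
 * bitfield names follow the hunk above, the surrounding struct does not. */
struct mmap_caps {
	union {
		uint64_t capabilities;
		struct {
			uint64_t cap_bit0               : 1, /* always 0 on fixed kernels */
				 cap_bit0_is_deprecated : 1, /* always 1 on fixed kernels */
				 cap_user_rdpmc         : 1,
				 cap_user_time          : 1,
				 cap_user_time_zero     : 1,
				 cap_____res            : 59;
		};
	};
};

static int rdpmc_usable(const struct mmap_caps *pg)
{
	if (pg->cap_bit0_is_deprecated)
		return pg->cap_user_rdpmc;	/* new layout: dedicated bit */
	/* old layout (removed side of the hunk): bit 1 was cap_usr_rdpmc */
	return (pg->capabilities >> 1) & 1;
}
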
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 0623ec4e728f..56f121605c99 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -1,5 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += tc_csum.h 2header-y += tc_csum.h
3header-y += tc_defact.h
3header-y += tc_gact.h 4header-y += tc_gact.h
4header-y += tc_ipt.h 5header-y += tc_ipt.h
5header-y += tc_mirred.h 6header-y += tc_mirred.h
diff --git a/include/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
index 6f65d07c7ce2..17dddb40f740 100644
--- a/include/linux/tc_act/tc_defact.h
+++ b/include/uapi/linux/tc_act/tc_defact.h
@@ -6,7 +6,7 @@
6struct tc_defact { 6struct tc_defact {
7 tc_gen; 7 tc_gen;
8}; 8};
9 9
10enum { 10enum {
11 TCA_DEF_UNSPEC, 11 TCA_DEF_UNSPEC,
12 TCA_DEF_TM, 12 TCA_DEF_TM,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 0b233c56b0e4..e3ddd86c90a6 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -87,8 +87,10 @@ enum {
87 IB_USER_VERBS_CMD_CLOSE_XRCD, 87 IB_USER_VERBS_CMD_CLOSE_XRCD,
88 IB_USER_VERBS_CMD_CREATE_XSRQ, 88 IB_USER_VERBS_CMD_CREATE_XSRQ,
89 IB_USER_VERBS_CMD_OPEN_QP, 89 IB_USER_VERBS_CMD_OPEN_QP,
90#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
90 IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 91 IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
91 IB_USER_VERBS_CMD_DESTROY_FLOW 92 IB_USER_VERBS_CMD_DESTROY_FLOW
93#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
92}; 94};
93 95
94/* 96/*
@@ -126,6 +128,7 @@ struct ib_uverbs_cmd_hdr {
126 __u16 out_words; 128 __u16 out_words;
127}; 129};
128 130
131#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
129struct ib_uverbs_cmd_hdr_ex { 132struct ib_uverbs_cmd_hdr_ex {
130 __u32 command; 133 __u32 command;
131 __u16 in_words; 134 __u16 in_words;
@@ -134,6 +137,7 @@ struct ib_uverbs_cmd_hdr_ex {
134 __u16 provider_out_words; 137 __u16 provider_out_words;
135 __u32 cmd_hdr_reserved; 138 __u32 cmd_hdr_reserved;
136}; 139};
140#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
137 141
138struct ib_uverbs_get_context { 142struct ib_uverbs_get_context {
139 __u64 response; 143 __u64 response;
@@ -696,6 +700,7 @@ struct ib_uverbs_detach_mcast {
696 __u64 driver_data[0]; 700 __u64 driver_data[0];
697}; 701};
698 702
703#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
699struct ib_kern_eth_filter { 704struct ib_kern_eth_filter {
700 __u8 dst_mac[6]; 705 __u8 dst_mac[6];
701 __u8 src_mac[6]; 706 __u8 src_mac[6];
@@ -780,6 +785,7 @@ struct ib_uverbs_destroy_flow {
780 __u32 comp_mask; 785 __u32 comp_mask;
781 __u32 flow_handle; 786 __u32 flow_handle;
782}; 787};
788#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
783 789
784struct ib_uverbs_create_srq { 790struct ib_uverbs_create_srq {
785 __u64 response; 791 __u64 response;
diff --git a/init/main.c b/init/main.c
index af310afbef28..63d3e8f2970c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -76,6 +76,7 @@
76#include <linux/elevator.h> 76#include <linux/elevator.h>
77#include <linux/sched_clock.h> 77#include <linux/sched_clock.h>
78#include <linux/context_tracking.h> 78#include <linux/context_tracking.h>
79#include <linux/random.h>
79 80
80#include <asm/io.h> 81#include <asm/io.h>
81#include <asm/bugs.h> 82#include <asm/bugs.h>
@@ -780,6 +781,7 @@ static void __init do_basic_setup(void)
780 do_ctors(); 781 do_ctors();
781 usermodehelper_enable(); 782 usermodehelper_enable();
782 do_initcalls(); 783 do_initcalls();
784 random_int_secret_init();
783} 785}
784 786
785static void __init do_pre_smp_initcalls(void) 787static void __init do_pre_smp_initcalls(void)
diff --git a/ipc/msg.c b/ipc/msg.c
index b0d541d42677..558aa91186b6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
165 ipc_rmid(&msg_ids(ns), &s->q_perm); 165 ipc_rmid(&msg_ids(ns), &s->q_perm);
166} 166}
167 167
168static void msg_rcu_free(struct rcu_head *head)
169{
170 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
171 struct msg_queue *msq = ipc_rcu_to_struct(p);
172
173 security_msg_queue_free(msq);
174 ipc_rcu_free(head);
175}
176
168/** 177/**
169 * newque - Create a new msg queue 178 * newque - Create a new msg queue
170 * @ns: namespace 179 * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
189 msq->q_perm.security = NULL; 198 msq->q_perm.security = NULL;
190 retval = security_msg_queue_alloc(msq); 199 retval = security_msg_queue_alloc(msq);
191 if (retval) { 200 if (retval) {
192 ipc_rcu_putref(msq); 201 ipc_rcu_putref(msq, ipc_rcu_free);
193 return retval; 202 return retval;
194 } 203 }
195 204
196 /* ipc_addid() locks msq upon success. */ 205 /* ipc_addid() locks msq upon success. */
197 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 206 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
198 if (id < 0) { 207 if (id < 0) {
199 security_msg_queue_free(msq); 208 ipc_rcu_putref(msq, msg_rcu_free);
200 ipc_rcu_putref(msq);
201 return id; 209 return id;
202 } 210 }
203 211
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
276 free_msg(msg); 284 free_msg(msg);
277 } 285 }
278 atomic_sub(msq->q_cbytes, &ns->msg_bytes); 286 atomic_sub(msq->q_cbytes, &ns->msg_bytes);
279 security_msg_queue_free(msq); 287 ipc_rcu_putref(msq, msg_rcu_free);
280 ipc_rcu_putref(msq);
281} 288}
282 289
283/* 290/*
@@ -688,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
688 if (ipcperms(ns, &msq->q_perm, S_IWUGO)) 695 if (ipcperms(ns, &msq->q_perm, S_IWUGO))
689 goto out_unlock0; 696 goto out_unlock0;
690 697
698 /* raced with RMID? */
699 if (msq->q_perm.deleted) {
700 err = -EIDRM;
701 goto out_unlock0;
702 }
703
691 err = security_msg_queue_msgsnd(msq, msg, msgflg); 704 err = security_msg_queue_msgsnd(msq, msg, msgflg);
692 if (err) 705 if (err)
693 goto out_unlock0; 706 goto out_unlock0;
@@ -717,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
717 rcu_read_lock(); 730 rcu_read_lock();
718 ipc_lock_object(&msq->q_perm); 731 ipc_lock_object(&msq->q_perm);
719 732
720 ipc_rcu_putref(msq); 733 ipc_rcu_putref(msq, ipc_rcu_free);
721 if (msq->q_perm.deleted) { 734 if (msq->q_perm.deleted) {
722 err = -EIDRM; 735 err = -EIDRM;
723 goto out_unlock0; 736 goto out_unlock0;
@@ -894,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
894 goto out_unlock1; 907 goto out_unlock1;
895 908
896 ipc_lock_object(&msq->q_perm); 909 ipc_lock_object(&msq->q_perm);
910
911 /* raced with RMID? */
912 if (msq->q_perm.deleted) {
913 msg = ERR_PTR(-EIDRM);
914 goto out_unlock0;
915 }
916
897 msg = find_msg(msq, &msgtyp, mode); 917 msg = find_msg(msq, &msgtyp, mode);
898 if (!IS_ERR(msg)) { 918 if (!IS_ERR(msg)) {
899 /* 919 /*
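Note on the ipc/msg.c hunks above: the final reference drop now defers both security_msg_queue_free() and the memory free to an RCU callback, and every path that re-acquires the lock rechecks q_perm.deleted to catch a concurrent IPC_RMID. A generic sketch of the deferred-free half, with made-up names (obj, obj_security_free, obj_put are placeholders, not kernel API):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative only: a refcounted object whose last put defers both its
 * subsystem teardown and the memory free until after an RCU grace period. */
struct obj {
	struct rcu_head rcu;
	atomic_t refcount;
	/* ... payload ... */
};

static void obj_security_free(struct obj *o)
{
	/* subsystem-specific teardown, e.g. releasing a security blob */
}

static void obj_rcu_free(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	obj_security_free(o);	/* no RCU reader can still reach "o" here */
	kfree(o);
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		call_rcu(&o->rcu, obj_rcu_free);
}
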
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..db9d241af133 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,71 +243,122 @@ static void merge_queues(struct sem_array *sma)
243 } 243 }
244} 244}
245 245
246static void sem_rcu_free(struct rcu_head *head)
247{
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253}
254
255/*
256 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check
259 * that sem_perm.lock is free.
260 * that a) sem_perm.lock is free and b) complex_count is 0.
261 */
262static void sem_wait_array(struct sem_array *sma)
263{
264 int i;
265 struct sem *sem;
266
267 if (sma->complex_count) {
268 /* The thread that increased sma->complex_count waited on
269 * all sem->lock locks. Thus we don't need to wait again.
270 */
271 return;
272 }
273
274 for (i = 0; i < sma->sem_nsems; i++) {
275 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock);
277 }
278}
279
246/* 280/*
247 * If the request contains only one semaphore operation, and there are 281 * If the request contains only one semaphore operation, and there are
248 * no complex transactions pending, lock only the semaphore involved. 282 * no complex transactions pending, lock only the semaphore involved.
249 * Otherwise, lock the entire semaphore array, since we either have 283 * Otherwise, lock the entire semaphore array, since we either have
250 * multiple semaphores in our own semops, or we need to look at 284 * multiple semaphores in our own semops, or we need to look at
251 * semaphores from other pending complex operations. 285 * semaphores from other pending complex operations.
252 *
253 * Carefully guard against sma->complex_count changing between zero
254 * and non-zero while we are spinning for the lock. The value of
255 * sma->complex_count cannot change while we are holding the lock,
256 * so sem_unlock should be fine.
257 *
258 * The global lock path checks that all the local locks have been released,
259 * checking each local lock once. This means that the local lock paths
260 * cannot start their critical sections while the global lock is held.
261 */ 286 */
262static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, 287static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
263 int nsops) 288 int nsops)
264{ 289{
265 int locknum; 290 struct sem *sem;
266 again:
267 if (nsops == 1 && !sma->complex_count) {
268 struct sem *sem = sma->sem_base + sops->sem_num;
269 291
270 /* Lock just the semaphore we are interested in. */ 292 if (nsops != 1) {
271 spin_lock(&sem->lock); 293 /* Complex operation - acquire a full lock */
294 ipc_lock_object(&sma->sem_perm);
272 295
273 /* 296 /* And wait until all simple ops that are processed
274 * If sma->complex_count was set while we were spinning, 297 * right now have dropped their locks.
275 * we may need to look at things we did not lock here.
276 */ 298 */
277 if (unlikely(sma->complex_count)) { 299 sem_wait_array(sma);
278 spin_unlock(&sem->lock); 300 return -1;
279 goto lock_array; 301 }
280 } 302
303 /*
304 * Only one semaphore affected - try to optimize locking.
305 * The rules are:
306 * - optimized locking is possible if no complex operation
307 * is either enqueued or processed right now.
308 * - The test for enqueued complex ops is simple:
309 * sma->complex_count != 0
310 * - Testing for complex ops that are processed right now is
311 * a bit more difficult. Complex ops acquire the full lock
312 * and first wait that the running simple ops have completed.
313 * (see above)
314 * Thus: If we own a simple lock and the global lock is free
315 * and complex_count is now 0, then it will stay 0 and
316 * thus just locking sem->lock is sufficient.
317 */
318 sem = sma->sem_base + sops->sem_num;
281 319
320 if (sma->complex_count == 0) {
282 /* 321 /*
283 * Another process is holding the global lock on the 322 * It appears that no complex operation is around.
284 * sem_array; we cannot enter our critical section, 323 * Acquire the per-semaphore lock.
285 * but have to wait for the global lock to be released.
286 */ 324 */
287 if (unlikely(spin_is_locked(&sma->sem_perm.lock))) { 325 spin_lock(&sem->lock);
288 spin_unlock(&sem->lock); 326
289 spin_unlock_wait(&sma->sem_perm.lock); 327 /* Then check that the global lock is free */
290 goto again; 328 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* spin_is_locked() is not a memory barrier */
330 smp_mb();
331
332 /* Now repeat the test of complex_count:
333 * It can't change anymore until we drop sem->lock.
334 * Thus: if is now 0, then it will stay 0.
335 */
336 if (sma->complex_count == 0) {
337 /* fast path successful! */
338 return sops->sem_num;
339 }
291 } 340 }
341 spin_unlock(&sem->lock);
342 }
292 343
293 locknum = sops->sem_num; 344 /* slow path: acquire the full lock */
345 ipc_lock_object(&sma->sem_perm);
346
347 if (sma->complex_count == 0) {
348 /* False alarm:
349 * There is no complex operation, thus we can switch
350 * back to the fast path.
351 */
352 spin_lock(&sem->lock);
353 ipc_unlock_object(&sma->sem_perm);
354 return sops->sem_num;
294 } else { 355 } else {
295 int i; 356 /* Not a false alarm, thus complete the sequence for a
296 /* 357 * full lock.
297 * Lock the semaphore array, and wait for all of the
298 * individual semaphore locks to go away. The code
299 * above ensures no new single-lock holders will enter
300 * their critical section while the array lock is held.
301 */ 358 */
302 lock_array: 359 sem_wait_array(sma);
303 ipc_lock_object(&sma->sem_perm); 360 return -1;
304 for (i = 0; i < sma->sem_nsems; i++) {
305 struct sem *sem = sma->sem_base + i;
306 spin_unlock_wait(&sem->lock);
307 }
308 locknum = -1;
309 } 361 }
310 return locknum;
311} 362}
312 363
313static inline void sem_unlock(struct sem_array *sma, int locknum) 364static inline void sem_unlock(struct sem_array *sma, int locknum)
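Note on the sem_lock() hunk above: the fast path takes the per-semaphore lock first, then verifies that no complex operation is in flight, pairing the spin_is_locked() observation with an explicit smp_mb() before rechecking complex_count. A condensed sketch of that double-check, with placeholder names (inner stands for sem->lock, outer for sem_perm.lock):

#include <linux/spinlock.h>

static int try_fast_lock(spinlock_t *inner, spinlock_t *outer,
			 const int *complex_count)
{
	if (*complex_count != 0)
		return 0;			/* complex op queued: slow path */

	spin_lock(inner);
	if (!spin_is_locked(outer)) {
		/* spin_is_locked() is not a barrier, so order the recheck */
		smp_mb();
		if (*complex_count == 0)
			return 1;		/* success: hold only "inner" */
	}
	spin_unlock(inner);
	return 0;				/* fall back to taking "outer" */
}

On failure the caller takes the full lock, and (as in the hunk) may still drop back to the per-semaphore lock if complex_count turns out to be zero once the global lock is held.
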
@@ -374,12 +425,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
374static inline void sem_lock_and_putref(struct sem_array *sma) 425static inline void sem_lock_and_putref(struct sem_array *sma)
375{ 426{
376 sem_lock(sma, NULL, -1); 427 sem_lock(sma, NULL, -1);
377 ipc_rcu_putref(sma); 428 ipc_rcu_putref(sma, ipc_rcu_free);
378}
379
380static inline void sem_putref(struct sem_array *sma)
381{
382 ipc_rcu_putref(sma);
383} 429}
384 430
385static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 431static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +504,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
458 sma->sem_perm.security = NULL; 504 sma->sem_perm.security = NULL;
459 retval = security_sem_alloc(sma); 505 retval = security_sem_alloc(sma);
460 if (retval) { 506 if (retval) {
461 ipc_rcu_putref(sma); 507 ipc_rcu_putref(sma, ipc_rcu_free);
462 return retval; 508 return retval;
463 } 509 }
464 510
465 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 511 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
466 if (id < 0) { 512 if (id < 0) {
467 security_sem_free(sma); 513 ipc_rcu_putref(sma, sem_rcu_free);
468 ipc_rcu_putref(sma);
469 return id; 514 return id;
470 } 515 }
471 ns->used_sems += nsems; 516 ns->used_sems += nsems;
@@ -873,6 +918,24 @@ again:
873} 918}
874 919
875/** 920/**
921 * set_semotime(sma, sops) - set sem_otime
922 * @sma: semaphore array
923 * @sops: operations that modified the array, may be NULL
924 *
925 * sem_otime is replicated to avoid cache line trashing.
926 * This function sets one instance to the current time.
927 */
928static void set_semotime(struct sem_array *sma, struct sembuf *sops)
929{
930 if (sops == NULL) {
931 sma->sem_base[0].sem_otime = get_seconds();
932 } else {
933 sma->sem_base[sops[0].sem_num].sem_otime =
934 get_seconds();
935 }
936}
937
938/**
876 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue 939 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
877 * @sma: semaphore array 940 * @sma: semaphore array
878 * @sops: operations that were performed 941 * @sops: operations that were performed
@@ -922,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
922 } 985 }
923 } 986 }
924 } 987 }
925 if (otime) { 988 if (otime)
926 if (sops == NULL) { 989 set_semotime(sma, sops);
927 sma->sem_base[0].sem_otime = get_seconds();
928 } else {
929 sma->sem_base[sops[0].sem_num].sem_otime =
930 get_seconds();
931 }
932 }
933} 990}
934 991
935
936/* The following counts are associated to each semaphore: 992/* The following counts are associated to each semaphore:
937 * semncnt number of tasks waiting on semval being nonzero 993 * semncnt number of tasks waiting on semval being nonzero
938 * semzcnt number of tasks waiting on semval being zero 994 * semzcnt number of tasks waiting on semval being zero
@@ -1047,8 +1103,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1047 1103
1048 wake_up_sem_queue_do(&tasks); 1104 wake_up_sem_queue_do(&tasks);
1049 ns->used_sems -= sma->sem_nsems; 1105 ns->used_sems -= sma->sem_nsems;
1050 security_sem_free(sma); 1106 ipc_rcu_putref(sma, sem_rcu_free);
1051 ipc_rcu_putref(sma);
1052} 1107}
1053 1108
1054static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1109static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1227,6 +1282,12 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1227 1282
1228 sem_lock(sma, NULL, -1); 1283 sem_lock(sma, NULL, -1);
1229 1284
1285 if (sma->sem_perm.deleted) {
1286 sem_unlock(sma, -1);
1287 rcu_read_unlock();
1288 return -EIDRM;
1289 }
1290
1230 curr = &sma->sem_base[semnum]; 1291 curr = &sma->sem_base[semnum];
1231 1292
1232 ipc_assert_locked_object(&sma->sem_perm); 1293 ipc_assert_locked_object(&sma->sem_perm);
@@ -1281,28 +1342,28 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1281 int i; 1342 int i;
1282 1343
1283 sem_lock(sma, NULL, -1); 1344 sem_lock(sma, NULL, -1);
1345 if (sma->sem_perm.deleted) {
1346 err = -EIDRM;
1347 goto out_unlock;
1348 }
1284 if(nsems > SEMMSL_FAST) { 1349 if(nsems > SEMMSL_FAST) {
1285 if (!ipc_rcu_getref(sma)) { 1350 if (!ipc_rcu_getref(sma)) {
1286 sem_unlock(sma, -1);
1287 rcu_read_unlock();
1288 err = -EIDRM; 1351 err = -EIDRM;
1289 goto out_free; 1352 goto out_unlock;
1290 } 1353 }
1291 sem_unlock(sma, -1); 1354 sem_unlock(sma, -1);
1292 rcu_read_unlock(); 1355 rcu_read_unlock();
1293 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1356 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1294 if(sem_io == NULL) { 1357 if(sem_io == NULL) {
1295 sem_putref(sma); 1358 ipc_rcu_putref(sma, ipc_rcu_free);
1296 return -ENOMEM; 1359 return -ENOMEM;
1297 } 1360 }
1298 1361
1299 rcu_read_lock(); 1362 rcu_read_lock();
1300 sem_lock_and_putref(sma); 1363 sem_lock_and_putref(sma);
1301 if (sma->sem_perm.deleted) { 1364 if (sma->sem_perm.deleted) {
1302 sem_unlock(sma, -1);
1303 rcu_read_unlock();
1304 err = -EIDRM; 1365 err = -EIDRM;
1305 goto out_free; 1366 goto out_unlock;
1306 } 1367 }
1307 } 1368 }
1308 for (i = 0; i < sma->sem_nsems; i++) 1369 for (i = 0; i < sma->sem_nsems; i++)
@@ -1320,28 +1381,28 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1320 struct sem_undo *un; 1381 struct sem_undo *un;
1321 1382
1322 if (!ipc_rcu_getref(sma)) { 1383 if (!ipc_rcu_getref(sma)) {
1323 rcu_read_unlock(); 1384 err = -EIDRM;
1324 return -EIDRM; 1385 goto out_rcu_wakeup;
1325 } 1386 }
1326 rcu_read_unlock(); 1387 rcu_read_unlock();
1327 1388
1328 if(nsems > SEMMSL_FAST) { 1389 if(nsems > SEMMSL_FAST) {
1329 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1390 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1330 if(sem_io == NULL) { 1391 if(sem_io == NULL) {
1331 sem_putref(sma); 1392 ipc_rcu_putref(sma, ipc_rcu_free);
1332 return -ENOMEM; 1393 return -ENOMEM;
1333 } 1394 }
1334 } 1395 }
1335 1396
1336 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { 1397 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1337 sem_putref(sma); 1398 ipc_rcu_putref(sma, ipc_rcu_free);
1338 err = -EFAULT; 1399 err = -EFAULT;
1339 goto out_free; 1400 goto out_free;
1340 } 1401 }
1341 1402
1342 for (i = 0; i < nsems; i++) { 1403 for (i = 0; i < nsems; i++) {
1343 if (sem_io[i] > SEMVMX) { 1404 if (sem_io[i] > SEMVMX) {
1344 sem_putref(sma); 1405 ipc_rcu_putref(sma, ipc_rcu_free);
1345 err = -ERANGE; 1406 err = -ERANGE;
1346 goto out_free; 1407 goto out_free;
1347 } 1408 }
@@ -1349,10 +1410,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1349 rcu_read_lock(); 1410 rcu_read_lock();
1350 sem_lock_and_putref(sma); 1411 sem_lock_and_putref(sma);
1351 if (sma->sem_perm.deleted) { 1412 if (sma->sem_perm.deleted) {
1352 sem_unlock(sma, -1);
1353 rcu_read_unlock();
1354 err = -EIDRM; 1413 err = -EIDRM;
1355 goto out_free; 1414 goto out_unlock;
1356 } 1415 }
1357 1416
1358 for (i = 0; i < nsems; i++) 1417 for (i = 0; i < nsems; i++)
@@ -1376,6 +1435,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1376 goto out_rcu_wakeup; 1435 goto out_rcu_wakeup;
1377 1436
1378 sem_lock(sma, NULL, -1); 1437 sem_lock(sma, NULL, -1);
1438 if (sma->sem_perm.deleted) {
1439 err = -EIDRM;
1440 goto out_unlock;
1441 }
1379 curr = &sma->sem_base[semnum]; 1442 curr = &sma->sem_base[semnum];
1380 1443
1381 switch (cmd) { 1444 switch (cmd) {
@@ -1629,7 +1692,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1629 /* step 2: allocate new undo structure */ 1692 /* step 2: allocate new undo structure */
1630 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1693 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1631 if (!new) { 1694 if (!new) {
1632 sem_putref(sma); 1695 ipc_rcu_putref(sma, ipc_rcu_free);
1633 return ERR_PTR(-ENOMEM); 1696 return ERR_PTR(-ENOMEM);
1634 } 1697 }
1635 1698
@@ -1781,6 +1844,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1781 if (error) 1844 if (error)
1782 goto out_rcu_wakeup; 1845 goto out_rcu_wakeup;
1783 1846
1847 error = -EIDRM;
1848 locknum = sem_lock(sma, sops, nsops);
1849 if (sma->sem_perm.deleted)
1850 goto out_unlock_free;
1784 /* 1851 /*
1785 * semid identifiers are not unique - find_alloc_undo may have 1852 * semid identifiers are not unique - find_alloc_undo may have
1786 * allocated an undo structure, it was invalidated by an RMID 1853 * allocated an undo structure, it was invalidated by an RMID
@@ -1788,19 +1855,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1788 * This case can be detected checking un->semid. The existence of 1855 * This case can be detected checking un->semid. The existence of
1789 * "un" itself is guaranteed by rcu. 1856 * "un" itself is guaranteed by rcu.
1790 */ 1857 */
1791 error = -EIDRM;
1792 locknum = sem_lock(sma, sops, nsops);
1793 if (un && un->semid == -1) 1858 if (un && un->semid == -1)
1794 goto out_unlock_free; 1859 goto out_unlock_free;
1795 1860
1796 error = perform_atomic_semop(sma, sops, nsops, un, 1861 error = perform_atomic_semop(sma, sops, nsops, un,
1797 task_tgid_vnr(current)); 1862 task_tgid_vnr(current));
1798 if (error <= 0) { 1863 if (error == 0) {
1799 if (alter && error == 0) 1864 /* If the operation was successful, then do
1865 * the required updates.
1866 */
1867 if (alter)
1800 do_smart_update(sma, sops, nsops, 1, &tasks); 1868 do_smart_update(sma, sops, nsops, 1, &tasks);
1801 1869 else
1802 goto out_unlock_free; 1870 set_semotime(sma, sops);
1803 } 1871 }
1872 if (error <= 0)
1873 goto out_unlock_free;
1804 1874
1805 /* We need to sleep on this operation, so we put the current 1875 /* We need to sleep on this operation, so we put the current
1806 * task into the pending queue and go to sleep. 1876 * task into the pending queue and go to sleep.
@@ -1997,6 +2067,12 @@ void exit_sem(struct task_struct *tsk)
1997 } 2067 }
1998 2068
1999 sem_lock(sma, NULL, -1); 2069 sem_lock(sma, NULL, -1);
2070 /* exit_sem raced with IPC_RMID, nothing to do */
2071 if (sma->sem_perm.deleted) {
2072 sem_unlock(sma, -1);
2073 rcu_read_unlock();
2074 continue;
2075 }
2000 un = __lookup_undo(ulp, semid); 2076 un = __lookup_undo(ulp, semid);
2001 if (un == NULL) { 2077 if (un == NULL) {
2002 /* exit_sem raced with IPC_RMID+semget() that created 2078 /* exit_sem raced with IPC_RMID+semget() that created
@@ -2059,6 +2135,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2059 struct sem_array *sma = it; 2135 struct sem_array *sma = it;
2060 time_t sem_otime; 2136 time_t sem_otime;
2061 2137
2138 /*
2139 * The proc interface isn't aware of sem_lock(), it calls
2140 * ipc_lock_object() directly (in sysvipc_find_ipc).
2141 * In order to stay compatible with sem_lock(), we must wait until
2142 * all simple semop() calls have left their critical regions.
2143 */
2144 sem_wait_array(sma);
2145
2062 sem_otime = get_semotime(sma); 2146 sem_otime = get_semotime(sma);
2063 2147
2064 return seq_printf(s, 2148 return seq_printf(s,
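Note on the remaining ipc/sem.c hunks: several of them add the same guard after sem_lock() or after a relock point, since an IPC_RMID can tear the array down while the lock was dropped, sem_perm.deleted must be rechecked once the lock is held again. A minimal sketch of the pattern (locked_update is a placeholder; the helpers come from ipc/util.h as used in the hunks):

#include <linux/errno.h>
#include <linux/ipc.h>
#include <linux/rcupdate.h>
#include "util.h"	/* ipc_lock_object() / ipc_unlock_object() */

static int locked_update(struct kern_ipc_perm *perm)
{
	rcu_read_lock();
	ipc_lock_object(perm);

	if (perm->deleted) {	/* raced with IPC_RMID while unlocked */
		ipc_unlock_object(perm);
		rcu_read_unlock();
		return -EIDRM;
	}

	/* ... modify the object under kern_ipc_perm.lock ... */

	ipc_unlock_object(perm);
	rcu_read_unlock();
	return 0;
}
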
diff --git a/ipc/shm.c b/ipc/shm.c
index 2821cdf93adb..d69739610fd4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
167 ipc_lock_object(&ipcp->shm_perm); 167 ipc_lock_object(&ipcp->shm_perm);
168} 168}
169 169
170static void shm_rcu_free(struct rcu_head *head)
171{
172 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
173 struct shmid_kernel *shp = ipc_rcu_to_struct(p);
174
175 security_shm_free(shp);
176 ipc_rcu_free(head);
177}
178
170static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) 179static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
171{ 180{
172 ipc_rmid(&shm_ids(ns), &s->shm_perm); 181 ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
208 user_shm_unlock(file_inode(shp->shm_file)->i_size, 217 user_shm_unlock(file_inode(shp->shm_file)->i_size,
209 shp->mlock_user); 218 shp->mlock_user);
210 fput (shp->shm_file); 219 fput (shp->shm_file);
211 security_shm_free(shp); 220 ipc_rcu_putref(shp, shm_rcu_free);
212 ipc_rcu_putref(shp);
213} 221}
214 222
215/* 223/*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
497 shp->shm_perm.security = NULL; 505 shp->shm_perm.security = NULL;
498 error = security_shm_alloc(shp); 506 error = security_shm_alloc(shp);
499 if (error) { 507 if (error) {
500 ipc_rcu_putref(shp); 508 ipc_rcu_putref(shp, ipc_rcu_free);
501 return error; 509 return error;
502 } 510 }
503 511
@@ -566,8 +574,7 @@ no_id:
566 user_shm_unlock(size, shp->mlock_user); 574 user_shm_unlock(size, shp->mlock_user);
567 fput(file); 575 fput(file);
568no_file: 576no_file:
569 security_shm_free(shp); 577 ipc_rcu_putref(shp, shm_rcu_free);
570 ipc_rcu_putref(shp);
571 return error; 578 return error;
572} 579}
573 580
diff --git a/ipc/util.c b/ipc/util.c
index e829da9ed01f..7684f41bce76 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -17,12 +17,27 @@
17 * Pavel Emelianov <xemul@openvz.org> 17 * Pavel Emelianov <xemul@openvz.org>
18 * 18 *
19 * General sysv ipc locking scheme: 19 * General sysv ipc locking scheme:
20 * when doing ipc id lookups, take the ids->rwsem 20 * rcu_read_lock()
21 * rcu_read_lock() 21 * obtain the ipc object (kern_ipc_perm) by looking up the id in an idr
22 * obtain the ipc object (kern_ipc_perm) 22 * tree.
23 * perform security, capabilities, auditing and permission checks, etc. 23 * - perform initial checks (capabilities, auditing and permission,
24 * acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object() 24 * etc).
25 * perform data updates (ie: SET, RMID, LOCK/UNLOCK commands) 25 * - perform read-only operations, such as STAT, INFO commands.
26 * acquire the ipc lock (kern_ipc_perm.lock) through
27 * ipc_lock_object()
28 * - perform data updates, such as SET, RMID commands and
29 * mechanism-specific operations (semop/semtimedop,
30 * msgsnd/msgrcv, shmat/shmdt).
31 * drop the ipc lock, through ipc_unlock_object().
32 * rcu_read_unlock()
33 *
34 * The ids->rwsem must be taken when:
35 * - creating, removing and iterating the existing entries in ipc
36 * identifier sets.
37 * - iterating through files under /proc/sysvipc/
38 *
39 * Note that sems have a special fast path that avoids kern_ipc_perm.lock -
40 * see sem_lock().
26 */ 41 */
27 42
28#include <linux/mm.h> 43#include <linux/mm.h>
@@ -474,11 +489,6 @@ void ipc_free(void* ptr, int size)
474 kfree(ptr); 489 kfree(ptr);
475} 490}
476 491
477struct ipc_rcu {
478 struct rcu_head rcu;
479 atomic_t refcount;
480} ____cacheline_aligned_in_smp;
481
482/** 492/**
483 * ipc_rcu_alloc - allocate ipc and rcu space 493 * ipc_rcu_alloc - allocate ipc and rcu space
484 * @size: size desired 494 * @size: size desired
@@ -505,27 +515,24 @@ int ipc_rcu_getref(void *ptr)
505 return atomic_inc_not_zero(&p->refcount); 515 return atomic_inc_not_zero(&p->refcount);
506} 516}
507 517
508/** 518void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
509 * ipc_schedule_free - free ipc + rcu space
510 * @head: RCU callback structure for queued work
511 */
512static void ipc_schedule_free(struct rcu_head *head)
513{
514 vfree(container_of(head, struct ipc_rcu, rcu));
515}
516
517void ipc_rcu_putref(void *ptr)
518{ 519{
519 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; 520 struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
520 521
521 if (!atomic_dec_and_test(&p->refcount)) 522 if (!atomic_dec_and_test(&p->refcount))
522 return; 523 return;
523 524
524 if (is_vmalloc_addr(ptr)) { 525 call_rcu(&p->rcu, func);
525 call_rcu(&p->rcu, ipc_schedule_free); 526}
526 } else { 527
527 kfree_rcu(p, rcu); 528void ipc_rcu_free(struct rcu_head *head)
528 } 529{
530 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
531
532 if (is_vmalloc_addr(p))
533 vfree(p);
534 else
535 kfree(p);
529} 536}
530 537
531/** 538/**
diff --git a/ipc/util.h b/ipc/util.h
index c5f3338ba1fa..f2f5036f2eed 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
47static inline void shm_exit_ns(struct ipc_namespace *ns) { } 47static inline void shm_exit_ns(struct ipc_namespace *ns) { }
48#endif 48#endif
49 49
50struct ipc_rcu {
51 struct rcu_head rcu;
52 atomic_t refcount;
53} ____cacheline_aligned_in_smp;
54
55#define ipc_rcu_to_struct(p) ((void *)(p+1))
56
50/* 57/*
51 * Structure that holds the parameters needed by the ipc operations 58 * Structure that holds the parameters needed by the ipc operations
52 * (see after) 59 * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
120 */ 127 */
121void* ipc_rcu_alloc(int size); 128void* ipc_rcu_alloc(int size);
122int ipc_rcu_getref(void *ptr); 129int ipc_rcu_getref(void *ptr);
123void ipc_rcu_putref(void *ptr); 130void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
131void ipc_rcu_free(struct rcu_head *head);
124 132
125struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); 133struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
126struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); 134struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
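Note on the ipc/util.h hunk above: ipc_rcu_to_struct() works because the allocator places the struct ipc_rcu bookkeeping header directly in front of the payload object, so header and payload convert with pointer arithmetic. A simplified sketch of that layout (rcu_hdr, example_alloc and the reverse macro are illustrative, not kernel API):

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct rcu_hdr {
	struct rcu_head rcu;
	atomic_t refcount;
};

#define hdr_to_payload(p)  ((void *)((p) + 1))			/* header  -> payload */
#define payload_to_hdr(p)  (((struct rcu_hdr *)(p)) - 1)	/* payload -> header */

static void *example_alloc(size_t payload_size)
{
	struct rcu_hdr *hdr = kmalloc(sizeof(*hdr) + payload_size, GFP_KERNEL);

	if (!hdr)
		return NULL;
	atomic_set(&hdr->refcount, 1);
	return hdr_to_payload(hdr);	/* callers only ever see the payload */
}
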
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1117 1117
1118 sleep_time = timeout_start + audit_backlog_wait_time - 1118 sleep_time = timeout_start + audit_backlog_wait_time -
1119 jiffies; 1119 jiffies;
1120 if ((long)sleep_time > 0) 1120 if ((long)sleep_time > 0) {
1121 wait_for_auditd(sleep_time); 1121 wait_for_auditd(sleep_time);
1122 continue; 1122 continue;
1123 }
1123 } 1124 }
1124 if (audit_rate_check() && printk_ratelimit()) 1125 if (audit_rate_check() && printk_ratelimit())
1125 printk(KERN_WARNING 1126 printk(KERN_WARNING
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2418b6e71a85..8bd9cfdc70d7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2039,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2039 2039
2040 /* @tsk either already exited or can't exit until the end */ 2040 /* @tsk either already exited or can't exit until the end */
2041 if (tsk->flags & PF_EXITING) 2041 if (tsk->flags & PF_EXITING)
2042 continue; 2042 goto next;
2043 2043
2044 /* as per above, nr_threads may decrease, but not increase. */ 2044 /* as per above, nr_threads may decrease, but not increase. */
2045 BUG_ON(i >= group_size); 2045 BUG_ON(i >= group_size);
@@ -2047,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2047 ent.cgrp = task_cgroup_from_root(tsk, root); 2047 ent.cgrp = task_cgroup_from_root(tsk, root);
2048 /* nothing to do if this task is already in the cgroup */ 2048 /* nothing to do if this task is already in the cgroup */
2049 if (ent.cgrp == cgrp) 2049 if (ent.cgrp == cgrp)
2050 continue; 2050 goto next;
2051 /* 2051 /*
2052 * saying GFP_ATOMIC has no effect here because we did prealloc 2052 * saying GFP_ATOMIC has no effect here because we did prealloc
2053 * earlier, but it's good form to communicate our expectations. 2053 * earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2055 retval = flex_array_put(group, i, &ent, GFP_ATOMIC); 2055 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
2056 BUG_ON(retval != 0); 2056 BUG_ON(retval != 0);
2057 i++; 2057 i++;
2058 2058 next:
2059 if (!threadgroup) 2059 if (!threadgroup)
2060 break; 2060 break;
2061 } while_each_thread(leader, tsk); 2061 } while_each_thread(leader, tsk);
@@ -3188,11 +3188,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
3188 3188
3189 WARN_ON_ONCE(!rcu_read_lock_held()); 3189 WARN_ON_ONCE(!rcu_read_lock_held());
3190 3190
3191 /* if first iteration, visit the leftmost descendant */ 3191 /* if first iteration, visit leftmost descendant which may be @root */
3192 if (!pos) { 3192 if (!pos)
3193 next = css_leftmost_descendant(root); 3193 return css_leftmost_descendant(root);
3194 return next != root ? next : NULL;
3195 }
3196 3194
3197 /* if we visited @root, we're done */ 3195 /* if we visited @root, we're done */
3198 if (pos == root) 3196 if (pos == root)
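Note on the cgroup_attach_task() hunk above: continue is replaced with goto next so that the per-iteration tail of the loop (the !threadgroup check that ends the walk after the group leader) still runs for skipped tasks. A self-contained sketch of the shape of the fix, with placeholder names:

#include <stdbool.h>
#include <stddef.h>

struct item { struct item *next; bool skip; };

/* A "skip" condition must not jump past the tail bookkeeping, so it
 * branches to a label just before it instead of using continue (which,
 * in the original loop, bypassed the "!threadgroup -> break" check). */
static int collect(struct item *head, bool whole_group)
{
	struct item *it = head;
	int collected = 0;

	do {
		if (it->skip)
			goto next;

		collected++;
next:
		if (!whole_group)
			break;		/* must run even for skipped items */
	} while ((it = it->next) != head);

	return collected;
}
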
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
51 unsigned long flags; 51 unsigned long flags;
52 52
53 /* 53 /*
54 * Repeat the user_enter() check here because some archs may be calling
55 * this from asm and if no CPU needs context tracking, they shouldn't
56 * go further. Repeat the check here until they support the static key
57 * check.
58 */
59 if (!static_key_false(&context_tracking_enabled))
60 return;
61
62 /*
54 * Some contexts may involve an exception occuring in an irq, 63 * Some contexts may involve an exception occuring in an irq,
55 * leading to that nesting: 64 * leading to that nesting:
56 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() 65 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
151{ 160{
152 unsigned long flags; 161 unsigned long flags;
153 162
163 if (!static_key_false(&context_tracking_enabled))
164 return;
165
154 if (in_interrupt()) 166 if (in_interrupt())
155 return; 167 return;
156 168
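Note on the context_tracking hunks above: both entry points now repeat the static-key test so that architectures calling them straight from assembly pay only a patched-out branch when context tracking is off. A minimal sketch of that guard, using the static key API of this kernel generation; the key and function names are placeholders:

#include <linux/jump_label.h>

struct static_key feature_enabled = STATIC_KEY_INIT_FALSE;

void feature_user_enter(void)
{
	/*
	 * Cheap early exit: when the key is false this reduces to a
	 * patched no-op branch, so callers (including asm stubs that
	 * cannot do the test themselves) may call unconditionally.
	 */
	if (!static_key_false(&feature_enabled))
		return;

	/* ... expensive per-task bookkeeping only when tracking is on ... */
}
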
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..953c14348375 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
3660 *running = ctx_time - event->tstamp_running; 3660 *running = ctx_time - event->tstamp_running;
3661} 3661}
3662 3662
3663static void perf_event_init_userpage(struct perf_event *event)
3664{
3665 struct perf_event_mmap_page *userpg;
3666 struct ring_buffer *rb;
3667
3668 rcu_read_lock();
3669 rb = rcu_dereference(event->rb);
3670 if (!rb)
3671 goto unlock;
3672
3673 userpg = rb->user_page;
3674
3675 /* Allow new userspace to detect that bit 0 is deprecated */
3676 userpg->cap_bit0_is_deprecated = 1;
3677 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3678
3679unlock:
3680 rcu_read_unlock();
3681}
3682
3663void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 3683void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3664{ 3684{
3665} 3685}
@@ -4044,6 +4064,7 @@ again:
4044 ring_buffer_attach(event, rb); 4064 ring_buffer_attach(event, rb);
4045 rcu_assign_pointer(event->rb, rb); 4065 rcu_assign_pointer(event->rb, rb);
4046 4066
4067 perf_event_init_userpage(event);
4047 perf_event_update_userpage(event); 4068 perf_event_update_userpage(event);
4048 4069
4049unlock: 4070unlock:
@@ -6746,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
6746 if (ret) 6767 if (ret)
6747 return -EFAULT; 6768 return -EFAULT;
6748 6769
6770 /* disabled for now */
6771 if (attr->mmap2)
6772 return -EINVAL;
6773
6749 if (attr->__reserved_1) 6774 if (attr->__reserved_1)
6750 return -EINVAL; 6775 return -EINVAL;
6751 6776
@@ -7213,15 +7238,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7213 perf_remove_from_context(event); 7238 perf_remove_from_context(event);
7214 unaccount_event_cpu(event, src_cpu); 7239 unaccount_event_cpu(event, src_cpu);
7215 put_ctx(src_ctx); 7240 put_ctx(src_ctx);
7216 list_add(&event->event_entry, &events); 7241 list_add(&event->migrate_entry, &events);
7217 } 7242 }
7218 mutex_unlock(&src_ctx->mutex); 7243 mutex_unlock(&src_ctx->mutex);
7219 7244
7220 synchronize_rcu(); 7245 synchronize_rcu();
7221 7246
7222 mutex_lock(&dst_ctx->mutex); 7247 mutex_lock(&dst_ctx->mutex);
7223 list_for_each_entry_safe(event, tmp, &events, event_entry) { 7248 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7224 list_del(&event->event_entry); 7249 list_del(&event->migrate_entry);
7225 if (event->state >= PERF_EVENT_STATE_OFF) 7250 if (event->state >= PERF_EVENT_STATE_OFF)
7226 event->state = PERF_EVENT_STATE_INACTIVE; 7251 event->state = PERF_EVENT_STATE_INACTIVE;
7227 account_event_cpu(event, dst_cpu); 7252 account_event_cpu(event, dst_cpu);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
571 DECLARE_COMPLETION_ONSTACK(done); 571 DECLARE_COMPLETION_ONSTACK(done);
572 int retval = 0; 572 int retval = 0;
573 573
574 if (!sub_info->path) {
575 call_usermodehelper_freeinfo(sub_info);
576 return -EINVAL;
577 }
574 helper_lock(); 578 helper_lock();
575 if (!khelper_wq || usermodehelper_disabled) { 579 if (!khelper_wq || usermodehelper_disabled) {
576 retval = -EBUSY; 580 retval = -EBUSY;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 6d647aedffea..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
410static __always_inline int __sched 410static __always_inline int __sched
411__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 411__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
412 struct lockdep_map *nest_lock, unsigned long ip, 412 struct lockdep_map *nest_lock, unsigned long ip,
413 struct ww_acquire_ctx *ww_ctx) 413 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
414{ 414{
415 struct task_struct *task = current; 415 struct task_struct *task = current;
416 struct mutex_waiter waiter; 416 struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
450 struct task_struct *owner; 450 struct task_struct *owner;
451 struct mspin_node node; 451 struct mspin_node node;
452 452
453 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 453 if (use_ww_ctx && ww_ctx->acquired > 0) {
454 struct ww_mutex *ww; 454 struct ww_mutex *ww;
455 455
456 ww = container_of(lock, struct ww_mutex, base); 456 ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
480 if ((atomic_read(&lock->count) == 1) && 480 if ((atomic_read(&lock->count) == 1) &&
481 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { 481 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
482 lock_acquired(&lock->dep_map, ip); 482 lock_acquired(&lock->dep_map, ip);
483 if (!__builtin_constant_p(ww_ctx == NULL)) { 483 if (use_ww_ctx) {
484 struct ww_mutex *ww; 484 struct ww_mutex *ww;
485 ww = container_of(lock, struct ww_mutex, base); 485 ww = container_of(lock, struct ww_mutex, base);
486 486
@@ -551,7 +551,7 @@ slowpath:
551 goto err; 551 goto err;
552 } 552 }
553 553
554 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) { 554 if (use_ww_ctx && ww_ctx->acquired > 0) {
555 ret = __mutex_lock_check_stamp(lock, ww_ctx); 555 ret = __mutex_lock_check_stamp(lock, ww_ctx);
556 if (ret) 556 if (ret)
557 goto err; 557 goto err;
@@ -575,7 +575,7 @@ skip_wait:
575 lock_acquired(&lock->dep_map, ip); 575 lock_acquired(&lock->dep_map, ip);
576 mutex_set_owner(lock); 576 mutex_set_owner(lock);
577 577
578 if (!__builtin_constant_p(ww_ctx == NULL)) { 578 if (use_ww_ctx) {
579 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 579 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
580 struct mutex_waiter *cur; 580 struct mutex_waiter *cur;
581 581
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
615{ 615{
616 might_sleep(); 616 might_sleep();
617 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 617 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
618 subclass, NULL, _RET_IP_, NULL); 618 subclass, NULL, _RET_IP_, NULL, 0);
619} 619}
620 620
621EXPORT_SYMBOL_GPL(mutex_lock_nested); 621EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
625{ 625{
626 might_sleep(); 626 might_sleep();
627 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 627 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
628 0, nest, _RET_IP_, NULL); 628 0, nest, _RET_IP_, NULL, 0);
629} 629}
630 630
631EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); 631EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
635{ 635{
636 might_sleep(); 636 might_sleep();
637 return __mutex_lock_common(lock, TASK_KILLABLE, 637 return __mutex_lock_common(lock, TASK_KILLABLE,
638 subclass, NULL, _RET_IP_, NULL); 638 subclass, NULL, _RET_IP_, NULL, 0);
639} 639}
640EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 640EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
641 641
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
644{ 644{
645 might_sleep(); 645 might_sleep();
646 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 646 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
647 subclass, NULL, _RET_IP_, NULL); 647 subclass, NULL, _RET_IP_, NULL, 0);
648} 648}
649 649
650EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 650EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
682 682
683 might_sleep(); 683 might_sleep();
684 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 684 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
685 0, &ctx->dep_map, _RET_IP_, ctx); 685 0, &ctx->dep_map, _RET_IP_, ctx, 1);
686 if (!ret && ctx->acquired > 1) 686 if (!ret && ctx->acquired > 1)
687 return ww_mutex_deadlock_injection(lock, ctx); 687 return ww_mutex_deadlock_injection(lock, ctx);
688 688
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
697 697
698 might_sleep(); 698 might_sleep();
699 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 699 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
700 0, &ctx->dep_map, _RET_IP_, ctx); 700 0, &ctx->dep_map, _RET_IP_, ctx, 1);
701 701
702 if (!ret && ctx->acquired > 1) 702 if (!ret && ctx->acquired > 1)
703 return ww_mutex_deadlock_injection(lock, ctx); 703 return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
809 struct mutex *lock = container_of(lock_count, struct mutex, count); 809 struct mutex *lock = container_of(lock_count, struct mutex, count);
810 810
811 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, 811 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
812 NULL, _RET_IP_, NULL); 812 NULL, _RET_IP_, NULL, 0);
813} 813}
814 814
815static noinline int __sched 815static noinline int __sched
816__mutex_lock_killable_slowpath(struct mutex *lock) 816__mutex_lock_killable_slowpath(struct mutex *lock)
817{ 817{
818 return __mutex_lock_common(lock, TASK_KILLABLE, 0, 818 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
819 NULL, _RET_IP_, NULL); 819 NULL, _RET_IP_, NULL, 0);
820} 820}
821 821
822static noinline int __sched 822static noinline int __sched
823__mutex_lock_interruptible_slowpath(struct mutex *lock) 823__mutex_lock_interruptible_slowpath(struct mutex *lock)
824{ 824{
825 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, 825 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
826 NULL, _RET_IP_, NULL); 826 NULL, _RET_IP_, NULL, 0);
827} 827}
828 828
829static noinline int __sched 829static noinline int __sched
830__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 830__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
831{ 831{
832 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, 832 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
833 NULL, _RET_IP_, ctx); 833 NULL, _RET_IP_, ctx, 1);
834} 834}
835 835
836static noinline int __sched 836static noinline int __sched
@@ -838,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
838 struct ww_acquire_ctx *ctx) 838 struct ww_acquire_ctx *ctx)
839{ 839{
840 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, 840 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
841 NULL, _RET_IP_, ctx); 841 NULL, _RET_IP_, ctx, 1);
842} 842}
843 843
844#endif 844#endif
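Note on the kernel/mutex.c hunks above: the __builtin_constant_p(ww_ctx == NULL) test becomes an explicit const bool use_ww_ctx argument. Because __mutex_lock_common() is always inlined, every call site passes a compile-time constant and the unused branch is folded away, without relying on the builtin seeing through a pointer comparison. A sketch of the idiom with placeholder names (not the real mutex entry points):

#include <linux/compiler.h>
#include <linux/types.h>

static __always_inline int lock_common(long state, void *extra_ctx,
				       const bool use_extra)
{
	if (use_extra && extra_ctx) {
		/* ... wound/wait specific bookkeeping ... */
	}
	/* ... common acquisition slow path ... */
	return 0;
}

int lock_plain(long state)
{
	return lock_common(state, NULL, false);	/* branch compiled out */
}

int lock_with_ctx(long state, void *ctx)
{
	return lock_common(state, ctx, true);	/* branch kept */
}
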
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
254 254
255 255
256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul); 256STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul); 257STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul); 258STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul); 259STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul); 260STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul); 261STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul); 262STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
263 263
264int param_set_charp(const char *val, const struct kernel_param *kp) 264int param_set_charp(const char *val, const struct kernel_param *kp)
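Note on the kernel/params.c hunk above: the signed parameter types (short, int, long) were parsed with the unsigned helper, so a negative value given on the command line did not round-trip as intended; they now use kstrtol. A tiny userspace-flavoured illustration of why the signed/unsigned split matters (parse_int is a placeholder, not the kernel helper):

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* The point is only that a signed destination needs a signed
 * conversion, or inputs like "-1" are rejected or mangled. */
static int parse_int(const char *s, int *out)
{
	char *end;
	long v;

	errno = 0;
	v = strtol(s, &end, 0);		/* accepts "-1", "0x10", "010", ... */
	if (errno || *end != '\0' || v < INT_MIN || v > INT_MAX)
		return -1;

	*out = (int)v;
	return 0;
}
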
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
273 */ 273 */
274 wake_up_process(ns->child_reaper); 274 wake_up_process(ns->child_reaper);
275 break; 275 break;
276 case PIDNS_HASH_ADDING:
277 /* Handle a fork failure of the first process */
278 WARN_ON(ns->child_reaper);
279 ns->nr_hashed = 0;
280 /* fall through */
276 case 0: 281 case 0:
277 schedule_work(&ns->proc_work); 282 schedule_work(&ns->proc_work);
278 break; 283 break;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c9c759d5a15c..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -846,7 +846,7 @@ static int software_resume(void)
846 goto Finish; 846 goto Finish;
847} 847}
848 848
849late_initcall(software_resume); 849late_initcall_sync(software_resume);
850 850
851 851
852static const char * const hibernation_modes[] = { 852static const char * const hibernation_modes[] = {
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 358a146fd4da..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
743 struct memory_bitmap *bm1, *bm2; 743 struct memory_bitmap *bm1, *bm2;
744 int error = 0; 744 int error = 0;
745 745
746 BUG_ON(forbidden_pages_map || free_pages_map); 746 if (forbidden_pages_map && free_pages_map)
747 return 0;
748 else
749 BUG_ON(forbidden_pages_map || free_pages_map);
747 750
748 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 751 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
749 if (!bm1) 752 if (!bm1)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 72e8f4fd616d..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
39 char frozen; 39 char frozen;
40 char ready; 40 char ready;
41 char platform_support; 41 char platform_support;
42 bool free_bitmaps;
42} snapshot_state; 43} snapshot_state;
43 44
44atomic_t snapshot_device_available = ATOMIC_INIT(1); 45atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -82,6 +83,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
82 data->swap = -1; 83 data->swap = -1;
83 data->mode = O_WRONLY; 84 data->mode = O_WRONLY;
84 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 85 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
86 if (!error) {
87 error = create_basic_memory_bitmaps();
88 data->free_bitmaps = !error;
89 }
85 if (error) 90 if (error)
86 pm_notifier_call_chain(PM_POST_RESTORE); 91 pm_notifier_call_chain(PM_POST_RESTORE);
87 } 92 }
@@ -111,6 +116,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
111 pm_restore_gfp_mask(); 116 pm_restore_gfp_mask();
112 free_basic_memory_bitmaps(); 117 free_basic_memory_bitmaps();
113 thaw_processes(); 118 thaw_processes();
119 } else if (data->free_bitmaps) {
120 free_basic_memory_bitmaps();
114 } 121 }
115 pm_notifier_call_chain(data->mode == O_RDONLY ? 122 pm_notifier_call_chain(data->mode == O_RDONLY ?
116 PM_POST_HIBERNATION : PM_POST_RESTORE); 123 PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -231,6 +238,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
231 break; 238 break;
232 pm_restore_gfp_mask(); 239 pm_restore_gfp_mask();
233 free_basic_memory_bitmaps(); 240 free_basic_memory_bitmaps();
241 data->free_bitmaps = false;
234 thaw_processes(); 242 thaw_processes();
235 data->frozen = 0; 243 data->frozen = 0;
236 break; 244 break;
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
32#endif 32#endif
33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; 33enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
34 34
35int reboot_default; 35/*
36 * This variable is used privately to keep track of whether or not
37 * reboot_type is still set to its default value (i.e., reboot= hasn't
38 * been set on the command line). This is needed so that we can
39 * suppress DMI scanning for reboot quirks. Without it, it's
40 * impossible to override a faulty reboot quirk without recompiling.
41 */
42int reboot_default = 1;
36int reboot_cpu; 43int reboot_cpu;
37enum reboot_type reboot_type = BOOT_ACPI; 44enum reboot_type reboot_type = BOOT_ACPI;
38int reboot_force; 45int reboot_force;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
4242 } 4242 }
4243 4243
4244 if (!se) { 4244 if (!se) {
4245 cfs_rq->h_load = rq->avg.load_avg_contrib; 4245 cfs_rq->h_load = cfs_rq->runnable_load_avg;
4246 cfs_rq->last_h_load_update = now; 4246 cfs_rq->last_h_load_update = now;
4247 } 4247 }
4248 4248
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
4823 (busiest->load_per_task * SCHED_POWER_SCALE) / 4823 (busiest->load_per_task * SCHED_POWER_SCALE) /
4824 busiest->group_power; 4824 busiest->group_power;
4825 4825
4826 if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >= 4826 if (busiest->avg_load + scaled_busy_load_per_task >=
4827 (scaled_busy_load_per_task * imbn)) { 4827 local->avg_load + (scaled_busy_load_per_task * imbn)) {
4828 env->imbalance = busiest->load_per_task; 4828 env->imbalance = busiest->load_per_task;
4829 return; 4829 return;
4830 } 4830 }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4896 * max load less than avg load(as we skip the groups at or below 4896 * max load less than avg load(as we skip the groups at or below
4897 * its cpu_power, while calculating max_load..) 4897 * its cpu_power, while calculating max_load..)
4898 */ 4898 */
4899 if (busiest->avg_load < sds->avg_load) { 4899 if (busiest->avg_load <= sds->avg_load ||
4900 local->avg_load >= sds->avg_load) {
4900 env->imbalance = 0; 4901 env->imbalance = 0;
4901 return fix_small_imbalance(env, sds); 4902 return fix_small_imbalance(env, sds);
4902 } 4903 }
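Note on the kernel/sched/fair.c hunks above: the comparisons are rewritten so no unsigned subtraction can wrap. busiest->avg_load - local->avg_load underflows to a huge value when local is the larger load, making the test pass spuriously; moving the subtrahend to the other side keeps every term non-negative. A small self-contained illustration of the pitfall:

#include <stdbool.h>
#include <stdint.h>

/* With unsigned loads, "a - b + c >= c * n" misbehaves when b > a:
 * the subtraction wraps around instead of going negative. */
static bool imbalanced_buggy(uint64_t busiest, uint64_t local,
			     uint64_t per_task, uint64_t imbn)
{
	return busiest - local + per_task >= per_task * imbn;
}

static bool imbalanced_fixed(uint64_t busiest, uint64_t local,
			     uint64_t per_task, uint64_t imbn)
{
	return busiest + per_task >= local + per_task * imbn;
}

For example, with busiest=1, local=100, per_task=10, imbn=2 the buggy form wraps and reports an imbalance, while the fixed form correctly does not.
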
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b8..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
328 328
329static inline void invoke_softirq(void) 329static inline void invoke_softirq(void)
330{ 330{
331 if (!force_irqthreads) 331 if (!force_irqthreads) {
332 __do_softirq(); 332 /*
333 else 333 * We can safely execute softirq on the current stack if
334 * it is the irq stack, because it should be near empty
335 * at this stage. But we have no way to know if the arch
336 * calls irq_exit() on the irq stack. So call softirq
337 * in its own stack to prevent from any overrun on top
338 * of a potentially deep task stack.
339 */
340 do_softirq();
341 } else {
334 wakeup_softirqd(); 342 wakeup_softirqd();
343 }
335} 344}
336 345
337static inline void tick_irq_exit(void) 346static inline void tick_irq_exit(void)
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..662c5798a685 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
33 int res; 33 int res;
34}; 34};
35 35
36/** 36static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
37 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 37 bool ismax)
38 * @latch: value to convert
39 * @evt: pointer to clock event device descriptor
40 *
41 * Math helper, returns latch value converted to nanoseconds (bound checked)
42 */
43u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
44{ 38{
45 u64 clc = (u64) latch << evt->shift; 39 u64 clc = (u64) latch << evt->shift;
40 u64 rnd;
46 41
47 if (unlikely(!evt->mult)) { 42 if (unlikely(!evt->mult)) {
48 evt->mult = 1; 43 evt->mult = 1;
49 WARN_ON(1); 44 WARN_ON(1);
50 } 45 }
46 rnd = (u64) evt->mult - 1;
47
48 /*
49 * Upper bound sanity check. If the backwards conversion is
50 * not equal latch, we know that the above shift overflowed.
51 */
52 if ((clc >> evt->shift) != (u64)latch)
53 clc = ~0ULL;
54
55 /*
56 * Scaled math oddities:
57 *
58 * For mult <= (1 << shift) we can safely add mult - 1 to
59 * prevent integer rounding loss. So the backwards conversion
60 * from nsec to device ticks will be correct.
61 *
62 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
63 * need to be careful. Adding mult - 1 will result in a value
64 * which when converted back to device ticks can be larger
65 * than latch by up to (mult - 1) >> shift. For the min_delta
66 * calculation we still want to apply this in order to stay
67 * above the minimum device ticks limit. For the upper limit
68 * we would end up with a latch value larger than the upper
69 * limit of the device, so we omit the add to stay below the
70 * device upper boundary.
71 *
72 * Also omit the add if it would overflow the u64 boundary.
73 */
74 if ((~0ULL - clc > rnd) &&
75 (!ismax || evt->mult <= (1U << evt->shift)))
76 clc += rnd;
51 77
52 do_div(clc, evt->mult); 78 do_div(clc, evt->mult);
53 if (clc < 1000)
54 clc = 1000;
55 if (clc > KTIME_MAX)
56 clc = KTIME_MAX;
57 79
58 return clc; 80 /* Deltas less than 1usec are pointless noise */
81 return clc > 1000 ? clc : 1000;
82}
83
84/**
85 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
86 * @latch: value to convert
87 * @evt: pointer to clock event device descriptor
88 *
89 * Math helper, returns latch value converted to nanoseconds (bound checked)
90 */
91u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
92{
93 return cev_delta2ns(latch, evt, false);
59} 94}
60EXPORT_SYMBOL_GPL(clockevent_delta2ns); 95EXPORT_SYMBOL_GPL(clockevent_delta2ns);
61 96
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
380 sec = 600; 415 sec = 600;
381 416
382 clockevents_calc_mult_shift(dev, freq, sec); 417 clockevents_calc_mult_shift(dev, freq, sec);
383 dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); 418 dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
384 dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); 419 dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
385} 420}
386 421
387/** 422/**
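
Editor's note: the new cev_delta2ns() helper combines an overflow check on the shifted latch with rounding that is applied for the minimum delta but skipped for the maximum when mult exceeds 1 << shift. The sketch below redoes that arithmetic as a standalone program; the mult/shift/latch values are made up purely to exercise the min/max asymmetry.

/* Hedged sketch: the cev_delta2ns() math as plain C. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

static uint64_t delta2ns(unsigned long latch, uint32_t mult, uint32_t shift,
                         bool ismax)
{
    uint64_t clc = (uint64_t)latch << shift;
    uint64_t rnd = (uint64_t)mult - 1;

    /* If shifting lost bits, the backwards conversion will not match. */
    if ((clc >> shift) != (uint64_t)latch)
        clc = ~0ULL;

    /* Round up only when it cannot push a max_delta past the device
     * limit (mult <= 1 << shift) and cannot overflow u64. */
    if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
        clc += rnd;

    clc /= mult;

    return clc > 1000 ? clc : 1000;      /* sub-usec deltas are noise */
}

int main(void)
{
    /* Roughly a 3 GHz device: mult > 1 << shift, so the max conversion
     * skips the rounding term while the min conversion keeps it. */
    printf("min: %" PRIu64 " ns\n", delta2ns(100000, 3U << 20, 20, false));
    printf("max: %" PRIu64 " ns\n", delta2ns(100000, 3U << 20, 20, true));
    return 0;
}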
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
486 .unpark = watchdog_enable, 486 .unpark = watchdog_enable,
487}; 487};
488 488
489static int watchdog_enable_all_cpus(void) 489static void restart_watchdog_hrtimer(void *info)
490{
491 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
492 int ret;
493
494 /*
495 * No need to cancel and restart hrtimer if it is currently executing
496 * because it will reprogram itself with the new period now.
497 * We should never see it unqueued here because we are running per-cpu
498 * with interrupts disabled.
499 */
500 ret = hrtimer_try_to_cancel(hrtimer);
501 if (ret == 1)
502 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
503 HRTIMER_MODE_REL_PINNED);
504}
505
506static void update_timers(int cpu)
507{
508 struct call_single_data data = {.func = restart_watchdog_hrtimer};
509 /*
 510 * Make sure that the perf event counter will adopt a new 
511 * sampling period. Updating the sampling period directly would
512 * be much nicer but we do not have an API for that now so
513 * let's use a big hammer.
514 * Hrtimer will adopt the new period on the next tick but this
515 * might be late already so we have to restart the timer as well.
516 */
517 watchdog_nmi_disable(cpu);
518 __smp_call_function_single(cpu, &data, 1);
519 watchdog_nmi_enable(cpu);
520}
521
522static void update_timers_all_cpus(void)
523{
524 int cpu;
525
526 get_online_cpus();
527 preempt_disable();
528 for_each_online_cpu(cpu)
529 update_timers(cpu);
530 preempt_enable();
531 put_online_cpus();
532}
533
534static int watchdog_enable_all_cpus(bool sample_period_changed)
490{ 535{
491 int err = 0; 536 int err = 0;
492 537
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
496 pr_err("Failed to create watchdog threads, disabled\n"); 541 pr_err("Failed to create watchdog threads, disabled\n");
497 else 542 else
498 watchdog_running = 1; 543 watchdog_running = 1;
544 } else if (sample_period_changed) {
545 update_timers_all_cpus();
499 } 546 }
500 547
501 return err; 548 return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
520 void __user *buffer, size_t *lenp, loff_t *ppos) 567 void __user *buffer, size_t *lenp, loff_t *ppos)
521{ 568{
522 int err, old_thresh, old_enabled; 569 int err, old_thresh, old_enabled;
570 static DEFINE_MUTEX(watchdog_proc_mutex);
523 571
572 mutex_lock(&watchdog_proc_mutex);
524 old_thresh = ACCESS_ONCE(watchdog_thresh); 573 old_thresh = ACCESS_ONCE(watchdog_thresh);
525 old_enabled = ACCESS_ONCE(watchdog_user_enabled); 574 old_enabled = ACCESS_ONCE(watchdog_user_enabled);
526 575
527 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 576 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
528 if (err || !write) 577 if (err || !write)
529 return err; 578 goto out;
530 579
531 set_sample_period(); 580 set_sample_period();
532 /* 581 /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
535 * watchdog_*_all_cpus() function takes care of this. 584 * watchdog_*_all_cpus() function takes care of this.
536 */ 585 */
537 if (watchdog_user_enabled && watchdog_thresh) 586 if (watchdog_user_enabled && watchdog_thresh)
538 err = watchdog_enable_all_cpus(); 587 err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
539 else 588 else
540 watchdog_disable_all_cpus(); 589 watchdog_disable_all_cpus();
541 590
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
544 watchdog_thresh = old_thresh; 593 watchdog_thresh = old_thresh;
545 watchdog_user_enabled = old_enabled; 594 watchdog_user_enabled = old_enabled;
546 } 595 }
547 596out:
597 mutex_unlock(&watchdog_proc_mutex);
548 return err; 598 return err;
549} 599}
550#endif /* CONFIG_SYSCTL */ 600#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
554 set_sample_period(); 604 set_sample_period();
555 605
556 if (watchdog_user_enabled) 606 if (watchdog_user_enabled)
557 watchdog_enable_all_cpus(); 607 watchdog_enable_all_cpus(false);
558} 608}
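
Editor's note: proc_dowatchdog() now serializes concurrent writers with a function-local mutex and rolls the threshold/enabled values back if re-enabling fails. The sketch below reproduces that serialize-and-roll-back pattern with pthreads; the setting names and apply_settings() helper are invented for illustration.

/* Hedged sketch: mutex-protected update with rollback on failure. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int threshold = 10;
static int enabled = 1;

/* Stand-in for watchdog_enable_all_cpus(); returns nonzero on failure. */
static int apply_settings(int period_changed)
{
    if (period_changed)
        printf("restarting timers for threshold=%d\n", threshold);
    return 0;
}

static int write_settings(int new_threshold, int new_enabled)
{
    int err, old_threshold, old_enabled;

    pthread_mutex_lock(&proc_mutex);
    old_threshold = threshold;
    old_enabled = enabled;

    threshold = new_threshold;
    enabled = new_enabled;

    err = enabled ? apply_settings(old_threshold != threshold) : 0;
    if (err) {
        /* Restore the previous values so a failed write is invisible. */
        threshold = old_threshold;
        enabled = old_enabled;
    }
    pthread_mutex_unlock(&proc_mutex);
    return err;
}

int main(void)
{
    write_settings(20, 1);
    return 0;
}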
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
14 14
15const char hex_asc[] = "0123456789abcdef"; 15const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 16EXPORT_SYMBOL(hex_asc);
17const char hex_asc_upper[] = "0123456789ABCDEF";
18EXPORT_SYMBOL(hex_asc_upper);
17 19
18/** 20/**
19 * hex_to_bin - convert a hex digit to its real value 21 * hex_to_bin - convert a hex digit to its real value
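
Editor's note: the hexdump hunk only exports an uppercase nibble table alongside the existing lowercase one. A tiny hedged example of how a caller would use such a table:

/* Hedged sketch: formatting one byte through an uppercase nibble table. */
#include <stdio.h>

static const char hex_asc_upper[] = "0123456789ABCDEF";

int main(void)
{
    unsigned char byte = 0xab;
    char out[3] = {
        hex_asc_upper[byte >> 4],       /* high nibble -> 'A' */
        hex_asc_upper[byte & 0x0f],     /* low nibble  -> 'B' */
        '\0',
    };
    printf("%s\n", out);
    return 0;
}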
diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..084f7b18d0c0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
592{ 592{
593 struct kobject *kobj = container_of(kref, struct kobject, kref); 593 struct kobject *kobj = container_of(kref, struct kobject, kref);
594#ifdef CONFIG_DEBUG_KOBJECT_RELEASE 594#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
595 pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n", 595 pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
596 kobject_name(kobj), kobj, __func__, kobj->parent); 596 kobject_name(kobj), kobj, __func__, kobj->parent);
597 INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); 597 INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
598 schedule_delayed_work(&kobj->release, HZ); 598 schedule_delayed_work(&kobj->release, HZ);
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
933 933
934bool kobj_ns_current_may_mount(enum kobj_ns_type type) 934bool kobj_ns_current_may_mount(enum kobj_ns_type type)
935{ 935{
936 bool may_mount = false; 936 bool may_mount = true;
937
938 if (type == KOBJ_NS_TYPE_NONE)
939 return true;
940 937
941 spin_lock(&kobj_ns_type_lock); 938 spin_lock(&kobj_ns_type_lock);
942 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 939 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
4#ifdef CONFIG_CMPXCHG_LOCKREF 4#ifdef CONFIG_CMPXCHG_LOCKREF
5 5
6/* 6/*
7 * Allow weakly-ordered memory architectures to provide barrier-less
8 * cmpxchg semantics for lockref updates.
9 */
10#ifndef cmpxchg64_relaxed
11# define cmpxchg64_relaxed cmpxchg64
12#endif
13
14/*
15 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
16 * This is useful for architectures with an expensive cpu_relax().
17 */
18#ifndef arch_mutex_cpu_relax
19# define arch_mutex_cpu_relax() cpu_relax()
20#endif
21
22/*
7 * Note that the "cmpxchg()" reloads the "old" value for the 23 * Note that the "cmpxchg()" reloads the "old" value for the
8 * failure case. 24 * failure case.
9 */ 25 */
@@ -14,12 +30,13 @@
14 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 30 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
15 struct lockref new = old, prev = old; \ 31 struct lockref new = old, prev = old; \
16 CODE \ 32 CODE \
17 old.lock_count = cmpxchg64(&lockref->lock_count, \ 33 old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
18 old.lock_count, new.lock_count); \ 34 old.lock_count, \
35 new.lock_count); \
19 if (likely(old.lock_count == prev.lock_count)) { \ 36 if (likely(old.lock_count == prev.lock_count)) { \
20 SUCCESS; \ 37 SUCCESS; \
21 } \ 38 } \
22 cpu_relax(); \ 39 arch_mutex_cpu_relax(); \
23 } \ 40 } \
24} while (0) 41} while (0)
25 42
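
Editor's note: the lockref hunk relaxes the memory ordering of the cmpxchg in CMPXCHG_LOOP and makes the relax hint overridable. The sketch below shows the same lockless retry shape with C11 atomics on a plain counter; it is not the real lock+count word layout, and the relax hint is only marked by a comment.

/* Hedged sketch: relaxed compare-and-swap retry loop in C11. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long count = 1;

static void get_ref(void)
{
    unsigned long old = atomic_load_explicit(&count, memory_order_relaxed);

    for (;;) {
        unsigned long new = old + 1;

        /* On failure, 'old' is reloaded with the observed value, mirroring
         * how cmpxchg64_relaxed() returns the current lock_count. */
        if (atomic_compare_exchange_weak_explicit(&count, &old, new,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed))
            break;
        /* arch_mutex_cpu_relax() would sit here on a real spin. */
    }
}

int main(void)
{
    get_ref();
    printf("count = %lu\n", atomic_load(&count));   /* 2 */
    return 0;
}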
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 7deeb6297a48..1a53d497a8c5 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
53 ref->release = release; 53 ref->release = release;
54 return 0; 54 return 0;
55} 55}
56EXPORT_SYMBOL_GPL(percpu_ref_init);
56 57
57/** 58/**
58 * percpu_ref_cancel_init - cancel percpu_ref_init() 59 * percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
84 free_percpu(ref->pcpu_count); 85 free_percpu(ref->pcpu_count);
85 } 86 }
86} 87}
88EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
87 89
88static void percpu_ref_kill_rcu(struct rcu_head *rcu) 90static void percpu_ref_kill_rcu(struct rcu_head *rcu)
89{ 91{
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
156 158
157 call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); 159 call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
158} 160}
161EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
diff --git a/mm/Kconfig b/mm/Kconfig
index 026771a9b097..394838f489eb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
183config MEMORY_HOTREMOVE 183config MEMORY_HOTREMOVE
184 bool "Allow for memory hot remove" 184 bool "Allow for memory hot remove"
185 select MEMORY_ISOLATION 185 select MEMORY_ISOLATION
186 select HAVE_BOOTMEM_INFO_NODE if X86_64 186 select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE 187 depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
188 depends on MIGRATION 188 depends on MIGRATION
189 189
diff --git a/mm/bounce.c b/mm/bounce.c
index c9f0a4339a7d..5a7d58fb883b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
204 struct bio_vec *to, *from; 204 struct bio_vec *to, *from;
205 unsigned i; 205 unsigned i;
206 206
207 if (force)
208 goto bounce;
207 bio_for_each_segment(from, *bio_orig, i) 209 bio_for_each_segment(from, *bio_orig, i)
208 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q)) 210 if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
209 goto bounce; 211 goto bounce;
diff --git a/mm/compaction.c b/mm/compaction.c
index c43789388cd8..b5326b141a25 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
677 pfn -= pageblock_nr_pages) { 677 pfn -= pageblock_nr_pages) {
678 unsigned long isolated; 678 unsigned long isolated;
679 679
680 /*
681 * This can iterate a massively long zone without finding any
682 * suitable migration targets, so periodically check if we need
683 * to schedule.
684 */
685 cond_resched();
686
680 if (!pfn_valid(pfn)) 687 if (!pfn_valid(pfn))
681 continue; 688 continue;
682 689
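
Editor's note: the compaction hunk inserts a periodic cond_resched() into a pfn scan that can walk an entire zone. A hedged userspace analogue, yielding once per block of a long loop; the block size and the "suitable block" test are arbitrary.

/* Hedged sketch: yielding periodically inside a long scan. */
#include <sched.h>
#include <stdio.h>

#define BLOCK 4096UL    /* stand-in for pageblock_nr_pages */

int main(void)
{
    unsigned long pfn, found = 0;

    for (pfn = 10 * 1000 * 1000; pfn >= BLOCK; pfn -= BLOCK) {
        /* Give the scheduler a chance on every block so one long scan
         * cannot hog the CPU. */
        sched_yield();

        if (pfn % 3 == 0)        /* fake "suitable migration target" test */
            found++;
    }
    printf("blocks accepted: %lu\n", found);
    return 0;
}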
diff --git a/mm/filemap.c b/mm/filemap.c
index 1e6aec4a2d2e..ae4846ff4849 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1616,7 +1616,6 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1616 struct inode *inode = mapping->host; 1616 struct inode *inode = mapping->host;
1617 pgoff_t offset = vmf->pgoff; 1617 pgoff_t offset = vmf->pgoff;
1618 struct page *page; 1618 struct page *page;
1619 bool memcg_oom;
1620 pgoff_t size; 1619 pgoff_t size;
1621 int ret = 0; 1620 int ret = 0;
1622 1621
@@ -1625,11 +1624,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1625 return VM_FAULT_SIGBUS; 1624 return VM_FAULT_SIGBUS;
1626 1625
1627 /* 1626 /*
1628 * Do we have something in the page cache already? Either 1627 * Do we have something in the page cache already?
1629 * way, try readahead, but disable the memcg OOM killer for it
1630 * as readahead is optional and no errors are propagated up
1631 * the fault stack. The OOM killer is enabled while trying to
1632 * instantiate the faulting page individually below.
1633 */ 1628 */
1634 page = find_get_page(mapping, offset); 1629 page = find_get_page(mapping, offset);
1635 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 1630 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
@@ -1637,14 +1632,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1637 * We found the page, so try async readahead before 1632 * We found the page, so try async readahead before
1638 * waiting for the lock. 1633 * waiting for the lock.
1639 */ 1634 */
1640 memcg_oom = mem_cgroup_toggle_oom(false);
1641 do_async_mmap_readahead(vma, ra, file, page, offset); 1635 do_async_mmap_readahead(vma, ra, file, page, offset);
1642 mem_cgroup_toggle_oom(memcg_oom);
1643 } else if (!page) { 1636 } else if (!page) {
1644 /* No page in the page cache at all */ 1637 /* No page in the page cache at all */
1645 memcg_oom = mem_cgroup_toggle_oom(false);
1646 do_sync_mmap_readahead(vma, ra, file, offset); 1638 do_sync_mmap_readahead(vma, ra, file, offset);
1647 mem_cgroup_toggle_oom(memcg_oom);
1648 count_vm_event(PGMAJFAULT); 1639 count_vm_event(PGMAJFAULT);
1649 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 1640 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1650 ret = VM_FAULT_MAJOR; 1641 ret = VM_FAULT_MAJOR;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7489884682d8..610e3df2768a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2697,6 +2697,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2697 2697
2698 mmun_start = haddr; 2698 mmun_start = haddr;
2699 mmun_end = haddr + HPAGE_PMD_SIZE; 2699 mmun_end = haddr + HPAGE_PMD_SIZE;
2700again:
2700 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 2701 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2701 spin_lock(&mm->page_table_lock); 2702 spin_lock(&mm->page_table_lock);
2702 if (unlikely(!pmd_trans_huge(*pmd))) { 2703 if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2719,7 +2720,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2719 split_huge_page(page); 2720 split_huge_page(page);
2720 2721
2721 put_page(page); 2722 put_page(page);
2722 BUG_ON(pmd_trans_huge(*pmd)); 2723
2724 /*
2725 * We don't always have down_write of mmap_sem here: a racing
2726 * do_huge_pmd_wp_page() might have copied-on-write to another
2727 * huge page before our split_huge_page() got the anon_vma lock.
2728 */
2729 if (unlikely(pmd_trans_huge(*pmd)))
2730 goto again;
2723} 2731}
2724 2732
2725void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, 2733void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b49579c7f2a5..0b7656e804d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -653,6 +653,7 @@ static void free_huge_page(struct page *page)
653 BUG_ON(page_count(page)); 653 BUG_ON(page_count(page));
654 BUG_ON(page_mapcount(page)); 654 BUG_ON(page_mapcount(page));
655 restore_reserve = PagePrivate(page); 655 restore_reserve = PagePrivate(page);
656 ClearPagePrivate(page);
656 657
657 spin_lock(&hugetlb_lock); 658 spin_lock(&hugetlb_lock);
658 hugetlb_cgroup_uncharge_page(hstate_index(h), 659 hugetlb_cgroup_uncharge_page(hstate_index(h),
@@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
695 /* we rely on prep_new_huge_page to set the destructor */ 696 /* we rely on prep_new_huge_page to set the destructor */
696 set_compound_order(page, order); 697 set_compound_order(page, order);
697 __SetPageHead(page); 698 __SetPageHead(page);
699 __ClearPageReserved(page);
698 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 700 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
699 __SetPageTail(p); 701 __SetPageTail(p);
702 /*
703 * For gigantic hugepages allocated through bootmem at
704 * boot, it's safer to be consistent with the not-gigantic
705 * hugepages and clear the PG_reserved bit from all tail pages
 706 * too. Otherwise drivers using get_user_pages() to access tail 
707 * pages may get the reference counting wrong if they see
708 * PG_reserved set on a tail page (despite the head page not
709 * having PG_reserved set). Enforcing this consistency between
710 * head and tail pages allows drivers to optimize away a check
 711 * on the head page when they need to know if put_page() is needed 
712 * after get_user_pages().
713 */
714 __ClearPageReserved(p);
700 set_page_count(p, 0); 715 set_page_count(p, 0);
701 p->first_page = page; 716 p->first_page = page;
702 } 717 }
@@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void)
1329#else 1344#else
1330 page = virt_to_page(m); 1345 page = virt_to_page(m);
1331#endif 1346#endif
1332 __ClearPageReserved(page);
1333 WARN_ON(page_count(page) != 1); 1347 WARN_ON(page_count(page) != 1);
1334 prep_compound_huge_page(page, h->order); 1348 prep_compound_huge_page(page, h->order);
1349 WARN_ON(PageReserved(page));
1335 prep_new_huge_page(h, page, page_to_nid(page)); 1350 prep_new_huge_page(h, page, page_to_nid(page));
1336 /* 1351 /*
1337 * If we had gigantic hugepages allocated at boot time, we need 1352 * If we had gigantic hugepages allocated at boot time, we need
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index afc2daa91c60..4c84678371eb 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
20 if (!capable(CAP_SYS_ADMIN)) 20 if (!capable(CAP_SYS_ADMIN))
21 return -EPERM; 21 return -EPERM;
22 22
23 if (!hwpoison_filter_enable)
24 goto inject;
25 if (!pfn_valid(pfn)) 23 if (!pfn_valid(pfn))
26 return -ENXIO; 24 return -ENXIO;
27 25
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
33 if (!get_page_unless_zero(hpage)) 31 if (!get_page_unless_zero(hpage))
34 return 0; 32 return 0;
35 33
34 if (!hwpoison_filter_enable)
35 goto inject;
36
36 if (!PageLRU(p) && !PageHuge(p)) 37 if (!PageLRU(p) && !PageHuge(p))
37 shake_page(p, 0); 38 shake_page(p, 0);
38 /* 39 /*
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
343 */ 343 */
344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) 344static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
345{ 345{
346 struct page *p;
346 if (!capable(CAP_SYS_ADMIN)) 347 if (!capable(CAP_SYS_ADMIN))
347 return -EPERM; 348 return -EPERM;
348 for (; start < end; start += PAGE_SIZE) { 349 for (; start < end; start += PAGE_SIZE <<
349 struct page *p; 350 compound_order(compound_head(p))) {
350 int ret; 351 int ret;
351 352
352 ret = get_user_pages_fast(start, 1, 0, &p); 353 ret = get_user_pages_fast(start, 1, 0, &p);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5ff3ce13029..34d3ca9572d6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -39,6 +39,7 @@
39#include <linux/limits.h> 39#include <linux/limits.h>
40#include <linux/export.h> 40#include <linux/export.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/rbtree.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/swap.h> 44#include <linux/swap.h>
44#include <linux/swapops.h> 45#include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
160 161
161 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 162 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
162 163
164 struct rb_node tree_node; /* RB tree node */
165 unsigned long long usage_in_excess;/* Set to the value by which */
166 /* the soft limit is exceeded*/
167 bool on_tree;
163 struct mem_cgroup *memcg; /* Back pointer, we cannot */ 168 struct mem_cgroup *memcg; /* Back pointer, we cannot */
164 /* use container_of */ 169 /* use container_of */
165}; 170};
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
168 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; 173 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
169}; 174};
170 175
176/*
177 * Cgroups above their limits are maintained in a RB-Tree, independent of
178 * their hierarchy representation
179 */
180
181struct mem_cgroup_tree_per_zone {
182 struct rb_root rb_root;
183 spinlock_t lock;
184};
185
186struct mem_cgroup_tree_per_node {
187 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
188};
189
190struct mem_cgroup_tree {
191 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
192};
193
194static struct mem_cgroup_tree soft_limit_tree __read_mostly;
195
171struct mem_cgroup_threshold { 196struct mem_cgroup_threshold {
172 struct eventfd_ctx *eventfd; 197 struct eventfd_ctx *eventfd;
173 u64 threshold; 198 u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
303 atomic_t numainfo_events; 328 atomic_t numainfo_events;
304 atomic_t numainfo_updating; 329 atomic_t numainfo_updating;
305#endif 330#endif
306 /*
307 * Protects soft_contributed transitions.
308 * See mem_cgroup_update_soft_limit
309 */
310 spinlock_t soft_lock;
311
312 /*
313 * If true then this group has increased parents' children_in_excess
314 * when it got over the soft limit.
315 * When a group falls bellow the soft limit, parents' children_in_excess
316 * is decreased and soft_contributed changed to false.
317 */
318 bool soft_contributed;
319
320 /* Number of children that are in soft limit excess */
321 atomic_t children_in_excess;
322 331
323 struct mem_cgroup_per_node *nodeinfo[0]; 332 struct mem_cgroup_per_node *nodeinfo[0];
324 /* WARNING: nodeinfo must be the last member here */ 333 /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
422 * limit reclaim to prevent infinite loops, if they ever occur. 431 * limit reclaim to prevent infinite loops, if they ever occur.
423 */ 432 */
424#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100 433#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
434#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
425 435
426enum charge_type { 436enum charge_type {
427 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 437 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
648 return mem_cgroup_zoneinfo(memcg, nid, zid); 658 return mem_cgroup_zoneinfo(memcg, nid, zid);
649} 659}
650 660
661static struct mem_cgroup_tree_per_zone *
662soft_limit_tree_node_zone(int nid, int zid)
663{
664 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
665}
666
667static struct mem_cgroup_tree_per_zone *
668soft_limit_tree_from_page(struct page *page)
669{
670 int nid = page_to_nid(page);
671 int zid = page_zonenum(page);
672
673 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
674}
675
676static void
677__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
678 struct mem_cgroup_per_zone *mz,
679 struct mem_cgroup_tree_per_zone *mctz,
680 unsigned long long new_usage_in_excess)
681{
682 struct rb_node **p = &mctz->rb_root.rb_node;
683 struct rb_node *parent = NULL;
684 struct mem_cgroup_per_zone *mz_node;
685
686 if (mz->on_tree)
687 return;
688
689 mz->usage_in_excess = new_usage_in_excess;
690 if (!mz->usage_in_excess)
691 return;
692 while (*p) {
693 parent = *p;
694 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
695 tree_node);
696 if (mz->usage_in_excess < mz_node->usage_in_excess)
697 p = &(*p)->rb_left;
698 /*
699 * We can't avoid mem cgroups that are over their soft
700 * limit by the same amount
701 */
702 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
703 p = &(*p)->rb_right;
704 }
705 rb_link_node(&mz->tree_node, parent, p);
706 rb_insert_color(&mz->tree_node, &mctz->rb_root);
707 mz->on_tree = true;
708}
709
710static void
711__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
712 struct mem_cgroup_per_zone *mz,
713 struct mem_cgroup_tree_per_zone *mctz)
714{
715 if (!mz->on_tree)
716 return;
717 rb_erase(&mz->tree_node, &mctz->rb_root);
718 mz->on_tree = false;
719}
720
721static void
722mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
723 struct mem_cgroup_per_zone *mz,
724 struct mem_cgroup_tree_per_zone *mctz)
725{
726 spin_lock(&mctz->lock);
727 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
728 spin_unlock(&mctz->lock);
729}
730
731
732static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
733{
734 unsigned long long excess;
735 struct mem_cgroup_per_zone *mz;
736 struct mem_cgroup_tree_per_zone *mctz;
737 int nid = page_to_nid(page);
738 int zid = page_zonenum(page);
739 mctz = soft_limit_tree_from_page(page);
740
741 /*
 742 * Necessary to update all ancestors when hierarchy is used,
743 * because their event counter is not touched.
744 */
745 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
746 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
747 excess = res_counter_soft_limit_excess(&memcg->res);
748 /*
749 * We have to update the tree if mz is on RB-tree or
750 * mem is over its softlimit.
751 */
752 if (excess || mz->on_tree) {
753 spin_lock(&mctz->lock);
754 /* if on-tree, remove it */
755 if (mz->on_tree)
756 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
757 /*
758 * Insert again. mz->usage_in_excess will be updated.
759 * If excess is 0, no tree ops.
760 */
761 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
762 spin_unlock(&mctz->lock);
763 }
764 }
765}
766
767static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
768{
769 int node, zone;
770 struct mem_cgroup_per_zone *mz;
771 struct mem_cgroup_tree_per_zone *mctz;
772
773 for_each_node(node) {
774 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
775 mz = mem_cgroup_zoneinfo(memcg, node, zone);
776 mctz = soft_limit_tree_node_zone(node, zone);
777 mem_cgroup_remove_exceeded(memcg, mz, mctz);
778 }
779 }
780}
781
782static struct mem_cgroup_per_zone *
783__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
784{
785 struct rb_node *rightmost = NULL;
786 struct mem_cgroup_per_zone *mz;
787
788retry:
789 mz = NULL;
790 rightmost = rb_last(&mctz->rb_root);
791 if (!rightmost)
792 goto done; /* Nothing to reclaim from */
793
794 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
795 /*
796 * Remove the node now but someone else can add it back,
 797 * we will add it back at the end of reclaim to its correct
798 * position in the tree.
799 */
800 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
801 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
802 !css_tryget(&mz->memcg->css))
803 goto retry;
804done:
805 return mz;
806}
807
808static struct mem_cgroup_per_zone *
809mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
810{
811 struct mem_cgroup_per_zone *mz;
812
813 spin_lock(&mctz->lock);
814 mz = __mem_cgroup_largest_soft_limit_node(mctz);
815 spin_unlock(&mctz->lock);
816 return mz;
817}
818
651/* 819/*
652 * Implementation Note: reading percpu statistics for memcg. 820 * Implementation Note: reading percpu statistics for memcg.
653 * 821 *
@@ -698,6 +866,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
698 unsigned long val = 0; 866 unsigned long val = 0;
699 int cpu; 867 int cpu;
700 868
869 get_online_cpus();
701 for_each_online_cpu(cpu) 870 for_each_online_cpu(cpu)
702 val += per_cpu(memcg->stat->events[idx], cpu); 871 val += per_cpu(memcg->stat->events[idx], cpu);
703#ifdef CONFIG_HOTPLUG_CPU 872#ifdef CONFIG_HOTPLUG_CPU
@@ -705,6 +874,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
705 val += memcg->nocpu_base.events[idx]; 874 val += memcg->nocpu_base.events[idx];
706 spin_unlock(&memcg->pcp_counter_lock); 875 spin_unlock(&memcg->pcp_counter_lock);
707#endif 876#endif
877 put_online_cpus();
708 return val; 878 return val;
709} 879}
710 880
@@ -822,48 +992,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
822} 992}
823 993
824/* 994/*
825 * Called from rate-limited memcg_check_events when enough
826 * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
827 * that all the parents up the hierarchy will be notified that this group
828 * is in excess or that it is not in excess anymore. mmecg->soft_contributed
829 * makes the transition a single action whenever the state flips from one to
830 * the other.
831 */
832static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
833{
834 unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
835 struct mem_cgroup *parent = memcg;
836 int delta = 0;
837
838 spin_lock(&memcg->soft_lock);
839 if (excess) {
840 if (!memcg->soft_contributed) {
841 delta = 1;
842 memcg->soft_contributed = true;
843 }
844 } else {
845 if (memcg->soft_contributed) {
846 delta = -1;
847 memcg->soft_contributed = false;
848 }
849 }
850
851 /*
852 * Necessary to update all ancestors when hierarchy is used
853 * because their event counter is not touched.
854 * We track children even outside the hierarchy for the root
855 * cgroup because tree walk starting at root should visit
856 * all cgroups and we want to prevent from pointless tree
857 * walk if no children is below the limit.
858 */
859 while (delta && (parent = parent_mem_cgroup(parent)))
860 atomic_add(delta, &parent->children_in_excess);
861 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
862 atomic_add(delta, &root_mem_cgroup->children_in_excess);
863 spin_unlock(&memcg->soft_lock);
864}
865
866/*
867 * Check events in order. 995 * Check events in order.
868 * 996 *
869 */ 997 */
@@ -886,7 +1014,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
886 1014
887 mem_cgroup_threshold(memcg); 1015 mem_cgroup_threshold(memcg);
888 if (unlikely(do_softlimit)) 1016 if (unlikely(do_softlimit))
889 mem_cgroup_update_soft_limit(memcg); 1017 mem_cgroup_update_tree(memcg, page);
890#if MAX_NUMNODES > 1 1018#if MAX_NUMNODES > 1
891 if (unlikely(do_numainfo)) 1019 if (unlikely(do_numainfo))
892 atomic_inc(&memcg->numainfo_events); 1020 atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1057,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
929 return memcg; 1057 return memcg;
930} 1058}
931 1059
932static enum mem_cgroup_filter_t
933mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
934 mem_cgroup_iter_filter cond)
935{
936 if (!cond)
937 return VISIT;
938 return cond(memcg, root);
939}
940
941/* 1060/*
942 * Returns a next (in a pre-order walk) alive memcg (with elevated css 1061 * Returns a next (in a pre-order walk) alive memcg (with elevated css
943 * ref. count) or NULL if the whole root's subtree has been visited. 1062 * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1064,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
945 * helper function to be used by mem_cgroup_iter 1064 * helper function to be used by mem_cgroup_iter
946 */ 1065 */
947static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, 1066static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
948 struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond) 1067 struct mem_cgroup *last_visited)
949{ 1068{
950 struct cgroup_subsys_state *prev_css, *next_css; 1069 struct cgroup_subsys_state *prev_css, *next_css;
951 1070
@@ -963,31 +1082,11 @@ skip_node:
963 if (next_css) { 1082 if (next_css) {
964 struct mem_cgroup *mem = mem_cgroup_from_css(next_css); 1083 struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
965 1084
966 switch (mem_cgroup_filter(mem, root, cond)) { 1085 if (css_tryget(&mem->css))
967 case SKIP: 1086 return mem;
1087 else {
968 prev_css = next_css; 1088 prev_css = next_css;
969 goto skip_node; 1089 goto skip_node;
970 case SKIP_TREE:
971 if (mem == root)
972 return NULL;
973 /*
974 * css_rightmost_descendant is not an optimal way to
975 * skip through a subtree (especially for imbalanced
976 * trees leaning to right) but that's what we have right
977 * now. More effective solution would be traversing
978 * right-up for first non-NULL without calling
979 * css_next_descendant_pre afterwards.
980 */
981 prev_css = css_rightmost_descendant(next_css);
982 goto skip_node;
983 case VISIT:
984 if (css_tryget(&mem->css))
985 return mem;
986 else {
987 prev_css = next_css;
988 goto skip_node;
989 }
990 break;
991 } 1090 }
992 } 1091 }
993 1092
@@ -1051,7 +1150,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1051 * @root: hierarchy root 1150 * @root: hierarchy root
1052 * @prev: previously returned memcg, NULL on first invocation 1151 * @prev: previously returned memcg, NULL on first invocation
1053 * @reclaim: cookie for shared reclaim walks, NULL for full walks 1152 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1054 * @cond: filter for visited nodes, NULL for no filter
1055 * 1153 *
1056 * Returns references to children of the hierarchy below @root, or 1154 * Returns references to children of the hierarchy below @root, or
1057 * @root itself, or %NULL after a full round-trip. 1155 * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1162,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1064 * divide up the memcgs in the hierarchy among all concurrent 1162 * divide up the memcgs in the hierarchy among all concurrent
1065 * reclaimers operating on the same zone and priority. 1163 * reclaimers operating on the same zone and priority.
1066 */ 1164 */
1067struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root, 1165struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1068 struct mem_cgroup *prev, 1166 struct mem_cgroup *prev,
1069 struct mem_cgroup_reclaim_cookie *reclaim, 1167 struct mem_cgroup_reclaim_cookie *reclaim)
1070 mem_cgroup_iter_filter cond)
1071{ 1168{
1072 struct mem_cgroup *memcg = NULL; 1169 struct mem_cgroup *memcg = NULL;
1073 struct mem_cgroup *last_visited = NULL; 1170 struct mem_cgroup *last_visited = NULL;
1074 1171
1075 if (mem_cgroup_disabled()) { 1172 if (mem_cgroup_disabled())
1076 /* first call must return non-NULL, second return NULL */ 1173 return NULL;
1077 return (struct mem_cgroup *)(unsigned long)!prev;
1078 }
1079 1174
1080 if (!root) 1175 if (!root)
1081 root = root_mem_cgroup; 1176 root = root_mem_cgroup;
@@ -1086,9 +1181,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1086 if (!root->use_hierarchy && root != root_mem_cgroup) { 1181 if (!root->use_hierarchy && root != root_mem_cgroup) {
1087 if (prev) 1182 if (prev)
1088 goto out_css_put; 1183 goto out_css_put;
1089 if (mem_cgroup_filter(root, root, cond) == VISIT) 1184 return root;
1090 return root;
1091 return NULL;
1092 } 1185 }
1093 1186
1094 rcu_read_lock(); 1187 rcu_read_lock();
@@ -1111,7 +1204,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1111 last_visited = mem_cgroup_iter_load(iter, root, &seq); 1204 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1112 } 1205 }
1113 1206
1114 memcg = __mem_cgroup_iter_next(root, last_visited, cond); 1207 memcg = __mem_cgroup_iter_next(root, last_visited);
1115 1208
1116 if (reclaim) { 1209 if (reclaim) {
1117 mem_cgroup_iter_update(iter, last_visited, memcg, seq); 1210 mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1215,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
1122 reclaim->generation = iter->generation; 1215 reclaim->generation = iter->generation;
1123 } 1216 }
1124 1217
1125 /* 1218 if (prev && !memcg)
1126 * We have finished the whole tree walk or no group has been
1127 * visited because filter told us to skip the root node.
1128 */
1129 if (!memcg && (prev || (cond && !last_visited)))
1130 goto out_unlock; 1219 goto out_unlock;
1131 } 1220 }
1132out_unlock: 1221out_unlock:
@@ -1767,7 +1856,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1767 return total; 1856 return total;
1768} 1857}
1769 1858
1770#if MAX_NUMNODES > 1
1771/** 1859/**
1772 * test_mem_cgroup_node_reclaimable 1860 * test_mem_cgroup_node_reclaimable
1773 * @memcg: the target memcg 1861 * @memcg: the target memcg
@@ -1790,6 +1878,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1790 return false; 1878 return false;
1791 1879
1792} 1880}
1881#if MAX_NUMNODES > 1
1793 1882
1794/* 1883/*
1795 * Always updating the nodemask is not very good - even if we have an empty 1884 * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1946,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1857 return node; 1946 return node;
1858} 1947}
1859 1948
1949/*
1950 * Check all nodes whether it contains reclaimable pages or not.
1951 * For quick scan, we make use of scan_nodes. This will allow us to skip
 1953 * unused nodes. But scan_nodes is lazily updated and may not contain
 1954 * enough new information. We need to double check.
1954 */
1955static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1956{
1957 int nid;
1958
1959 /*
1960 * quick check...making use of scan_node.
1961 * We can skip unused nodes.
1962 */
1963 if (!nodes_empty(memcg->scan_nodes)) {
1964 for (nid = first_node(memcg->scan_nodes);
1965 nid < MAX_NUMNODES;
1966 nid = next_node(nid, memcg->scan_nodes)) {
1967
1968 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1969 return true;
1970 }
1971 }
1972 /*
1973 * Check rest of nodes.
1974 */
1975 for_each_node_state(nid, N_MEMORY) {
1976 if (node_isset(nid, memcg->scan_nodes))
1977 continue;
1978 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1979 return true;
1980 }
1981 return false;
1982}
1983
1860#else 1984#else
1861int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) 1985int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1862{ 1986{
1863 return 0; 1987 return 0;
1864} 1988}
1865 1989
1866#endif 1990static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1867
1868/*
1869 * A group is eligible for the soft limit reclaim under the given root
1870 * hierarchy if
1871 * a) it is over its soft limit
1872 * b) any parent up the hierarchy is over its soft limit
1873 *
1874 * If the given group doesn't have any children over the limit then it
1875 * doesn't make any sense to iterate its subtree.
1876 */
1877enum mem_cgroup_filter_t
1878mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
1879 struct mem_cgroup *root)
1880{ 1991{
1881 struct mem_cgroup *parent; 1992 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1882 1993}
1883 if (!memcg) 1994#endif
1884 memcg = root_mem_cgroup;
1885 parent = memcg;
1886
1887 if (res_counter_soft_limit_excess(&memcg->res))
1888 return VISIT;
1889 1995
1890 /* 1996static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1891 * If any parent up to the root in the hierarchy is over its soft limit 1997 struct zone *zone,
1892 * then we have to obey and reclaim from this group as well. 1998 gfp_t gfp_mask,
1893 */ 1999 unsigned long *total_scanned)
1894 while ((parent = parent_mem_cgroup(parent))) { 2000{
1895 if (res_counter_soft_limit_excess(&parent->res)) 2001 struct mem_cgroup *victim = NULL;
1896 return VISIT; 2002 int total = 0;
1897 if (parent == root) 2003 int loop = 0;
2004 unsigned long excess;
2005 unsigned long nr_scanned;
2006 struct mem_cgroup_reclaim_cookie reclaim = {
2007 .zone = zone,
2008 .priority = 0,
2009 };
2010
2011 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2012
2013 while (1) {
2014 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2015 if (!victim) {
2016 loop++;
2017 if (loop >= 2) {
2018 /*
2019 * If we have not been able to reclaim
 2020 * anything, it might be because there are
2021 * no reclaimable pages under this hierarchy
2022 */
2023 if (!total)
2024 break;
2025 /*
2026 * We want to do more targeted reclaim.
2027 * excess >> 2 is not to excessive so as to
2028 * reclaim too much, nor too less that we keep
2029 * coming back to reclaim from this cgroup
2030 */
2031 if (total >= (excess >> 2) ||
2032 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2033 break;
2034 }
2035 continue;
2036 }
2037 if (!mem_cgroup_reclaimable(victim, false))
2038 continue;
2039 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2040 zone, &nr_scanned);
2041 *total_scanned += nr_scanned;
2042 if (!res_counter_soft_limit_excess(&root_memcg->res))
1898 break; 2043 break;
1899 } 2044 }
1900 2045 mem_cgroup_iter_break(root_memcg, victim);
1901 if (!atomic_read(&memcg->children_in_excess)) 2046 return total;
1902 return SKIP_TREE;
1903 return SKIP;
1904} 2047}
1905 2048
1906static DEFINE_SPINLOCK(memcg_oom_lock); 2049static DEFINE_SPINLOCK(memcg_oom_lock);
@@ -2018,110 +2161,59 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
2018 memcg_wakeup_oom(memcg); 2161 memcg_wakeup_oom(memcg);
2019} 2162}
2020 2163
2021/*
2022 * try to call OOM killer
2023 */
2024static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 2164static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2025{ 2165{
2026 bool locked;
2027 int wakeups;
2028
2029 if (!current->memcg_oom.may_oom) 2166 if (!current->memcg_oom.may_oom)
2030 return; 2167 return;
2031
2032 current->memcg_oom.in_memcg_oom = 1;
2033
2034 /* 2168 /*
2035 * As with any blocking lock, a contender needs to start 2169 * We are in the middle of the charge context here, so we
2036 * listening for wakeups before attempting the trylock, 2170 * don't want to block when potentially sitting on a callstack
2037 * otherwise it can miss the wakeup from the unlock and sleep 2171 * that holds all kinds of filesystem and mm locks.
2038 * indefinitely. This is just open-coded because our locking 2172 *
2039 * is so particular to memcg hierarchies. 2173 * Also, the caller may handle a failed allocation gracefully
2174 * (like optional page cache readahead) and so an OOM killer
2175 * invocation might not even be necessary.
2176 *
2177 * That's why we don't do anything here except remember the
2178 * OOM context and then deal with it at the end of the page
2179 * fault when the stack is unwound, the locks are released,
2180 * and when we know whether the fault was overall successful.
2040 */ 2181 */
2041 wakeups = atomic_read(&memcg->oom_wakeups); 2182 css_get(&memcg->css);
2042 mem_cgroup_mark_under_oom(memcg); 2183 current->memcg_oom.memcg = memcg;
2043 2184 current->memcg_oom.gfp_mask = mask;
2044 locked = mem_cgroup_oom_trylock(memcg); 2185 current->memcg_oom.order = order;
2045
2046 if (locked)
2047 mem_cgroup_oom_notify(memcg);
2048
2049 if (locked && !memcg->oom_kill_disable) {
2050 mem_cgroup_unmark_under_oom(memcg);
2051 mem_cgroup_out_of_memory(memcg, mask, order);
2052 mem_cgroup_oom_unlock(memcg);
2053 /*
2054 * There is no guarantee that an OOM-lock contender
2055 * sees the wakeups triggered by the OOM kill
2056 * uncharges. Wake any sleepers explicitely.
2057 */
2058 memcg_oom_recover(memcg);
2059 } else {
2060 /*
2061 * A system call can just return -ENOMEM, but if this
2062 * is a page fault and somebody else is handling the
2063 * OOM already, we need to sleep on the OOM waitqueue
2064 * for this memcg until the situation is resolved.
2065 * Which can take some time because it might be
2066 * handled by a userspace task.
2067 *
2068 * However, this is the charge context, which means
2069 * that we may sit on a large call stack and hold
2070 * various filesystem locks, the mmap_sem etc. and we
2071 * don't want the OOM handler to deadlock on them
2072 * while we sit here and wait. Store the current OOM
2073 * context in the task_struct, then return -ENOMEM.
2074 * At the end of the page fault handler, with the
2075 * stack unwound, pagefault_out_of_memory() will check
2076 * back with us by calling
2077 * mem_cgroup_oom_synchronize(), possibly putting the
2078 * task to sleep.
2079 */
2080 current->memcg_oom.oom_locked = locked;
2081 current->memcg_oom.wakeups = wakeups;
2082 css_get(&memcg->css);
2083 current->memcg_oom.wait_on_memcg = memcg;
2084 }
2085} 2186}
2086 2187
2087/** 2188/**
2088 * mem_cgroup_oom_synchronize - complete memcg OOM handling 2189 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2190 * @handle: actually kill/wait or just clean up the OOM state
2089 * 2191 *
2090 * This has to be called at the end of a page fault if the the memcg 2192 * This has to be called at the end of a page fault if the memcg OOM
2091 * OOM handler was enabled and the fault is returning %VM_FAULT_OOM. 2193 * handler was enabled.
2092 * 2194 *
2093 * Memcg supports userspace OOM handling, so failed allocations must 2195 * Memcg supports userspace OOM handling where failed allocations must
2094 * sleep on a waitqueue until the userspace task resolves the 2196 * sleep on a waitqueue until the userspace task resolves the
2095 * situation. Sleeping directly in the charge context with all kinds 2197 * situation. Sleeping directly in the charge context with all kinds
2096 * of locks held is not a good idea, instead we remember an OOM state 2198 * of locks held is not a good idea, instead we remember an OOM state
2097 * in the task and mem_cgroup_oom_synchronize() has to be called at 2199 * in the task and mem_cgroup_oom_synchronize() has to be called at
2098 * the end of the page fault to put the task to sleep and clean up the 2200 * the end of the page fault to complete the OOM handling.
2099 * OOM state.
2100 * 2201 *
2101 * Returns %true if an ongoing memcg OOM situation was detected and 2202 * Returns %true if an ongoing memcg OOM situation was detected and
2102 * finalized, %false otherwise. 2203 * completed, %false otherwise.
2103 */ 2204 */
2104bool mem_cgroup_oom_synchronize(void) 2205bool mem_cgroup_oom_synchronize(bool handle)
2105{ 2206{
2207 struct mem_cgroup *memcg = current->memcg_oom.memcg;
2106 struct oom_wait_info owait; 2208 struct oom_wait_info owait;
2107 struct mem_cgroup *memcg; 2209 bool locked;
2108 2210
2109 /* OOM is global, do not handle */ 2211 /* OOM is global, do not handle */
2110 if (!current->memcg_oom.in_memcg_oom)
2111 return false;
2112
2113 /*
2114 * We invoked the OOM killer but there is a chance that a kill
2115 * did not free up any charges. Everybody else might already
2116 * be sleeping, so restart the fault and keep the rampage
2117 * going until some charges are released.
2118 */
2119 memcg = current->memcg_oom.wait_on_memcg;
2120 if (!memcg) 2212 if (!memcg)
2121 goto out; 2213 return false;
2122 2214
2123 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 2215 if (!handle)
2124 goto out_memcg; 2216 goto cleanup;
2125 2217
2126 owait.memcg = memcg; 2218 owait.memcg = memcg;
2127 owait.wait.flags = 0; 2219 owait.wait.flags = 0;
@@ -2130,13 +2222,25 @@ bool mem_cgroup_oom_synchronize(void)
2130 INIT_LIST_HEAD(&owait.wait.task_list); 2222 INIT_LIST_HEAD(&owait.wait.task_list);
2131 2223
2132 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); 2224 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2133 /* Only sleep if we didn't miss any wakeups since OOM */ 2225 mem_cgroup_mark_under_oom(memcg);
2134 if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups) 2226
2227 locked = mem_cgroup_oom_trylock(memcg);
2228
2229 if (locked)
2230 mem_cgroup_oom_notify(memcg);
2231
2232 if (locked && !memcg->oom_kill_disable) {
2233 mem_cgroup_unmark_under_oom(memcg);
2234 finish_wait(&memcg_oom_waitq, &owait.wait);
2235 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2236 current->memcg_oom.order);
2237 } else {
2135 schedule(); 2238 schedule();
2136 finish_wait(&memcg_oom_waitq, &owait.wait); 2239 mem_cgroup_unmark_under_oom(memcg);
2137out_memcg: 2240 finish_wait(&memcg_oom_waitq, &owait.wait);
2138 mem_cgroup_unmark_under_oom(memcg); 2241 }
2139 if (current->memcg_oom.oom_locked) { 2242
2243 if (locked) {
2140 mem_cgroup_oom_unlock(memcg); 2244 mem_cgroup_oom_unlock(memcg);
2141 /* 2245 /*
2142 * There is no guarantee that an OOM-lock contender 2246 * There is no guarantee that an OOM-lock contender
@@ -2145,10 +2249,9 @@ out_memcg:
2145 */ 2249 */
2146 memcg_oom_recover(memcg); 2250 memcg_oom_recover(memcg);
2147 } 2251 }
2252cleanup:
2253 current->memcg_oom.memcg = NULL;
2148 css_put(&memcg->css); 2254 css_put(&memcg->css);
2149 current->memcg_oom.wait_on_memcg = NULL;
2150out:
2151 current->memcg_oom.in_memcg_oom = 0;
2152 return true; 2255 return true;
2153} 2256}
2154 2257
@@ -2562,6 +2665,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
2562 || fatal_signal_pending(current))) 2665 || fatal_signal_pending(current)))
2563 goto bypass; 2666 goto bypass;
2564 2667
2668 if (unlikely(task_in_memcg_oom(current)))
2669 goto bypass;
2670
2565 /* 2671 /*
2566 * We always charge the cgroup the mm_struct belongs to. 2672 * We always charge the cgroup the mm_struct belongs to.
2567 * The mm_struct's mem_cgroup changes on task migration if the 2673 * The mm_struct's mem_cgroup changes on task migration if the
@@ -2660,6 +2766,8 @@ done:
2660 return 0; 2766 return 0;
2661nomem: 2767nomem:
2662 *ptr = NULL; 2768 *ptr = NULL;
2769 if (gfp_mask & __GFP_NOFAIL)
2770 return 0;
2663 return -ENOMEM; 2771 return -ENOMEM;
2664bypass: 2772bypass:
2665 *ptr = root_mem_cgroup; 2773 *ptr = root_mem_cgroup;
@@ -2812,7 +2920,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2812 unlock_page_cgroup(pc); 2920 unlock_page_cgroup(pc);
2813 2921
2814 /* 2922 /*
2815 * "charge_statistics" updated event counter. 2923 * "charge_statistics" updated event counter. Then, check it.
2924 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2925 * if they exceeds softlimit.
2816 */ 2926 */
2817 memcg_check_events(memcg, page); 2927 memcg_check_events(memcg, page);
2818} 2928}
@@ -4647,6 +4757,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4647 return ret; 4757 return ret;
4648} 4758}
4649 4759
4760unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4761 gfp_t gfp_mask,
4762 unsigned long *total_scanned)
4763{
4764 unsigned long nr_reclaimed = 0;
4765 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4766 unsigned long reclaimed;
4767 int loop = 0;
4768 struct mem_cgroup_tree_per_zone *mctz;
4769 unsigned long long excess;
4770 unsigned long nr_scanned;
4771
4772 if (order > 0)
4773 return 0;
4774
4775 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4776 /*
 4777 * This loop can run a while, especially if mem_cgroups continuously
4778 * keep exceeding their soft limit and putting the system under
4779 * pressure
4780 */
4781 do {
4782 if (next_mz)
4783 mz = next_mz;
4784 else
4785 mz = mem_cgroup_largest_soft_limit_node(mctz);
4786 if (!mz)
4787 break;
4788
4789 nr_scanned = 0;
4790 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4791 gfp_mask, &nr_scanned);
4792 nr_reclaimed += reclaimed;
4793 *total_scanned += nr_scanned;
4794 spin_lock(&mctz->lock);
4795
4796 /*
4797 * If we failed to reclaim anything from this memory cgroup
4798 * it is time to move on to the next cgroup
4799 */
4800 next_mz = NULL;
4801 if (!reclaimed) {
4802 do {
4803 /*
4804 * Loop until we find yet another one.
4805 *
4806 * By the time we get the soft_limit lock
 4807 * again, someone might have added the
4808 * group back on the RB tree. Iterate to
4809 * make sure we get a different mem.
4810 * mem_cgroup_largest_soft_limit_node returns
4811 * NULL if no other cgroup is present on
4812 * the tree
4813 */
4814 next_mz =
4815 __mem_cgroup_largest_soft_limit_node(mctz);
4816 if (next_mz == mz)
4817 css_put(&next_mz->memcg->css);
4818 else /* next_mz == NULL or other memcg */
4819 break;
4820 } while (1);
4821 }
4822 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4823 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4824 /*
4825 * One school of thought says that we should not add
4826 * back the node to the tree if reclaim returns 0.
4827 * But our reclaim could return 0, simply because due
4828 * to priority we are exposing a smaller subset of
4829 * memory to reclaim from. Consider this as a longer
4830 * term TODO.
4831 */
4832 /* If excess == 0, no tree ops */
4833 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4834 spin_unlock(&mctz->lock);
4835 css_put(&mz->memcg->css);
4836 loop++;
4837 /*
4838 * Could not reclaim anything and there are no more
4839 * mem cgroups to try or we seem to be looping without
4840 * reclaiming anything.
4841 */
4842 if (!nr_reclaimed &&
4843 (next_mz == NULL ||
4844 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4845 break;
4846 } while (!nr_reclaimed);
4847 if (next_mz)
4848 css_put(&next_mz->memcg->css);
4849 return nr_reclaimed;
4850}
4851
4650/** 4852/**
4651 * mem_cgroup_force_empty_list - clears LRU of a group 4853 * mem_cgroup_force_empty_list - clears LRU of a group
4652 * @memcg: group to clear 4854 * @memcg: group to clear
@@ -5911,6 +6113,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
5911 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 6113 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5912 mz = &pn->zoneinfo[zone]; 6114 mz = &pn->zoneinfo[zone];
5913 lruvec_init(&mz->lruvec); 6115 lruvec_init(&mz->lruvec);
6116 mz->usage_in_excess = 0;
6117 mz->on_tree = false;
5914 mz->memcg = memcg; 6118 mz->memcg = memcg;
5915 } 6119 }
5916 memcg->nodeinfo[node] = pn; 6120 memcg->nodeinfo[node] = pn;
@@ -5966,6 +6170,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5966 int node; 6170 int node;
5967 size_t size = memcg_size(); 6171 size_t size = memcg_size();
5968 6172
6173 mem_cgroup_remove_from_trees(memcg);
5969 free_css_id(&mem_cgroup_subsys, &memcg->css); 6174 free_css_id(&mem_cgroup_subsys, &memcg->css);
5970 6175
5971 for_each_node(node) 6176 for_each_node(node)
@@ -6002,6 +6207,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6002} 6207}
6003EXPORT_SYMBOL(parent_mem_cgroup); 6208EXPORT_SYMBOL(parent_mem_cgroup);
6004 6209
6210static void __init mem_cgroup_soft_limit_tree_init(void)
6211{
6212 struct mem_cgroup_tree_per_node *rtpn;
6213 struct mem_cgroup_tree_per_zone *rtpz;
6214 int tmp, node, zone;
6215
6216 for_each_node(node) {
6217 tmp = node;
6218 if (!node_state(node, N_NORMAL_MEMORY))
6219 tmp = -1;
6220 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6221 BUG_ON(!rtpn);
6222
6223 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6224
6225 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6226 rtpz = &rtpn->rb_tree_per_zone[zone];
6227 rtpz->rb_root = RB_ROOT;
6228 spin_lock_init(&rtpz->lock);
6229 }
6230 }
6231}
6232
6005static struct cgroup_subsys_state * __ref 6233static struct cgroup_subsys_state * __ref
6006mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 6234mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6007{ 6235{
@@ -6031,7 +6259,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6031 mutex_init(&memcg->thresholds_lock); 6259 mutex_init(&memcg->thresholds_lock);
6032 spin_lock_init(&memcg->move_lock); 6260 spin_lock_init(&memcg->move_lock);
6033 vmpressure_init(&memcg->vmpressure); 6261 vmpressure_init(&memcg->vmpressure);
6034 spin_lock_init(&memcg->soft_lock);
6035 6262
6036 return &memcg->css; 6263 return &memcg->css;
6037 6264
@@ -6109,13 +6336,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6109 6336
6110 mem_cgroup_invalidate_reclaim_iterators(memcg); 6337 mem_cgroup_invalidate_reclaim_iterators(memcg);
6111 mem_cgroup_reparent_charges(memcg); 6338 mem_cgroup_reparent_charges(memcg);
6112 if (memcg->soft_contributed) {
6113 while ((memcg = parent_mem_cgroup(memcg)))
6114 atomic_dec(&memcg->children_in_excess);
6115
6116 if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
6117 atomic_dec(&root_mem_cgroup->children_in_excess);
6118 }
6119 mem_cgroup_destroy_all_caches(memcg); 6339 mem_cgroup_destroy_all_caches(memcg);
6120 vmpressure_cleanup(&memcg->vmpressure); 6340 vmpressure_cleanup(&memcg->vmpressure);
6121} 6341}
@@ -6790,6 +7010,7 @@ static int __init mem_cgroup_init(void)
6790{ 7010{
6791 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 7011 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6792 enable_swap_cgroup(); 7012 enable_swap_cgroup();
7013 mem_cgroup_soft_limit_tree_init();
6793 memcg_stock_init(); 7014 memcg_stock_init();
6794 return 0; 7015 return 0;
6795} 7016}
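
The mm/memcontrol.c hunks above reinstate per-node, per-zone soft-limit trees: each mem_cgroup_per_zone now tracks usage_in_excess and on_tree, and mem_cgroup_soft_limit_tree_init() allocates an rb-tree root plus spinlock for every node/zone pair. For orientation, a minimal sketch of how a group gets placed on such a tree, ordered by how far it exceeds its soft limit; this assumes an rb_node member (tree_node) in mem_cgroup_per_zone, leaves out the update throttling done by the real code, and expects the caller to hold mctz->lock.

    #include <linux/rbtree.h>

    /* Illustrative sketch only -- not part of the patch. */
    static void soft_limit_tree_insert(struct mem_cgroup_tree_per_zone *mctz,
                                       struct mem_cgroup_per_zone *mz,
                                       unsigned long long excess)
    {
            struct rb_node **p = &mctz->rb_root.rb_node;
            struct rb_node *parent = NULL;
            struct mem_cgroup_per_zone *mz_node;

            if (mz->on_tree)
                    return;

            mz->usage_in_excess = excess;
            while (*p) {
                    parent = *p;
                    mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
                                       tree_node);
                    /* larger excess sorts to the right, so reclaim can pick
                     * the worst offender from the rightmost node */
                    if (mz->usage_in_excess < mz_node->usage_in_excess)
                            p = &(*p)->rb_left;
                    else
                            p = &(*p)->rb_right;
            }
            rb_link_node(&mz->tree_node, parent, p);
            rb_insert_color(&mz->tree_node, &mctz->rb_root);
            mz->on_tree = true;
    }

mem_cgroup_soft_limit_reclaim(), invoked from the mm/vmscan.c hunks further down, then repeatedly takes the rightmost (largest-excess) entry and reclaims from it.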
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 947ed5413279..bf3351b5115e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1114 * shake_page could have turned it free. 1114 * shake_page could have turned it free.
1115 */ 1115 */
1116 if (is_free_buddy_page(p)) { 1116 if (is_free_buddy_page(p)) {
1117 action_result(pfn, "free buddy, 2nd try", 1117 if (flags & MF_COUNT_INCREASED)
1118 DELAYED); 1118 action_result(pfn, "free buddy", DELAYED);
1119 else
1120 action_result(pfn, "free buddy, 2nd try", DELAYED);
1119 return 0; 1121 return 0;
1120 } 1122 }
1121 action_result(pfn, "non LRU", IGNORED); 1123 action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
1349 * worked by memory_failure() and the page lock is not held yet. 1351 * worked by memory_failure() and the page lock is not held yet.
1350 * In such case, we yield to memory_failure() and make unpoison fail. 1352 * In such case, we yield to memory_failure() and make unpoison fail.
1351 */ 1353 */
1352 if (PageTransHuge(page)) { 1354 if (!PageHuge(page) && PageTransHuge(page)) {
1353 pr_info("MCE: Memory failure is now running on %#lx\n", pfn); 1355 pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
1354 return 0; 1356 return 0;
1355 } 1357 }
diff --git a/mm/memory.c b/mm/memory.c
index ca0003947115..1311f26497e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -837,6 +837,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
837 */ 837 */
838 make_migration_entry_read(&entry); 838 make_migration_entry_read(&entry);
839 pte = swp_entry_to_pte(entry); 839 pte = swp_entry_to_pte(entry);
840 if (pte_swp_soft_dirty(*src_pte))
841 pte = pte_swp_mksoft_dirty(pte);
840 set_pte_at(src_mm, addr, src_pte, pte); 842 set_pte_at(src_mm, addr, src_pte, pte);
841 } 843 }
842 } 844 }
@@ -3863,15 +3865,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3863 * space. Kernel faults are handled more gracefully. 3865 * space. Kernel faults are handled more gracefully.
3864 */ 3866 */
3865 if (flags & FAULT_FLAG_USER) 3867 if (flags & FAULT_FLAG_USER)
3866 mem_cgroup_enable_oom(); 3868 mem_cgroup_oom_enable();
3867 3869
3868 ret = __handle_mm_fault(mm, vma, address, flags); 3870 ret = __handle_mm_fault(mm, vma, address, flags);
3869 3871
3870 if (flags & FAULT_FLAG_USER) 3872 if (flags & FAULT_FLAG_USER) {
3871 mem_cgroup_disable_oom(); 3873 mem_cgroup_oom_disable();
3872 3874 /*
3873 if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))) 3875 * The task may have entered a memcg OOM situation but
3874 mem_cgroup_oom_synchronize(); 3876 * if the allocation error was handled gracefully (no
3877 * VM_FAULT_OOM), there is no need to kill anything.
3878 * Just clean up the OOM state peacefully.
3879 */
3880 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
3881 mem_cgroup_oom_synchronize(false);
3882 }
3875 3883
3876 return ret; 3884 return ret;
3877} 3885}
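
Note that mem_cgroup_oom_synchronize() is called with two different arguments in this series: handle_mm_fault() above passes false, while pagefault_out_of_memory() in the mm/oom_kill.c hunk below passes true. A condensed view of that division of labour, pieced together from the two call sites visible in this diff (the helper's signature is inferred from them; its internals are not quoted here):

    /* in handle_mm_fault(), after __handle_mm_fault() has returned ret: */
    if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
            mem_cgroup_oom_synchronize(false);  /* fault recovered: only clear
                                                 * the recorded memcg OOM state */

    /* in pagefault_out_of_memory(): */
    if (mem_cgroup_oom_synchronize(true))       /* fault not recovered: perform
                                                 * the memcg OOM kill ... */
            return;                             /* ... and skip the global OOM path */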
diff --git a/mm/migrate.c b/mm/migrate.c
index 9c8d5f59d30b..7a7325ee1d08 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
107 list_del(&page->lru); 107 list_del(&page->lru);
108 dec_zone_page_state(page, NR_ISOLATED_ANON + 108 dec_zone_page_state(page, NR_ISOLATED_ANON +
109 page_is_file_cache(page)); 109 page_is_file_cache(page));
110 if (unlikely(balloon_page_movable(page))) 110 if (unlikely(isolated_balloon_page(page)))
111 balloon_page_putback(page); 111 balloon_page_putback(page);
112 else 112 else
113 putback_lru_page(page); 113 putback_lru_page(page);
@@ -161,6 +161,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
161 161
162 get_page(new); 162 get_page(new);
163 pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); 163 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
164 if (pte_swp_soft_dirty(*ptep))
165 pte = pte_mksoft_dirty(pte);
164 if (is_write_migration_entry(entry)) 166 if (is_write_migration_entry(entry))
165 pte = pte_mkwrite(pte); 167 pte = pte_mkwrite(pte);
166#ifdef CONFIG_HUGETLB_PAGE 168#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/mlock.c b/mm/mlock.c
index d63802663242..d480cd6fc475 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
379 379
380 /* 380 /*
381 * Initialize pte walk starting at the already pinned page where we 381 * Initialize pte walk starting at the already pinned page where we
382 * are sure that there is a pte. 382 * are sure that there is a pte, as it was pinned under the same
383 * mmap_sem write op.
383 */ 384 */
384 pte = get_locked_pte(vma->vm_mm, start, &ptl); 385 pte = get_locked_pte(vma->vm_mm, start, &ptl);
385 end = min(end, pmd_addr_end(start, end)); 386 /* Make sure we do not cross the page table boundary */
387 end = pgd_addr_end(start, end);
388 end = pud_addr_end(start, end);
389 end = pmd_addr_end(start, end);
386 390
387 /* The page next to the pinned page is the first we will try to get */ 391 /* The page next to the pinned page is the first we will try to get */
388 start += PAGE_SIZE; 392 start += PAGE_SIZE;
@@ -736,6 +740,7 @@ static int do_mlockall(int flags)
736 740
737 /* Ignore errors */ 741 /* Ignore errors */
738 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 742 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
743 cond_resched();
739 } 744 }
740out: 745out:
741 return 0; 746 return 0;
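
The three chained *_addr_end() calls above clamp end so the pte walk never leaves the page table it started in at any level; previously only the PMD boundary was respected. A standalone illustration of the clamping arithmetic, assuming the usual x86-64 4-level sizes (2 MiB PMD, 1 GiB PUD, 512 GiB PGD); the real macros additionally guard against address wrap-around, which this sketch omits.

    #include <stdio.h>
    #include <stdint.h>

    #define PMD_SIZE   (1ULL << 21)   /* 2 MiB   (assumed, arch-specific) */
    #define PUD_SIZE   (1ULL << 30)   /* 1 GiB   (assumed, arch-specific) */
    #define PGDIR_SIZE (1ULL << 39)   /* 512 GiB (assumed, arch-specific) */

    /* same idea as pgd/pud/pmd_addr_end(): stop at the next boundary of
     * the given size, unless the requested end comes first */
    static uint64_t addr_end(uint64_t addr, uint64_t end, uint64_t size)
    {
            uint64_t boundary = (addr + size) & ~(size - 1);

            return boundary < end ? boundary : end;
    }

    int main(void)
    {
            uint64_t start = 0x7f00001ff000ULL;     /* 4 KiB below a 2 MiB boundary */
            uint64_t end   = start + (4ULL << 20);  /* caller asked for 4 MiB */

            end = addr_end(start, end, PGDIR_SIZE);
            end = addr_end(start, end, PUD_SIZE);
            end = addr_end(start, end, PMD_SIZE);

            printf("clamped end = %#llx (%llu KiB past start)\n",
                   (unsigned long long)end,
                   (unsigned long long)((end - start) >> 10));
            return 0;
    }

With these example addresses, a request for 4 MiB is cut down to the 4 KiB remaining before the next 2 MiB (PMD) boundary.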
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94722a4d6b43..a3af058f68e4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
94 swp_entry_t entry = pte_to_swp_entry(oldpte); 94 swp_entry_t entry = pte_to_swp_entry(oldpte);
95 95
96 if (is_write_migration_entry(entry)) { 96 if (is_write_migration_entry(entry)) {
97 pte_t newpte;
97 /* 98 /*
98 * A protection check is difficult so 99 * A protection check is difficult so
99 * just be safe and disable write 100 * just be safe and disable write
100 */ 101 */
101 make_migration_entry_read(&entry); 102 make_migration_entry_read(&entry);
102 set_pte_at(mm, addr, pte, 103 newpte = swp_entry_to_pte(entry);
103 swp_entry_to_pte(entry)); 104 if (pte_swp_soft_dirty(oldpte))
105 newpte = pte_swp_mksoft_dirty(newpte);
106 set_pte_at(mm, addr, pte, newpte);
104 } 107 }
105 pages++; 108 pages++;
106 } 109 }
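
This is the third hunk in the series that hand-carries the soft-dirty bit across a rewritten pte: copy_one_pte() and change_pte_range() rebuild a swap pte (pte_swp_mksoft_dirty), while remove_migration_pte() transfers the bit onto the new present pte (pte_mksoft_dirty). For the two swap-pte cases, a shared helper could hypothetically look like this (name and placement are illustrative only, not part of the patch):

    /* Hypothetical helper, not in the patch: preserve the swap soft-dirty
     * bit whenever a swap/migration pte is rebuilt from a swp_entry_t. */
    static inline pte_t swp_pte_copy_soft_dirty(pte_t newpte, pte_t oldpte)
    {
            if (pte_swp_soft_dirty(oldpte))
                    newpte = pte_swp_mksoft_dirty(newpte);
            return newpte;
    }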
diff --git a/mm/mremap.c b/mm/mremap.c
index 91b13d6a16d4..0843feb66f3d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -25,7 +25,6 @@
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28#include <asm/pgalloc.h>
29 28
30#include "internal.h" 29#include "internal.h"
31 30
@@ -63,10 +62,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
63 return NULL; 62 return NULL;
64 63
65 pmd = pmd_alloc(mm, pud, addr); 64 pmd = pmd_alloc(mm, pud, addr);
66 if (!pmd) { 65 if (!pmd)
67 pud_free(mm, pud);
68 return NULL; 66 return NULL;
69 }
70 67
71 VM_BUG_ON(pmd_trans_huge(*pmd)); 68 VM_BUG_ON(pmd_trans_huge(*pmd));
72 69
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 314e9d274381..6738c47f1f72 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -680,7 +680,7 @@ void pagefault_out_of_memory(void)
680{ 680{
681 struct zonelist *zonelist; 681 struct zonelist *zonelist;
682 682
683 if (mem_cgroup_oom_synchronize()) 683 if (mem_cgroup_oom_synchronize(true))
684 return; 684 return;
685 685
686 zonelist = node_zonelist(first_online_node, GFP_KERNEL); 686 zonelist = node_zonelist(first_online_node, GFP_KERNEL);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f5236f804aa6..63807583d8e8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1210,11 +1210,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
1210 return 1; 1210 return 1;
1211} 1211}
1212 1212
1213static long bdi_max_pause(struct backing_dev_info *bdi, 1213static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
1214 unsigned long bdi_dirty) 1214 unsigned long bdi_dirty)
1215{ 1215{
1216 long bw = bdi->avg_write_bandwidth; 1216 unsigned long bw = bdi->avg_write_bandwidth;
1217 long t; 1217 unsigned long t;
1218 1218
1219 /* 1219 /*
1220 * Limit pause time for small memory systems. If sleeping for too long 1220 * Limit pause time for small memory systems. If sleeping for too long
@@ -1226,7 +1226,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
1226 t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); 1226 t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1227 t++; 1227 t++;
1228 1228
1229 return min_t(long, t, MAX_PAUSE); 1229 return min_t(unsigned long, t, MAX_PAUSE);
1230} 1230}
1231 1231
1232static long bdi_min_pause(struct backing_dev_info *bdi, 1232static long bdi_min_pause(struct backing_dev_info *bdi,
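
The signedness change matters because the intermediate pause computation can exceed LONG_MAX for pathologically large bdi_dirty inputs; assigned to a signed long it goes negative, and min_t(long, t, MAX_PAUSE) then returns the negative number instead of the cap. A standalone demonstration of the sign trap (MAX_PAUSE is an arbitrary constant here; in the kernel it is HZ-dependent):

    #include <stdio.h>
    #include <limits.h>

    #define MAX_PAUSE 200   /* placeholder cap for the demo */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            unsigned long bdi_dirty = ULONG_MAX - 1000;  /* pathologically large */
            unsigned long bw = 1;

            long          st = bdi_dirty / (1 + bw);  /* > LONG_MAX: wraps negative
                                                       * on common two's-complement ABIs */
            unsigned long ut = bdi_dirty / (1 + bw);  /* stays positive */

            printf("signed:   t=%ld  max pause=%ld\n", st, min_t(long, st, MAX_PAUSE));
            printf("unsigned: t=%lu  max pause=%lu\n", ut, min_t(unsigned long, ut, MAX_PAUSE));
            return 0;
    }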
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ee638f76ebe..dd886fac451a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6366 list_del(&page->lru); 6366 list_del(&page->lru);
6367 rmv_page_order(page); 6367 rmv_page_order(page);
6368 zone->free_area[order].nr_free--; 6368 zone->free_area[order].nr_free--;
6369#ifdef CONFIG_HIGHMEM
6370 if (PageHighMem(page))
6371 totalhigh_pages -= 1 << order;
6372#endif
6373 for (i = 0; i < (1 << order); i++) 6369 for (i = 0; i < (1 << order); i++)
6374 SetPageReserved((page+i)); 6370 SetPageReserved((page+i));
6375 pfn += (1 << order); 6371 pfn += (1 << order);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a3443278ce3a..e2e98af703ea 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -56,6 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
56 continue; 56 continue;
57 } 57 }
58 58
59#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
59 /* 60 /*
60 * For simplicity, we won't check this in the list of memcg 61 * For simplicity, we won't check this in the list of memcg
61 * caches. We have control over memcg naming, and if there 62 * caches. We have control over memcg naming, and if there
@@ -69,6 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
69 s = NULL; 70 s = NULL;
70 return -EINVAL; 71 return -EINVAL;
71 } 72 }
73#endif
72 } 74 }
73 75
74 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 76 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3963fc24fcc1..de7c904e52e5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1824,6 +1824,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1824 struct filename *pathname; 1824 struct filename *pathname;
1825 int i, type, prev; 1825 int i, type, prev;
1826 int err; 1826 int err;
1827 unsigned int old_block_size;
1827 1828
1828 if (!capable(CAP_SYS_ADMIN)) 1829 if (!capable(CAP_SYS_ADMIN))
1829 return -EPERM; 1830 return -EPERM;
@@ -1914,6 +1915,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1914 } 1915 }
1915 1916
1916 swap_file = p->swap_file; 1917 swap_file = p->swap_file;
1918 old_block_size = p->old_block_size;
1917 p->swap_file = NULL; 1919 p->swap_file = NULL;
1918 p->max = 0; 1920 p->max = 0;
1919 swap_map = p->swap_map; 1921 swap_map = p->swap_map;
@@ -1938,7 +1940,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1938 inode = mapping->host; 1940 inode = mapping->host;
1939 if (S_ISBLK(inode->i_mode)) { 1941 if (S_ISBLK(inode->i_mode)) {
1940 struct block_device *bdev = I_BDEV(inode); 1942 struct block_device *bdev = I_BDEV(inode);
1941 set_blocksize(bdev, p->old_block_size); 1943 set_blocksize(bdev, old_block_size);
1942 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1944 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1943 } else { 1945 } else {
1944 mutex_lock(&inode->i_mutex); 1946 mutex_lock(&inode->i_mutex);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc9..eea668d9cff6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
48#include <asm/div64.h> 48#include <asm/div64.h>
49 49
50#include <linux/swapops.h> 50#include <linux/swapops.h>
51#include <linux/balloon_compaction.h>
51 52
52#include "internal.h" 53#include "internal.h"
53 54
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
139{ 140{
140 return !sc->target_mem_cgroup; 141 return !sc->target_mem_cgroup;
141} 142}
142
143static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
144{
145 struct mem_cgroup *root = sc->target_mem_cgroup;
146 return !mem_cgroup_disabled() &&
147 mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
148}
149#else 143#else
150static bool global_reclaim(struct scan_control *sc) 144static bool global_reclaim(struct scan_control *sc)
151{ 145{
152 return true; 146 return true;
153} 147}
154
155static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
156{
157 return false;
158}
159#endif 148#endif
160 149
161unsigned long zone_reclaimable_pages(struct zone *zone) 150unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -222,6 +211,7 @@ void unregister_shrinker(struct shrinker *shrinker)
222 down_write(&shrinker_rwsem); 211 down_write(&shrinker_rwsem);
223 list_del(&shrinker->list); 212 list_del(&shrinker->list);
224 up_write(&shrinker_rwsem); 213 up_write(&shrinker_rwsem);
214 kfree(shrinker->nr_deferred);
225} 215}
226EXPORT_SYMBOL(unregister_shrinker); 216EXPORT_SYMBOL(unregister_shrinker);
227 217
@@ -1125,7 +1115,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1125 LIST_HEAD(clean_pages); 1115 LIST_HEAD(clean_pages);
1126 1116
1127 list_for_each_entry_safe(page, next, page_list, lru) { 1117 list_for_each_entry_safe(page, next, page_list, lru) {
1128 if (page_is_file_cache(page) && !PageDirty(page)) { 1118 if (page_is_file_cache(page) && !PageDirty(page) &&
1119 !isolated_balloon_page(page)) {
1129 ClearPageActive(page); 1120 ClearPageActive(page);
1130 list_move(&page->lru, &clean_pages); 1121 list_move(&page->lru, &clean_pages);
1131 } 1122 }
@@ -2176,11 +2167,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
2176 } 2167 }
2177} 2168}
2178 2169
2179static int 2170static void shrink_zone(struct zone *zone, struct scan_control *sc)
2180__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2181{ 2171{
2182 unsigned long nr_reclaimed, nr_scanned; 2172 unsigned long nr_reclaimed, nr_scanned;
2183 int groups_scanned = 0;
2184 2173
2185 do { 2174 do {
2186 struct mem_cgroup *root = sc->target_mem_cgroup; 2175 struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2177,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2188 .zone = zone, 2177 .zone = zone,
2189 .priority = sc->priority, 2178 .priority = sc->priority,
2190 }; 2179 };
2191 struct mem_cgroup *memcg = NULL; 2180 struct mem_cgroup *memcg;
2192 mem_cgroup_iter_filter filter = (soft_reclaim) ?
2193 mem_cgroup_soft_reclaim_eligible : NULL;
2194 2181
2195 nr_reclaimed = sc->nr_reclaimed; 2182 nr_reclaimed = sc->nr_reclaimed;
2196 nr_scanned = sc->nr_scanned; 2183 nr_scanned = sc->nr_scanned;
2197 2184
2198 while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) { 2185 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2186 do {
2199 struct lruvec *lruvec; 2187 struct lruvec *lruvec;
2200 2188
2201 groups_scanned++;
2202 lruvec = mem_cgroup_zone_lruvec(zone, memcg); 2189 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2203 2190
2204 shrink_lruvec(lruvec, sc); 2191 shrink_lruvec(lruvec, sc);
@@ -2218,7 +2205,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2218 mem_cgroup_iter_break(root, memcg); 2205 mem_cgroup_iter_break(root, memcg);
2219 break; 2206 break;
2220 } 2207 }
2221 } 2208 memcg = mem_cgroup_iter(root, memcg, &reclaim);
2209 } while (memcg);
2222 2210
2223 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, 2211 vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2224 sc->nr_scanned - nr_scanned, 2212 sc->nr_scanned - nr_scanned,
@@ -2226,37 +2214,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
2226 2214
2227 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, 2215 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2228 sc->nr_scanned - nr_scanned, sc)); 2216 sc->nr_scanned - nr_scanned, sc));
2229
2230 return groups_scanned;
2231}
2232
2233
2234static void shrink_zone(struct zone *zone, struct scan_control *sc)
2235{
2236 bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
2237 unsigned long nr_scanned = sc->nr_scanned;
2238 int scanned_groups;
2239
2240 scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
2241 /*
2242 * memcg iterator might race with other reclaimer or start from
2243 * a incomplete tree walk so the tree walk in __shrink_zone
2244 * might have missed groups that are above the soft limit. Try
2245 * another loop to catch up with others. Do it just once to
2246 * prevent from reclaim latencies when other reclaimers always
2247 * preempt this one.
2248 */
2249 if (do_soft_reclaim && !scanned_groups)
2250 __shrink_zone(zone, sc, do_soft_reclaim);
2251
2252 /*
2253 * No group is over the soft limit or those that are do not have
2254 * pages in the zone we are reclaiming so we have to reclaim everybody
2255 */
2256 if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
2257 __shrink_zone(zone, sc, false);
2258 return;
2259 }
2260} 2217}
2261 2218
2262/* Returns true if compaction should go ahead for a high-order request */ 2219/* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2277,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2320{ 2277{
2321 struct zoneref *z; 2278 struct zoneref *z;
2322 struct zone *zone; 2279 struct zone *zone;
2280 unsigned long nr_soft_reclaimed;
2281 unsigned long nr_soft_scanned;
2323 bool aborted_reclaim = false; 2282 bool aborted_reclaim = false;
2324 2283
2325 /* 2284 /*
@@ -2359,6 +2318,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2359 continue; 2318 continue;
2360 } 2319 }
2361 } 2320 }
2321 /*
2322 * This steals pages from memory cgroups over softlimit
2323 * and returns the number of reclaimed pages and
2324 * scanned pages. This works for global memory pressure
2325 * and balancing, not for a memcg's limit.
2326 */
2327 nr_soft_scanned = 0;
2328 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2329 sc->order, sc->gfp_mask,
2330 &nr_soft_scanned);
2331 sc->nr_reclaimed += nr_soft_reclaimed;
2332 sc->nr_scanned += nr_soft_scanned;
 2362 /* need some check to avoid more shrink_zone() */ 2333 /* need some check to avoid more shrink_zone() */
2363 } 2334 }
2364 2335
@@ -2952,6 +2923,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2952{ 2923{
2953 int i; 2924 int i;
2954 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2925 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2926 unsigned long nr_soft_reclaimed;
2927 unsigned long nr_soft_scanned;
2955 struct scan_control sc = { 2928 struct scan_control sc = {
2956 .gfp_mask = GFP_KERNEL, 2929 .gfp_mask = GFP_KERNEL,
2957 .priority = DEF_PRIORITY, 2930 .priority = DEF_PRIORITY,
@@ -3066,6 +3039,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
3066 3039
3067 sc.nr_scanned = 0; 3040 sc.nr_scanned = 0;
3068 3041
3042 nr_soft_scanned = 0;
3043 /*
3044 * Call soft limit reclaim before calling shrink_zone.
3045 */
3046 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3047 order, sc.gfp_mask,
3048 &nr_soft_scanned);
3049 sc.nr_reclaimed += nr_soft_reclaimed;
3050
3069 /* 3051 /*
3070 * There should be no need to raise the scanning 3052 * There should be no need to raise the scanning
3071 * priority if enough pages are already being scanned 3053 * priority if enough pages are already being scanned
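
shrink_zone() above returns to the plain hierarchy walk. The restored idiom is the standard open-coded mem_cgroup_iter() loop: each call pins the returned group's css (dropping the previous one), so a loop that bails out early must call mem_cgroup_iter_break() to release the reference. A condensed sketch of the pattern; reclaim-cookie setup and the per-lruvec work are elided, and the stop condition shown is the usual "enough progress" test for limit reclaim, whose exact form lives in the elided context lines.

    struct mem_cgroup *root = sc->target_mem_cgroup;
    struct mem_cgroup *memcg;

    memcg = mem_cgroup_iter(root, NULL, &reclaim);
    do {
            struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);

            shrink_lruvec(lruvec, sc);

            /* limit reclaim may stop once it has made enough progress,
             * but must release the iterator's css reference first */
            if (!global_reclaim(sc) && sc->nr_reclaimed >= sc->nr_to_reclaim) {
                    mem_cgroup_iter_break(root, memcg);
                    break;
            }
            memcg = mem_cgroup_iter(root, memcg, &reclaim);
    } while (memcg);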
diff --git a/mm/zswap.c b/mm/zswap.c
index 841e35f1db22..d93510c6aa2d 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -804,6 +804,10 @@ static void zswap_frontswap_invalidate_area(unsigned type)
804 } 804 }
805 tree->rbroot = RB_ROOT; 805 tree->rbroot = RB_ROOT;
806 spin_unlock(&tree->lock); 806 spin_unlock(&tree->lock);
807
808 zbud_destroy_pool(tree->pool);
809 kfree(tree);
810 zswap_trees[type] = NULL;
807} 811}
808 812
809static struct zbud_ops zswap_zbud_ops = { 813static struct zbud_ops zswap_zbud_ops = {
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 1eb05d80b07b..3ed616215870 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -24,6 +24,11 @@
24static unsigned int mrp_join_time __read_mostly = 200; 24static unsigned int mrp_join_time __read_mostly = 200;
25module_param(mrp_join_time, uint, 0644); 25module_param(mrp_join_time, uint, 0644);
26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)"); 26MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
27
28static unsigned int mrp_periodic_time __read_mostly = 1000;
29module_param(mrp_periodic_time, uint, 0644);
30MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
31
27MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
28 33
29static const u8 34static const u8
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data)
595 mrp_join_timer_arm(app); 600 mrp_join_timer_arm(app);
596} 601}
597 602
603static void mrp_periodic_timer_arm(struct mrp_applicant *app)
604{
605 mod_timer(&app->periodic_timer,
606 jiffies + msecs_to_jiffies(mrp_periodic_time));
607}
608
609static void mrp_periodic_timer(unsigned long data)
610{
611 struct mrp_applicant *app = (struct mrp_applicant *)data;
612
613 spin_lock(&app->lock);
614 mrp_mad_event(app, MRP_EVENT_PERIODIC);
615 mrp_pdu_queue(app);
616 spin_unlock(&app->lock);
617
618 mrp_periodic_timer_arm(app);
619}
620
598static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset) 621static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
599{ 622{
600 __be16 endmark; 623 __be16 endmark;
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
845 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app); 868 rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
846 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app); 869 setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
847 mrp_join_timer_arm(app); 870 mrp_join_timer_arm(app);
871 setup_timer(&app->periodic_timer, mrp_periodic_timer,
872 (unsigned long)app);
873 mrp_periodic_timer_arm(app);
848 return 0; 874 return 0;
849 875
850err3: 876err3:
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
870 * all pending messages before the applicant is gone. 896 * all pending messages before the applicant is gone.
871 */ 897 */
872 del_timer_sync(&app->join_timer); 898 del_timer_sync(&app->join_timer);
899 del_timer_sync(&app->periodic_timer);
873 900
874 spin_lock_bh(&app->lock); 901 spin_lock_bh(&app->lock);
875 mrp_mad_event(app, MRP_EVENT_TX); 902 mrp_mad_event(app, MRP_EVENT_TX);
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
171 171
172 return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ 172 return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
173 nla_total_size(2) + /* IFLA_VLAN_ID */ 173 nla_total_size(2) + /* IFLA_VLAN_ID */
174 sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ 174 nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
175 vlan_qos_map_size(vlan->nr_ingress_mappings) + 175 vlan_qos_map_size(vlan->nr_ingress_mappings) +
176 vlan_qos_map_size(vlan->nr_egress_mappings); 176 vlan_qos_map_size(vlan->nr_egress_mappings);
177} 177}
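
The one-line fix above matters because a netlink attribute needs room for its header and alignment padding, not just the payload: nla_total_size() rounds NLA_HDRLEN plus the payload up to a 4-byte boundary, so reserving only sizeof(struct ifla_vlan_flags) under-counts the space needed for IFLA_VLAN_FLAGS in the link message. A worked calculation (definitions copied in simplified form so it compiles standalone):

    #include <stdio.h>

    struct nlattr { unsigned short nla_len, nla_type; };
    struct ifla_vlan_flags { unsigned int flags, mask; };

    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      ((int)NLA_ALIGN(sizeof(struct nlattr)))

    static int nla_total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            printf("payload only (old code): %zu bytes\n",
                   sizeof(struct ifla_vlan_flags));
            printf("header + payload, aligned (fixed code): %d bytes\n",
                   nla_total_size(sizeof(struct ifla_vlan_flags)));
            return 0;
    }

Output: 8 bytes versus 12 bytes, so the old code under-reserved by 4 bytes per VLAN device dump.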
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c72d1bcdcf49..1356af660b5b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -65,6 +65,7 @@ static int __init batadv_init(void)
65 batadv_recv_handler_init(); 65 batadv_recv_handler_init();
66 66
67 batadv_iv_init(); 67 batadv_iv_init();
68 batadv_nc_init();
68 69
69 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 70 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
70 71
@@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
142 if (ret < 0) 143 if (ret < 0)
143 goto err; 144 goto err;
144 145
145 ret = batadv_nc_init(bat_priv); 146 ret = batadv_nc_mesh_init(bat_priv);
146 if (ret < 0) 147 if (ret < 0)
147 goto err; 148 goto err;
148 149
@@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
167 batadv_vis_quit(bat_priv); 168 batadv_vis_quit(bat_priv);
168 169
169 batadv_gw_node_purge(bat_priv); 170 batadv_gw_node_purge(bat_priv);
170 batadv_nc_free(bat_priv); 171 batadv_nc_mesh_free(bat_priv);
171 batadv_dat_free(bat_priv); 172 batadv_dat_free(bat_priv);
172 batadv_bla_free(bat_priv); 173 batadv_bla_free(bat_priv);
173 174
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a487d46e0aec..4ecc0b6bf8ab 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
35 struct batadv_hard_iface *recv_if); 35 struct batadv_hard_iface *recv_if);
36 36
37/** 37/**
38 * batadv_nc_init - one-time initialization for network coding
39 */
40int __init batadv_nc_init(void)
41{
42 int ret;
43
44 /* Register our packet type */
45 ret = batadv_recv_handler_register(BATADV_CODED,
46 batadv_nc_recv_coded_packet);
47
48 return ret;
49}
50
51/**
38 * batadv_nc_start_timer - initialise the nc periodic worker 52 * batadv_nc_start_timer - initialise the nc periodic worker
39 * @bat_priv: the bat priv with all the soft interface information 53 * @bat_priv: the bat priv with all the soft interface information
40 */ 54 */
@@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
45} 59}
46 60
47/** 61/**
48 * batadv_nc_init - initialise coding hash table and start house keeping 62 * batadv_nc_mesh_init - initialise coding hash table and start house keeping
49 * @bat_priv: the bat priv with all the soft interface information 63 * @bat_priv: the bat priv with all the soft interface information
50 */ 64 */
51int batadv_nc_init(struct batadv_priv *bat_priv) 65int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
52{ 66{
53 bat_priv->nc.timestamp_fwd_flush = jiffies; 67 bat_priv->nc.timestamp_fwd_flush = jiffies;
54 bat_priv->nc.timestamp_sniffed_purge = jiffies; 68 bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
70 batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 84 batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
71 &batadv_nc_decoding_hash_lock_class_key); 85 &batadv_nc_decoding_hash_lock_class_key);
72 86
73 /* Register our packet type */
74 if (batadv_recv_handler_register(BATADV_CODED,
75 batadv_nc_recv_coded_packet) < 0)
76 goto err;
77
78 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); 87 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
79 batadv_nc_start_timer(bat_priv); 88 batadv_nc_start_timer(bat_priv);
80 89
@@ -1721,12 +1730,11 @@ free_nc_packet:
1721} 1730}
1722 1731
1723/** 1732/**
1724 * batadv_nc_free - clean up network coding memory 1733 * batadv_nc_mesh_free - clean up network coding memory
1725 * @bat_priv: the bat priv with all the soft interface information 1734 * @bat_priv: the bat priv with all the soft interface information
1726 */ 1735 */
1727void batadv_nc_free(struct batadv_priv *bat_priv) 1736void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
1728{ 1737{
1729 batadv_recv_handler_unregister(BATADV_CODED);
1730 cancel_delayed_work_sync(&bat_priv->nc.work); 1738 cancel_delayed_work_sync(&bat_priv->nc.work);
1731 1739
1732 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); 1740 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 85a4ec81ad50..ddfa618e80bf 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,8 +22,9 @@
22 22
23#ifdef CONFIG_BATMAN_ADV_NC 23#ifdef CONFIG_BATMAN_ADV_NC
24 24
25int batadv_nc_init(struct batadv_priv *bat_priv); 25int batadv_nc_init(void);
26void batadv_nc_free(struct batadv_priv *bat_priv); 26int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
27void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
27void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, 28void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
28 struct batadv_orig_node *orig_node, 29 struct batadv_orig_node *orig_node,
29 struct batadv_orig_node *orig_neigh_node, 30 struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
46 47
47#else /* ifdef CONFIG_BATMAN_ADV_NC */ 48#else /* ifdef CONFIG_BATMAN_ADV_NC */
48 49
49static inline int batadv_nc_init(struct batadv_priv *bat_priv) 50static inline int batadv_nc_init(void)
50{ 51{
51 return 0; 52 return 0;
52} 53}
53 54
54static inline void batadv_nc_free(struct batadv_priv *bat_priv) 55static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
56{
57 return 0;
58}
59
60static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
55{ 61{
56 return; 62 return;
57} 63}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 634debab4d54..fb7356fcfe51 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1146,7 +1146,11 @@ int hci_dev_open(__u16 dev)
1146 goto done; 1146 goto done;
1147 } 1147 }
1148 1148
1149 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 1149 /* Check for rfkill but allow the HCI setup stage to proceed
1150 * (which in itself doesn't cause any RF activity).
1151 */
1152 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1153 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1150 ret = -ERFKILL; 1154 ret = -ERFKILL;
1151 goto done; 1155 goto done;
1152 } 1156 }
@@ -1566,10 +1570,13 @@ static int hci_rfkill_set_block(void *data, bool blocked)
1566 1570
1567 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 1571 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1568 1572
1569 if (!blocked) 1573 if (blocked) {
1570 return 0; 1574 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1571 1575 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1572 hci_dev_do_close(hdev); 1576 hci_dev_do_close(hdev);
1577 } else {
1578 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1579 }
1573 1580
1574 return 0; 1581 return 0;
1575} 1582}
@@ -1591,9 +1598,13 @@ static void hci_power_on(struct work_struct *work)
1591 return; 1598 return;
1592 } 1599 }
1593 1600
1594 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1601 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1602 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1603 hci_dev_do_close(hdev);
1604 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1595 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 1605 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1596 HCI_AUTO_OFF_TIMEOUT); 1606 HCI_AUTO_OFF_TIMEOUT);
1607 }
1597 1608
1598 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1609 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1599 mgmt_index_added(hdev); 1610 mgmt_index_added(hdev);
@@ -2209,6 +2220,9 @@ int hci_register_dev(struct hci_dev *hdev)
2209 } 2220 }
2210 } 2221 }
2211 2222
2223 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2225
2212 set_bit(HCI_SETUP, &hdev->dev_flags); 2226 set_bit(HCI_SETUP, &hdev->dev_flags);
2213 2227
2214 if (hdev->dev_type != HCI_AMP) 2228 if (hdev->dev_type != HCI_AMP)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94aab73f89d4..8db3e89fae35 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3557,7 +3557,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3557 cp.handle = cpu_to_le16(conn->handle); 3557 cp.handle = cpu_to_le16(conn->handle);
3558 3558
3559 if (ltk->authenticated) 3559 if (ltk->authenticated)
3560 conn->sec_level = BT_SECURITY_HIGH; 3560 conn->pending_sec_level = BT_SECURITY_HIGH;
3561 else
3562 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3563
3564 conn->enc_key_size = ltk->enc_size;
3561 3565
3562 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3566 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3563 3567
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bb7bca8e60..63fa11109a1c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3755,6 +3755,13 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3755 3755
3756 sk = chan->sk; 3756 sk = chan->sk;
3757 3757
3758 /* For certain devices (ex: HID mouse), support for authentication,
 3759 * pairing and bonding is optional. For such devices, in order to avoid
 3760 * keeping the ACL alive for too long after L2CAP disconnection, reset the
 3761 * ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3762 */
3763 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3764
3758 bacpy(&bt_sk(sk)->src, conn->src); 3765 bacpy(&bt_sk(sk)->src, conn->src);
3759 bacpy(&bt_sk(sk)->dst, conn->dst); 3766 bacpy(&bt_sk(sk)->dst, conn->dst);
3760 chan->psm = psm; 3767 chan->psm = psm;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d126faf145f..84fcf9fff3ea 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -569,7 +569,6 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) 569static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
570{ 570{
571 struct rfcomm_dev *dev = dlc->owner; 571 struct rfcomm_dev *dev = dlc->owner;
572 struct tty_struct *tty;
573 if (!dev) 572 if (!dev)
574 return; 573 return;
575 574
@@ -581,38 +580,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
581 DPM_ORDER_DEV_AFTER_PARENT); 580 DPM_ORDER_DEV_AFTER_PARENT);
582 581
583 wake_up_interruptible(&dev->port.open_wait); 582 wake_up_interruptible(&dev->port.open_wait);
584 } else if (dlc->state == BT_CLOSED) { 583 } else if (dlc->state == BT_CLOSED)
585 tty = tty_port_tty_get(&dev->port); 584 tty_port_tty_hangup(&dev->port, false);
586 if (!tty) {
587 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
588 /* Drop DLC lock here to avoid deadlock
589 * 1. rfcomm_dev_get will take rfcomm_dev_lock
590 * but in rfcomm_dev_add there's lock order:
591 * rfcomm_dev_lock -> dlc lock
592 * 2. tty_port_put will deadlock if it's
593 * the last reference
594 *
595 * FIXME: when we release the lock anything
596 * could happen to dev, even its destruction
597 */
598 rfcomm_dlc_unlock(dlc);
599 if (rfcomm_dev_get(dev->id) == NULL) {
600 rfcomm_dlc_lock(dlc);
601 return;
602 }
603
604 if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
605 &dev->flags))
606 tty_port_put(&dev->port);
607
608 tty_port_put(&dev->port);
609 rfcomm_dlc_lock(dlc);
610 }
611 } else {
612 tty_hangup(tty);
613 tty_kref_put(tty);
614 }
615 }
616} 585}
617 586
618static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) 587static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ffd5874f2592..33e8f23acddd 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
700 700
701 vid = nla_get_u16(tb[NDA_VLAN]); 701 vid = nla_get_u16(tb[NDA_VLAN]);
702 702
703 if (vid >= VLAN_N_VID) { 703 if (!vid || vid >= VLAN_VID_MASK) {
704 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 704 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
705 vid); 705 vid);
706 return -EINVAL; 706 return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
794 794
795 vid = nla_get_u16(tb[NDA_VLAN]); 795 vid = nla_get_u16(tb[NDA_VLAN]);
796 796
797 if (vid >= VLAN_N_VID) { 797 if (!vid || vid >= VLAN_VID_MASK) {
798 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", 798 pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
799 vid); 799 vid);
800 return -EINVAL; 800 return -EINVAL;
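
Several bridge hunks in this diff (here and in br_netlink.c below) tighten the same validation: a usable 802.1Q VID must lie in 1..4094, since VID 0 means "priority tag only" and 4095 (VLAN_VID_MASK) is reserved, while the old test against VLAN_N_VID (4096) let both through. A tiny standalone comparison of the two checks (constants mirror the kernel's):

    #include <stdio.h>
    #include <stdbool.h>

    #define VLAN_VID_MASK 0x0fff   /* 4095 */
    #define VLAN_N_VID    4096

    static bool vid_ok_old(unsigned int vid) { return vid < VLAN_N_VID; }
    static bool vid_ok_new(unsigned int vid) { return vid && vid < VLAN_VID_MASK; }

    int main(void)
    {
            unsigned int probe[] = { 0, 1, 4094, 4095 };
            unsigned int i;

            for (i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
                    printf("vid %4u: old=%s new=%s\n", probe[i],
                           vid_ok_old(probe[i]) ? "accept" : "reject",
                           vid_ok_new(probe[i]) ? "accept" : "reject");
            return 0;
    }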
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 85a09bb5ca51..b7b1914dfa25 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
453 call_rcu_bh(&p->rcu, br_multicast_free_pg); 453 call_rcu_bh(&p->rcu, br_multicast_free_pg);
454 err = 0; 454 err = 0;
455 455
456 if (!mp->ports && !mp->mglist && mp->timer_armed && 456 if (!mp->ports && !mp->mglist &&
457 netif_running(br->dev)) 457 netif_running(br->dev))
458 mod_timer(&mp->timer, jiffies); 458 mod_timer(&mp->timer, jiffies);
459 break; 459 break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d1c578630678..8b0b610ca2c9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
272 del_timer(&p->timer); 272 del_timer(&p->timer);
273 call_rcu_bh(&p->rcu, br_multicast_free_pg); 273 call_rcu_bh(&p->rcu, br_multicast_free_pg);
274 274
275 if (!mp->ports && !mp->mglist && mp->timer_armed && 275 if (!mp->ports && !mp->mglist &&
276 netif_running(br->dev)) 276 netif_running(br->dev))
277 mod_timer(&mp->timer, jiffies); 277 mod_timer(&mp->timer, jiffies);
278 278
@@ -620,7 +620,6 @@ rehash:
620 620
621 mp->br = br; 621 mp->br = br;
622 mp->addr = *group; 622 mp->addr = *group;
623
624 setup_timer(&mp->timer, br_multicast_group_expired, 623 setup_timer(&mp->timer, br_multicast_group_expired,
625 (unsigned long)mp); 624 (unsigned long)mp);
626 625
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
660 struct net_bridge_mdb_entry *mp; 659 struct net_bridge_mdb_entry *mp;
661 struct net_bridge_port_group *p; 660 struct net_bridge_port_group *p;
662 struct net_bridge_port_group __rcu **pp; 661 struct net_bridge_port_group __rcu **pp;
662 unsigned long now = jiffies;
663 int err; 663 int err;
664 664
665 spin_lock(&br->multicast_lock); 665 spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
674 674
675 if (!port) { 675 if (!port) {
676 mp->mglist = true; 676 mp->mglist = true;
677 mod_timer(&mp->timer, now + br->multicast_membership_interval);
677 goto out; 678 goto out;
678 } 679 }
679 680
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
681 (p = mlock_dereference(*pp, br)) != NULL; 682 (p = mlock_dereference(*pp, br)) != NULL;
682 pp = &p->next) { 683 pp = &p->next) {
683 if (p->port == port) 684 if (p->port == port)
684 goto out; 685 goto found;
685 if ((unsigned long)p->port < (unsigned long)port) 686 if ((unsigned long)p->port < (unsigned long)port)
686 break; 687 break;
687 } 688 }
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
692 rcu_assign_pointer(*pp, p); 693 rcu_assign_pointer(*pp, p);
693 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 694 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
694 695
696found:
697 mod_timer(&p->timer, now + br->multicast_membership_interval);
695out: 698out:
696 err = 0; 699 err = 0;
697 700
@@ -1191,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1191 if (!mp) 1194 if (!mp)
1192 goto out; 1195 goto out;
1193 1196
1194 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1195 mp->timer_armed = true;
1196
1197 max_delay *= br->multicast_last_member_count; 1197 max_delay *= br->multicast_last_member_count;
1198 1198
1199 if (mp->mglist && 1199 if (mp->mglist &&
@@ -1270,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1270 if (!mp) 1270 if (!mp)
1271 goto out; 1271 goto out;
1272 1272
1273 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1274 mp->timer_armed = true;
1275
1276 max_delay *= br->multicast_last_member_count; 1273 max_delay *= br->multicast_last_member_count;
1277 if (mp->mglist && 1274 if (mp->mglist &&
1278 (timer_pending(&mp->timer) ? 1275 (timer_pending(&mp->timer) ?
@@ -1358,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1358 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1355 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1359 br_mdb_notify(br->dev, port, group, RTM_DELMDB); 1356 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1360 1357
1361 if (!mp->ports && !mp->mglist && mp->timer_armed && 1358 if (!mp->ports && !mp->mglist &&
1362 netif_running(br->dev)) 1359 netif_running(br->dev))
1363 mod_timer(&mp->timer, jiffies); 1360 mod_timer(&mp->timer, jiffies);
1364 } 1361 }
@@ -1370,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
1370 br->multicast_last_member_interval; 1367 br->multicast_last_member_interval;
1371 1368
1372 if (!port) { 1369 if (!port) {
1373 if (mp->mglist && mp->timer_armed && 1370 if (mp->mglist &&
1374 (timer_pending(&mp->timer) ? 1371 (timer_pending(&mp->timer) ?
1375 time_after(mp->timer.expires, time) : 1372 time_after(mp->timer.expires, time) :
1376 try_to_del_timer_sync(&mp->timer) >= 0)) { 1373 try_to_del_timer_sync(&mp->timer) >= 0)) {
1377 mod_timer(&mp->timer, time); 1374 mod_timer(&mp->timer, time);
1378 } 1375 }
1376
1377 goto out;
1378 }
1379
1380 for (p = mlock_dereference(mp->ports, br);
1381 p != NULL;
1382 p = mlock_dereference(p->next, br)) {
1383 if (p->port != port)
1384 continue;
1385
1386 if (!hlist_unhashed(&p->mglist) &&
1387 (timer_pending(&p->timer) ?
1388 time_after(p->timer.expires, time) :
1389 try_to_del_timer_sync(&p->timer) >= 0)) {
1390 mod_timer(&p->timer, time);
1391 }
1392
1393 break;
1379 } 1394 }
1380out: 1395out:
1381 spin_unlock(&br->multicast_lock); 1396 spin_unlock(&br->multicast_lock);
@@ -1798,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br)
1798 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], 1813 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
1799 hlist[ver]) { 1814 hlist[ver]) {
1800 del_timer(&mp->timer); 1815 del_timer(&mp->timer);
1801 mp->timer_armed = false;
1802 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1816 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1803 } 1817 }
1804 } 1818 }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e74ddc1c29a8..f75d92e4f96b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
243 243
244 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); 244 vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
245 245
246 if (vinfo->vid >= VLAN_N_VID) 246 if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
247 return -EINVAL; 247 return -EINVAL;
248 248
249 switch (cmd) { 249 switch (cmd) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index efb57d911569..e14c33b42f75 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
126 struct timer_list timer; 126 struct timer_list timer;
127 struct br_ip addr; 127 struct br_ip addr;
128 bool mglist; 128 bool mglist;
129 bool timer_armed;
130}; 129};
131 130
132struct net_bridge_mdb_htable 131struct net_bridge_mdb_htable
@@ -643,9 +642,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
643 * vid wasn't set 642 * vid wasn't set
644 */ 643 */
645 smp_rmb(); 644 smp_rmb();
646 return (v->pvid & VLAN_TAG_PRESENT) ? 645 return v->pvid ?: VLAN_N_VID;
647 (v->pvid & ~VLAN_TAG_PRESENT) :
648 VLAN_N_VID;
649} 646}
650 647
651#else 648#else
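
br_get_pvid() can drop the VLAN_TAG_PRESENT bookkeeping because, with VID 0 now rejected at configuration time elsewhere in this series, a stored pvid of zero can only mean "not set". The a ?: b form is the GNU C conditional with omitted middle operand ("Elvis" operator), equivalent to a ? a : b with a evaluated once. A compact standalone illustration (requires GCC or Clang):

    #include <stdio.h>

    #define VLAN_N_VID 4096   /* sentinel meaning "no PVID configured" */

    static unsigned int br_get_pvid_like(unsigned int pvid)
    {
            return pvid ?: VLAN_N_VID;   /* GNU C: pvid ? pvid : VLAN_N_VID */
    }

    int main(void)
    {
            printf("%u %u\n", br_get_pvid_like(0), br_get_pvid_like(100));
            /* prints: 4096 100 */
            return 0;
    }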
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 108084a04671..656a6f3e40de 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
134 134
135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) 135 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); 136 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
137 else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) 137 else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); 138 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
139 139
140 if (r == 0) { 140 if (r == 0) {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9a9ffe7e4019..53f0990eab58 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
45 return 0; 45 return 0;
46 } 46 }
47 47
48 if (vid) { 48 if (v->port_idx) {
49 if (v->port_idx) { 49 p = v->parent.port;
50 p = v->parent.port; 50 br = p->br;
51 br = p->br; 51 dev = p->dev;
52 dev = p->dev; 52 } else {
53 } else { 53 br = v->parent.br;
54 br = v->parent.br; 54 dev = br->dev;
55 dev = br->dev; 55 }
56 } 56 ops = dev->netdev_ops;
57 ops = dev->netdev_ops; 57
58 58 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
59 if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { 59 /* Add VLAN to the device filter if it is supported.
 60 /* Add VLAN to the device filter if it is supported. 60 * Strictly speaking, this is not necessary now, since
 61 * Strictly speaking, this is not necessary now, since 61 * devices are made promiscuous by the bridge, but if
62 * devices are made promiscuous by the bridge, but if 62 * that ever changes this code will allow tagged
63 * that ever changes this code will allow tagged 63 * traffic to enter the bridge.
64 * traffic to enter the bridge. 64 */
65 */ 65 err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
66 err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), 66 vid);
67 vid); 67 if (err)
68 if (err) 68 return err;
69 return err; 69 }
70 }
71
72 err = br_fdb_insert(br, p, dev->dev_addr, vid);
73 if (err) {
74 br_err(br, "failed insert local address into bridge "
75 "forwarding table\n");
76 goto out_filt;
77 }
78 70
71 err = br_fdb_insert(br, p, dev->dev_addr, vid);
72 if (err) {
73 br_err(br, "failed insert local address into bridge "
74 "forwarding table\n");
75 goto out_filt;
79 } 76 }
80 77
81 set_bit(vid, v->vlan_bitmap); 78 set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
98 __vlan_delete_pvid(v, vid); 95 __vlan_delete_pvid(v, vid);
99 clear_bit(vid, v->untagged_bitmap); 96 clear_bit(vid, v->untagged_bitmap);
100 97
101 if (v->port_idx && vid) { 98 if (v->port_idx) {
102 struct net_device *dev = v->parent.port->dev; 99 struct net_device *dev = v->parent.port->dev;
103 const struct net_device_ops *ops = dev->netdev_ops; 100 const struct net_device_ops *ops = dev->netdev_ops;
104 101
@@ -192,6 +189,8 @@ out:
192bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, 189bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
193 struct sk_buff *skb, u16 *vid) 190 struct sk_buff *skb, u16 *vid)
194{ 191{
192 int err;
193
195 /* If VLAN filtering is disabled on the bridge, all packets are 194 /* If VLAN filtering is disabled on the bridge, all packets are
196 * permitted. 195 * permitted.
197 */ 196 */
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
204 if (!v) 203 if (!v)
205 return false; 204 return false;
206 205
207 if (br_vlan_get_tag(skb, vid)) { 206 err = br_vlan_get_tag(skb, vid);
207 if (!*vid) {
208 u16 pvid = br_get_pvid(v); 208 u16 pvid = br_get_pvid(v);
209 209
210 /* Frame did not have a tag. See if pvid is set 210 /* Frame had a tag with VID 0 or did not have a tag.
211 * on this port. That tells us which vlan untagged 211 * See if pvid is set on this port. That tells us which
212 * traffic belongs to. 212 * vlan untagged or priority-tagged traffic belongs to.
213 */ 213 */
214 if (pvid == VLAN_N_VID) 214 if (pvid == VLAN_N_VID)
215 return false; 215 return false;
216 216
217 /* PVID is set on this port. Any untagged ingress 217 /* PVID is set on this port. Any untagged or priority-tagged
218 * frame is considered to belong to this vlan. 218 * ingress frame is considered to belong to this vlan.
219 */ 219 */
220 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); 220 *vid = pvid;
221 if (likely(err))
222 /* Untagged Frame. */
223 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
224 else
225 /* Priority-tagged Frame.
 226 * At this point, we know that skb->vlan_tci had the
 227 * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
 228 * We update only the VID field and preserve the PCP field.
229 */
230 skb->vlan_tci |= pvid;
231
221 return true; 232 return true;
222 } 233 }
223 234
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
248 return false; 259 return false;
249} 260}
250 261
251/* Must be protected by RTNL */ 262/* Must be protected by RTNL.
263 * Must be called with vid in range from 1 to 4094 inclusive.
264 */
252int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) 265int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
253{ 266{
254 struct net_port_vlans *pv = NULL; 267 struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
278 return err; 291 return err;
279} 292}
280 293
281/* Must be protected by RTNL */ 294/* Must be protected by RTNL.
295 * Must be called with vid in range from 1 to 4094 inclusive.
296 */
282int br_vlan_delete(struct net_bridge *br, u16 vid) 297int br_vlan_delete(struct net_bridge *br, u16 vid)
283{ 298{
284 struct net_port_vlans *pv; 299 struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
289 if (!pv) 304 if (!pv)
290 return -EINVAL; 305 return -EINVAL;
291 306
292 if (vid) { 307 spin_lock_bh(&br->hash_lock);
293 /* If the VID !=0 remove fdb for this vid. VID 0 is special 308 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
294 * in that it's the default and is always there in the fdb. 309 spin_unlock_bh(&br->hash_lock);
295 */
296 spin_lock_bh(&br->hash_lock);
297 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
298 spin_unlock_bh(&br->hash_lock);
299 }
300 310
301 __vlan_del(pv, vid); 311 __vlan_del(pv, vid);
302 return 0; 312 return 0;
@@ -329,7 +339,9 @@ unlock:
329 return 0; 339 return 0;
330} 340}
331 341
332/* Must be protected by RTNL */ 342/* Must be protected by RTNL.
343 * Must be called with vid in range from 1 to 4094 inclusive.
344 */
333int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 345int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
334{ 346{
335 struct net_port_vlans *pv = NULL; 347 struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
363 return err; 375 return err;
364} 376}
365 377
366/* Must be protected by RTNL */ 378/* Must be protected by RTNL.
379 * Must be called with vid in range from 1 to 4094 inclusive.
380 */
367int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) 381int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
368{ 382{
369 struct net_port_vlans *pv; 383 struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
374 if (!pv) 388 if (!pv)
375 return -EINVAL; 389 return -EINVAL;
376 390
377 if (vid) { 391 spin_lock_bh(&port->br->hash_lock);
378 /* If the VID !=0 remove fdb for this vid. VID 0 is special 392 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
379 * in that it's the default and is always there in the fdb. 393 spin_unlock_bh(&port->br->hash_lock);
380 */
381 spin_lock_bh(&port->br->hash_lock);
382 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
383 spin_unlock_bh(&port->br->hash_lock);
384 }
385 394
386 return __vlan_del(pv, vid); 395 return __vlan_del(pv, vid);
387} 396}
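
The ingress change above distinguishes untagged frames (no VLAN header at all) from priority-tagged frames (a header whose VID field is 0 but whose PCP bits carry a priority): for the latter, only the VID bits of skb->vlan_tci are filled in with the PVID so the priority survives. A standalone check of the TCI bit layout this relies on (VLAN_PRIO_SHIFT and VLAN_VID_MASK mirror the kernel's definitions):

    #include <stdio.h>

    #define VLAN_PRIO_SHIFT 13
    #define VLAN_VID_MASK   0x0fff

    int main(void)
    {
            unsigned int pvid = 100;
            unsigned int tci  = 5u << VLAN_PRIO_SHIFT;  /* priority-tagged: PCP 5, VID 0 */

            tci |= pvid;   /* what the patched ingress path does for VID-0 tags */

            printf("PCP %u, VID %u\n",
                   tci >> VLAN_PRIO_SHIFT, tci & VLAN_VID_MASK);
            /* prints: PCP 5, VID 100 */
            return 0;
    }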
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6c8086..89032580bd1d 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
71 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || 71 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72 __get_user(kmsg->msg_flags, &umsg->msg_flags)) 72 __get_user(kmsg->msg_flags, &umsg->msg_flags))
73 return -EFAULT; 73 return -EFAULT;
74 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
75 return -EINVAL;
74 kmsg->msg_name = compat_ptr(tmp1); 76 kmsg->msg_name = compat_ptr(tmp1);
75 kmsg->msg_iov = compat_ptr(tmp2); 77 kmsg->msg_iov = compat_ptr(tmp2);
76 kmsg->msg_control = compat_ptr(tmp3); 78 kmsg->msg_control = compat_ptr(tmp3);
diff --git a/net/core/dev.c b/net/core/dev.c
index 5c713f2239cc..3430b1ed12e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
1917 return new_map; 1917 return new_map;
1918} 1918}
1919 1919
1920int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) 1920int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1921 u16 index)
1921{ 1922{
1922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; 1923 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1923 struct xps_map *map, *new_map; 1924 struct xps_map *map, *new_map;
@@ -5247,10 +5248,12 @@ static int dev_new_index(struct net *net)
5247 5248
 5248/* Delayed registration/unregistration */ 5249/* Delayed registration/unregistration */
5249static LIST_HEAD(net_todo_list); 5250static LIST_HEAD(net_todo_list);
5251static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5250 5252
5251static void net_set_todo(struct net_device *dev) 5253static void net_set_todo(struct net_device *dev)
5252{ 5254{
5253 list_add_tail(&dev->todo_list, &net_todo_list); 5255 list_add_tail(&dev->todo_list, &net_todo_list);
5256 dev_net(dev)->dev_unreg_count++;
5254} 5257}
5255 5258
5256static void rollback_registered_many(struct list_head *head) 5259static void rollback_registered_many(struct list_head *head)
@@ -5918,6 +5921,12 @@ void netdev_run_todo(void)
5918 if (dev->destructor) 5921 if (dev->destructor)
5919 dev->destructor(dev); 5922 dev->destructor(dev);
5920 5923
5924 /* Report a network device has been unregistered */
5925 rtnl_lock();
5926 dev_net(dev)->dev_unreg_count--;
5927 __rtnl_unlock();
5928 wake_up(&netdev_unregistering_wq);
5929
5921 /* Free network device */ 5930 /* Free network device */
5922 kobject_put(&dev->dev.kobj); 5931 kobject_put(&dev->dev.kobj);
5923 } 5932 }
@@ -6603,6 +6612,34 @@ static void __net_exit default_device_exit(struct net *net)
6603 rtnl_unlock(); 6612 rtnl_unlock();
6604} 6613}
6605 6614
6615static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6616{
6617 /* Return with the rtnl_lock held when there are no network
6618 * devices unregistering in any network namespace in net_list.
6619 */
6620 struct net *net;
6621 bool unregistering;
6622 DEFINE_WAIT(wait);
6623
6624 for (;;) {
6625 prepare_to_wait(&netdev_unregistering_wq, &wait,
6626 TASK_UNINTERRUPTIBLE);
6627 unregistering = false;
6628 rtnl_lock();
6629 list_for_each_entry(net, net_list, exit_list) {
6630 if (net->dev_unreg_count > 0) {
6631 unregistering = true;
6632 break;
6633 }
6634 }
6635 if (!unregistering)
6636 break;
6637 __rtnl_unlock();
6638 schedule();
6639 }
6640 finish_wait(&netdev_unregistering_wq, &wait);
6641}
6642
6606static void __net_exit default_device_exit_batch(struct list_head *net_list) 6643static void __net_exit default_device_exit_batch(struct list_head *net_list)
6607{ 6644{
 6608 /* At exit all network devices must be removed from a network 6645 /* At exit all network devices must be removed from a network
@@ -6614,7 +6651,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
6614 struct net *net; 6651 struct net *net;
6615 LIST_HEAD(dev_kill_list); 6652 LIST_HEAD(dev_kill_list);
6616 6653
6617 rtnl_lock(); 6654 /* To prevent network device cleanup code from dereferencing
6655 * loopback devices or network devices that have been freed
6656 * wait here for all pending unregistrations to complete,
 6657 * before unregistering the loopback device and allowing the
 6658 * network namespace to be freed.
6659 *
 6660 * The netdev todo list containing all network device
6661 * unregistrations that happen in default_device_exit_batch
6662 * will run in the rtnl_unlock() at the end of
6663 * default_device_exit_batch.
6664 */
6665 rtnl_lock_unregistering(net_list);
6618 list_for_each_entry(net, net_list, exit_list) { 6666 list_for_each_entry(net, net_list, exit_list) {
6619 for_each_netdev_reverse(net, dev) { 6667 for_each_netdev_reverse(net, dev) {
6620 if (dev->rtnl_link_ops) 6668 if (dev->rtnl_link_ops)
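The net/core/dev.c hunks above add a per-namespace dev_unreg_count and the netdev_unregistering_wq wait queue, and default_device_exit_batch() now calls rtnl_lock_unregistering() so it only proceeds, holding the rtnl lock, once no namespace in the batch still has devices pending unregistration. A minimal userspace sketch of that same "sleep until the count is zero, then return with the lock held" idiom, written with pthreads rather than the kernel wait-queue API (all names here are illustrative, not kernel code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int unreg_count;                 /* analogue of net->dev_unreg_count */

static void unreg_start(void)           /* analogue of net_set_todo() */
{
        pthread_mutex_lock(&lock);
        unreg_count++;
        pthread_mutex_unlock(&lock);
}

static void unreg_done(void)            /* analogue of the netdev_run_todo() side */
{
        pthread_mutex_lock(&lock);
        unreg_count--;
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&cond);  /* analogue of wake_up() after __rtnl_unlock() */
}

/* Return with the lock held once nothing is unregistering any more,
 * like rtnl_lock_unregistering() above: drop the lock while sleeping,
 * re-take it, and re-check the condition.
 */
static void lock_when_idle(void)
{
        pthread_mutex_lock(&lock);
        while (unreg_count > 0)
                pthread_cond_wait(&cond, &lock);
}

int main(void)
{
        unreg_start();
        unreg_done();
        lock_when_idle();               /* count is back to zero, returns immediately */
        pthread_mutex_unlock(&lock);
        return 0;
}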
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 644 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
645 645
646 bpf_jit_free(fp); 646 bpf_jit_free(fp);
647 kfree(fp);
648} 647}
649EXPORT_SYMBOL(sk_filter_release_rcu); 648EXPORT_SYMBOL(sk_filter_release_rcu);
650 649
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
683 if (fprog->filter == NULL) 682 if (fprog->filter == NULL)
684 return -EINVAL; 683 return -EINVAL;
685 684
686 fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); 685 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
687 if (!fp) 686 if (!fp)
688 return -ENOMEM; 687 return -ENOMEM;
689 memcpy(fp->insns, fprog->filter, fsize); 688 memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
723{ 722{
724 struct sk_filter *fp, *old_fp; 723 struct sk_filter *fp, *old_fp;
725 unsigned int fsize = sizeof(struct sock_filter) * fprog->len; 724 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
725 unsigned int sk_fsize = sk_filter_size(fprog->len);
726 int err; 726 int err;
727 727
728 if (sock_flag(sk, SOCK_FILTER_LOCKED)) 728 if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
732 if (fprog->filter == NULL) 732 if (fprog->filter == NULL)
733 return -EINVAL; 733 return -EINVAL;
734 734
735 fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); 735 fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
736 if (!fp) 736 if (!fp)
737 return -ENOMEM; 737 return -ENOMEM;
738 if (copy_from_user(fp->insns, fprog->filter, fsize)) { 738 if (copy_from_user(fp->insns, fprog->filter, fsize)) {
739 sock_kfree_s(sk, fp, fsize+sizeof(*fp)); 739 sock_kfree_s(sk, fp, sk_fsize);
740 return -EFAULT; 740 return -EFAULT;
741 } 741 }
742 742
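The net/core/filter.c hunks replace the open-coded fsize + sizeof(*fp) allocation size with a sk_filter_size(fprog->len) helper, pass the same value to sock_kfree_s() so the socket memory charge and release agree, and drop the direct kfree() from sk_filter_release_rcu(). The helper itself is defined in the kernel headers; the general "header plus trailing instruction array" sizing it stands for looks roughly like the sketch below, where the struct layout and names are purely illustrative:

#include <stddef.h>
#include <stdio.h>

struct insn { unsigned short code; unsigned char jt, jf; unsigned int k; };
struct filt { unsigned int len; struct insn insns[]; };  /* flexible array member */

/* room for the header plus 'len' instructions */
static size_t filt_size(unsigned int len)
{
        return offsetof(struct filt, insns) + (size_t)len * sizeof(struct insn);
}

int main(void)
{
        printf("%zu bytes for a 4-instruction filter\n", filt_size(4));
        return 0;
}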
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1929af87b260..8d7d0dd72db2 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -154,8 +154,8 @@ ipv6:
154 if (poff >= 0) { 154 if (poff >= 0) {
155 __be32 *ports, _ports; 155 __be32 *ports, _ports;
156 156
157 nhoff += poff; 157 ports = skb_header_pointer(skb, nhoff + poff,
158 ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); 158 sizeof(_ports), &_ports);
159 if (ports) 159 if (ports)
160 flow->ports = *ports; 160 flow->ports = *ports;
161 } 161 }
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6a2f13cee86a..8d9d05edd2eb 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -10,12 +10,27 @@
10 10
11#include <net/secure_seq.h> 11#include <net/secure_seq.h>
12 12
13static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 13#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
14#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
14 15
15void net_secret_init(void) 16static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
17
18static void net_secret_init(void)
16{ 19{
17 get_random_bytes(net_secret, sizeof(net_secret)); 20 u32 tmp;
21 int i;
22
23 if (likely(net_secret[0]))
24 return;
25
26 for (i = NET_SECRET_SIZE; i > 0;) {
27 do {
28 get_random_bytes(&tmp, sizeof(tmp));
29 } while (!tmp);
30 cmpxchg(&net_secret[--i], 0, tmp);
31 }
18} 32}
33#endif
19 34
20#ifdef CONFIG_INET 35#ifdef CONFIG_INET
21static u32 seq_scale(u32 seq) 36static u32 seq_scale(u32 seq)
@@ -42,6 +57,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
42 u32 hash[MD5_DIGEST_WORDS]; 57 u32 hash[MD5_DIGEST_WORDS];
43 u32 i; 58 u32 i;
44 59
60 net_secret_init();
45 memcpy(hash, saddr, 16); 61 memcpy(hash, saddr, 16);
46 for (i = 0; i < 4; i++) 62 for (i = 0; i < 4; i++)
47 secret[i] = net_secret[i] + (__force u32)daddr[i]; 63 secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +79,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
63 u32 hash[MD5_DIGEST_WORDS]; 79 u32 hash[MD5_DIGEST_WORDS];
64 u32 i; 80 u32 i;
65 81
82 net_secret_init();
66 memcpy(hash, saddr, 16); 83 memcpy(hash, saddr, 16);
67 for (i = 0; i < 4; i++) 84 for (i = 0; i < 4; i++)
68 secret[i] = net_secret[i] + (__force u32) daddr[i]; 85 secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +99,7 @@ __u32 secure_ip_id(__be32 daddr)
82{ 99{
83 u32 hash[MD5_DIGEST_WORDS]; 100 u32 hash[MD5_DIGEST_WORDS];
84 101
102 net_secret_init();
85 hash[0] = (__force __u32) daddr; 103 hash[0] = (__force __u32) daddr;
86 hash[1] = net_secret[13]; 104 hash[1] = net_secret[13];
87 hash[2] = net_secret[14]; 105 hash[2] = net_secret[14];
@@ -96,6 +114,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
96{ 114{
97 __u32 hash[4]; 115 __u32 hash[4];
98 116
117 net_secret_init();
99 memcpy(hash, daddr, 16); 118 memcpy(hash, daddr, 16);
100 md5_transform(hash, net_secret); 119 md5_transform(hash, net_secret);
101 120
@@ -107,6 +126,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
107{ 126{
108 u32 hash[MD5_DIGEST_WORDS]; 127 u32 hash[MD5_DIGEST_WORDS];
109 128
129 net_secret_init();
110 hash[0] = (__force u32)saddr; 130 hash[0] = (__force u32)saddr;
111 hash[1] = (__force u32)daddr; 131 hash[1] = (__force u32)daddr;
112 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 132 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +141,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
121{ 141{
122 u32 hash[MD5_DIGEST_WORDS]; 142 u32 hash[MD5_DIGEST_WORDS];
123 143
144 net_secret_init();
124 hash[0] = (__force u32)saddr; 145 hash[0] = (__force u32)saddr;
125 hash[1] = (__force u32)daddr; 146 hash[1] = (__force u32)daddr;
126 hash[2] = (__force u32)dport ^ net_secret[14]; 147 hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +161,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
140 u32 hash[MD5_DIGEST_WORDS]; 161 u32 hash[MD5_DIGEST_WORDS];
141 u64 seq; 162 u64 seq;
142 163
164 net_secret_init();
143 hash[0] = (__force u32)saddr; 165 hash[0] = (__force u32)saddr;
144 hash[1] = (__force u32)daddr; 166 hash[1] = (__force u32)daddr;
145 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; 167 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +186,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
164 u64 seq; 186 u64 seq;
165 u32 i; 187 u32 i;
166 188
189 net_secret_init();
167 memcpy(hash, saddr, 16); 190 memcpy(hash, saddr, 16);
168 for (i = 0; i < 4; i++) 191 for (i = 0; i < 4; i++)
169 secret[i] = net_secret[i] + daddr[i]; 192 secret[i] = net_secret[i] + daddr[i];
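With these hunks net_secret is no longer filled in one pass at boot: every caller runs net_secret_init() first, which returns as soon as word 0 is non-zero and otherwise fills each word with a non-zero random value through cmpxchg(), so concurrent first callers cannot overwrite each other. A compact userspace sketch of that idempotent-initialization idiom with C11 atomics (the PRNG is a stand-in for get_random_bytes() and the array size is only an example):

#include <stdatomic.h>
#include <stdlib.h>

#define SECRET_WORDS 16

static _Atomic unsigned int secret[SECRET_WORDS];

static unsigned int random_nonzero(void)
{
        unsigned int v;
        do {
                v = (unsigned int)rand();       /* stand-in for get_random_bytes() */
        } while (!v);
        return v;
}

static void secret_init(void)
{
        /* a non-zero word 0 means some caller already completed the loop */
        if (atomic_load(&secret[0]))
                return;

        for (int i = SECRET_WORDS; i > 0;) {
                unsigned int expected = 0;
                /* only the first writer of each word wins, exactly like
                 * cmpxchg(&net_secret[--i], 0, tmp); later callers leave
                 * the existing value in place */
                atomic_compare_exchange_strong(&secret[--i], &expected,
                                               random_nonzero());
        }
}

int main(void)
{
        secret_init();
        secret_init();  /* second call hits the early return */
        return 0;
}

Filling the array from the highest index down to index 0 is what makes the early-return check safe: once word 0 is non-zero, every other word has already been set.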
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2319 sk->sk_ll_usec = sysctl_net_busy_read; 2319 sk->sk_ll_usec = sysctl_net_busy_read;
2320#endif 2320#endif
2321 2321
2322 sk->sk_pacing_rate = ~0U;
2322 /* 2323 /*
2323 * Before updating sk_refcnt, we must commit prior changes to memory 2324 * Before updating sk_refcnt, we must commit prior changes to memory
2324 * (Documentation/RCU/rculist_nulls.txt for details) 2325 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index c85e71e0c7ff..ff41b4d60d30 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1372 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 1372 real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
1373 if (!real_dev) 1373 if (!real_dev)
1374 return -ENODEV; 1374 return -ENODEV;
1375 if (real_dev->type != ARPHRD_IEEE802154)
1376 return -EINVAL;
1375 1377
1376 lowpan_dev_info(dev)->real_dev = real_dev; 1378 lowpan_dev_info(dev)->real_dev = real_dev;
1377 lowpan_dev_info(dev)->fragment_tag = 0; 1379 lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1386 1388
1387 entry->ldev = dev; 1389 entry->ldev = dev;
1388 1390
1391 /* Set the lowpan hardware address to the wpan hardware address. */
1392 memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
1393
1389 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); 1394 mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
1390 INIT_LIST_HEAD(&entry->list); 1395 INIT_LIST_HEAD(&entry->list);
1391 list_add_tail(&entry->list, &lowpan_devices); 1396 list_add_tail(&entry->list, &lowpan_devices);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7a1874b7b8fd..cfeb85cff4f0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -263,10 +263,8 @@ void build_ehash_secret(void)
263 get_random_bytes(&rnd, sizeof(rnd)); 263 get_random_bytes(&rnd, sizeof(rnd));
264 } while (rnd == 0); 264 } while (rnd == 0);
265 265
266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) { 266 if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); 267 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
268 net_secret_init();
269 }
270} 268}
271EXPORT_SYMBOL(build_ehash_secret); 269EXPORT_SYMBOL(build_ehash_secret);
272 270
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index dace87f06e5f..7defdc9ba167 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data)
736 736
737 in_dev->mr_gq_running = 0; 737 in_dev->mr_gq_running = 0;
738 igmpv3_send_report(in_dev, NULL); 738 igmpv3_send_report(in_dev, NULL);
739 __in_dev_put(in_dev); 739 in_dev_put(in_dev);
740} 740}
741 741
742static void igmp_ifc_timer_expire(unsigned long data) 742static void igmp_ifc_timer_expire(unsigned long data)
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
749 igmp_ifc_start_timer(in_dev, 749 igmp_ifc_start_timer(in_dev,
750 unsolicited_report_interval(in_dev)); 750 unsolicited_report_interval(in_dev));
751 } 751 }
752 __in_dev_put(in_dev); 752 in_dev_put(in_dev);
753} 753}
754 754
755static void igmp_ifc_event(struct in_device *in_dev) 755static void igmp_ifc_event(struct in_device *in_dev)
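The two igmp timer handlers above now drop their in_device reference with in_dev_put() instead of __in_dev_put(): the full put can release the structure once the count reaches zero, while the double-underscore variant only decrements and relies on some other reference still keeping the object alive. A self-contained sketch of that distinction (generic refcounting, not the kernel's in_device code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;
        /* ... payload ... */
};

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);
        return o;
}

/* Full put: may free the object, so 'o' must not be touched afterwards. */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(o);
        }
}

/* The "__put"-style variant: only decrements; safe only when the caller knows
 * another reference outlives this one, otherwise the final free never happens. */
static void obj_put_nofree(struct obj *o)
{
        atomic_fetch_sub(&o->refcnt, 1);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcnt, 1);
        obj_get(o);         /* e.g. a pending timer takes a reference */
        obj_put_nofree(o);  /* decrement only: someone else still holds the object */
        obj_put(o);         /* full put: count hits zero, object is freed here */
        return 0;
}

The switch in the diff matters precisely when the timer can be the last holder of the reference.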
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 7bd8983dbfcf..96da9c77deca 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -287,7 +287,7 @@ begintw:
287 if (unlikely(!INET_TW_MATCH(sk, net, acookie, 287 if (unlikely(!INET_TW_MATCH(sk, net, acookie,
288 saddr, daddr, ports, 288 saddr, daddr, ports,
289 dif))) { 289 dif))) {
290 sock_put(sk); 290 inet_twsk_put(inet_twsk(sk));
291 goto begintw; 291 goto begintw;
292 } 292 }
293 goto out; 293 goto out;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a04d872c54f9..3982eabf61e1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
772 /* initialize protocol header pointer */ 772 /* initialize protocol header pointer */
773 skb->transport_header = skb->network_header + fragheaderlen; 773 skb->transport_header = skb->network_header + fragheaderlen;
774 774
775 skb->ip_summed = CHECKSUM_PARTIAL;
776 skb->csum = 0; 775 skb->csum = 0;
777 776
778 /* specify the length of each IP datagram fragment */ 777
779 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
780 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
781 __skb_queue_tail(queue, skb); 778 __skb_queue_tail(queue, skb);
779 } else if (skb_is_gso(skb)) {
780 goto append;
782 } 781 }
783 782
783 skb->ip_summed = CHECKSUM_PARTIAL;
784 /* specify the length of each IP datagram fragment */
785 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
786 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
787
788append:
784 return skb_append_datato_frags(sk, skb, getfrag, from, 789 return skb_append_datato_frags(sk, skb, getfrag, from,
785 (length - transhdrlen)); 790 (length - transhdrlen));
786} 791}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ac9fabe0300f..63a6d6d6b875 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
623 tunnel->err_count = 0; 623 tunnel->err_count = 0;
624 } 624 }
625 625
626 tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
626 ttl = tnl_params->ttl; 627 ttl = tnl_params->ttl;
627 if (ttl == 0) { 628 if (ttl == 0) {
628 if (skb->protocol == htons(ETH_P_IP)) 629 if (skb->protocol == htons(ETH_P_IP))
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
641 642
642 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) 643 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
643 + rt->dst.header_len; 644 + rt->dst.header_len;
644 if (max_headroom > dev->needed_headroom) { 645 if (max_headroom > dev->needed_headroom)
645 dev->needed_headroom = max_headroom; 646 dev->needed_headroom = max_headroom;
646 if (skb_cow_head(skb, dev->needed_headroom)) { 647
647 dev->stats.tx_dropped++; 648 if (skb_cow_head(skb, dev->needed_headroom)) {
648 dev_kfree_skb(skb); 649 dev->stats.tx_dropped++;
649 return; 650 dev_kfree_skb(skb);
650 } 651 return;
651 } 652 }
652 653
653 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, 654 err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
654 ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df, 655 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
655 !net_eq(tunnel->net, dev_net(dev)));
656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 656 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
657 657
658 return; 658 return;
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
853 /* FB netdevice is special: we have one, and only one per netns. 853 /* FB netdevice is special: we have one, and only one per netns.
854 * Allowing it to be moved to another netns is clearly unsafe. 854 * Allowing it to be moved to another netns is clearly unsafe.
855 */ 855 */
856 if (!IS_ERR(itn->fb_tunnel_dev)) 856 if (!IS_ERR(itn->fb_tunnel_dev)) {
857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 857 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
858 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
859 }
858 rtnl_unlock(); 860 rtnl_unlock();
859 861
860 return PTR_RET(itn->fb_tunnel_dev); 862 return PTR_RET(itn->fb_tunnel_dev);
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
884 if (!net_eq(dev_net(t->dev), net)) 886 if (!net_eq(dev_net(t->dev), net))
885 unregister_netdevice_queue(t->dev, head); 887 unregister_netdevice_queue(t->dev, head);
886 } 888 }
887 if (itn->fb_tunnel_dev)
888 unregister_netdevice_queue(itn->fb_tunnel_dev, head);
889} 889}
890 890
891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops) 891void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index d6c856b17fd4..c31e3ad98ef2 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
61 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 61 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
62 62
63 /* Push down and install the IP header. */ 63 /* Push down and install the IP header. */
64 __skb_push(skb, sizeof(struct iphdr)); 64 skb_push(skb, sizeof(struct iphdr));
65 skb_reset_network_header(skb); 65 skb_reset_network_header(skb);
66 66
67 iph = ip_hdr(skb); 67 iph = ip_hdr(skb);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e805e7b3030e..6e87f853d033 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb)
125 iph->saddr, iph->daddr, 0); 125 iph->saddr, iph->daddr, 0);
126 if (tunnel != NULL) { 126 if (tunnel != NULL) {
127 struct pcpu_tstats *tstats; 127 struct pcpu_tstats *tstats;
128 u32 oldmark = skb->mark;
129 int ret;
128 130
129 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 131
132 /* temporarily mark the skb with the tunnel o_key, to
133 * only match policies with this mark.
134 */
135 skb->mark = be32_to_cpu(tunnel->parms.o_key);
136 ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
137 skb->mark = oldmark;
138 if (!ret)
130 return -1; 139 return -1;
131 140
132 tstats = this_cpu_ptr(tunnel->dev->tstats); 141 tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb)
135 tstats->rx_bytes += skb->len; 144 tstats->rx_bytes += skb->len;
136 u64_stats_update_end(&tstats->syncp); 145 u64_stats_update_end(&tstats->syncp);
137 146
138 skb->mark = 0;
139 secpath_reset(skb); 147 secpath_reset(skb);
140 skb->dev = tunnel->dev; 148 skb->dev = tunnel->dev;
141 return 1; 149 return 1;
@@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
167 175
168 memset(&fl4, 0, sizeof(fl4)); 176 memset(&fl4, 0, sizeof(fl4));
169 flowi4_init_output(&fl4, tunnel->parms.link, 177 flowi4_init_output(&fl4, tunnel->parms.link,
170 be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), 178 be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
171 RT_SCOPE_UNIVERSE, 179 RT_SCOPE_UNIVERSE,
172 IPPROTO_IPIP, 0, 180 IPPROTO_IPIP, 0,
173 dst, tiph->saddr, 0, 0); 181 dst, tiph->saddr, 0, 0);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 67e17dcda65e..b6346bf2fde3 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
267 if (th == NULL) 267 if (th == NULL)
268 return NF_DROP; 268 return NF_DROP;
269 269
270 synproxy_parse_options(skb, par->thoff, th, &opts); 270 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
271 return NF_DROP;
271 272
272 if (th->syn && !(th->ack || th->fin || th->rst)) { 273 if (th->syn && !(th->ack || th->fin || th->rst)) {
273 /* Initial SYN from client */ 274 /* Initial SYN from client */
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
350 351
351 /* fall through */ 352 /* fall through */
352 case TCP_CONNTRACK_SYN_SENT: 353 case TCP_CONNTRACK_SYN_SENT:
353 synproxy_parse_options(skb, thoff, th, &opts); 354 if (!synproxy_parse_options(skb, thoff, th, &opts))
355 return NF_DROP;
354 356
355 if (!th->syn && th->ack && 357 if (!th->syn && th->ack &&
356 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 358 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
373 if (!th->syn || !th->ack) 375 if (!th->syn || !th->ack)
374 break; 376 break;
375 377
376 synproxy_parse_options(skb, thoff, th, &opts); 378 if (!synproxy_parse_options(skb, thoff, th, &opts))
379 return NF_DROP;
380
377 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 381 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
378 synproxy->tsoff = opts.tsval - synproxy->its; 382 synproxy->tsoff = opts.tsval - synproxy->its;
379 383
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bfec521c717f..193db03540ad 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
218 218
219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 219 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
220 ipv4_sk_update_pmtu(skb, sk, info); 220 ipv4_sk_update_pmtu(skb, sk, info);
221 else if (type == ICMP_REDIRECT) 221 else if (type == ICMP_REDIRECT) {
222 ipv4_sk_redirect(skb, sk); 222 ipv4_sk_redirect(skb, sk);
223 return;
224 }
223 225
224 /* Report error on raw socket, if: 226 /* Report error on raw socket, if:
225 1. User requested ip_recverr. 227 1. User requested ip_recverr.
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 727f4365bcdf..6011615e810d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2072 RT_SCOPE_LINK); 2072 RT_SCOPE_LINK);
2073 goto make_route; 2073 goto make_route;
2074 } 2074 }
2075 if (fl4->saddr) { 2075 if (!fl4->saddr) {
2076 if (ipv4_is_multicast(fl4->daddr)) 2076 if (ipv4_is_multicast(fl4->daddr))
2077 fl4->saddr = inet_select_addr(dev_out, 0, 2077 fl4->saddr = inet_select_addr(dev_out, 0,
2078 fl4->flowi4_scope); 2078 fl4->flowi4_scope);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 25a89eaa669d..a16b01b537ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1284,7 +1284,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1284 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1284 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1285 } 1285 }
1286 1286
1287 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; 1287 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1288 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1289 TCP_SKB_CB(prev)->end_seq++;
1290
1288 if (skb == tcp_highest_sack(sk)) 1291 if (skb == tcp_highest_sack(sk))
1289 tcp_advance_highest_sack(sk, skb); 1292 tcp_advance_highest_sack(sk, skb);
1290 1293
@@ -3288,7 +3291,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3288 tcp_init_cwnd_reduction(sk, true); 3291 tcp_init_cwnd_reduction(sk, true);
3289 tcp_set_ca_state(sk, TCP_CA_CWR); 3292 tcp_set_ca_state(sk, TCP_CA_CWR);
3290 tcp_end_cwnd_reduction(sk); 3293 tcp_end_cwnd_reduction(sk);
3291 tcp_set_ca_state(sk, TCP_CA_Open); 3294 tcp_try_keep_open(sk);
3292 NET_INC_STATS_BH(sock_net(sk), 3295 NET_INC_STATS_BH(sock_net(sk),
3293 LINUX_MIB_TCPLOSSPROBERECOVERY); 3296 LINUX_MIB_TCPLOSSPROBERECOVERY);
3294 } 3297 }
@@ -5709,6 +5712,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5709 } else 5712 } else
5710 tcp_init_metrics(sk); 5713 tcp_init_metrics(sk);
5711 5714
5715 tcp_update_pacing_rate(sk);
5716
5712 /* Prevent spurious tcp_cwnd_restart() on first data packet */ 5717 /* Prevent spurious tcp_cwnd_restart() on first data packet */
5713 tp->lsndtime = tcp_time_stamp; 5718 tp->lsndtime = tcp_time_stamp;
5714 5719
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7c83cb8bf137..d46f2143305c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
637 unsigned int size = 0; 637 unsigned int size = 0;
638 unsigned int eff_sacks; 638 unsigned int eff_sacks;
639 639
640 opts->options = 0;
641
640#ifdef CONFIG_TCP_MD5SIG 642#ifdef CONFIG_TCP_MD5SIG
641 *md5 = tp->af_specific->md5_lookup(sk, sk); 643 *md5 = tp->af_specific->md5_lookup(sk, sk);
642 if (unlikely(*md5)) { 644 if (unlikely(*md5)) {
@@ -895,8 +897,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
895 897
896 skb_orphan(skb); 898 skb_orphan(skb);
897 skb->sk = sk; 899 skb->sk = sk;
898 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? 900 skb->destructor = tcp_wfree;
899 tcp_wfree : sock_wfree;
900 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 901 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
901 902
902 /* Build TCP header and checksum it. */ 903 /* Build TCP header and checksum it. */
@@ -985,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
985static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 986static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
986 unsigned int mss_now) 987 unsigned int mss_now)
987{ 988{
988 if (skb->len <= mss_now || !sk_can_gso(sk) || 989 /* Make sure we own this skb before messing with gso_size/gso_segs */
989 skb->ip_summed == CHECKSUM_NONE) { 990 WARN_ON_ONCE(skb_cloned(skb));
991
992 if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
990 /* Avoid the costly divide in the normal 993 /* Avoid the costly divide in the normal
991 * non-TSO case. 994 * non-TSO case.
992 */ 995 */
@@ -1066,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1066 if (nsize < 0) 1069 if (nsize < 0)
1067 nsize = 0; 1070 nsize = 0;
1068 1071
1069 if (skb_cloned(skb) && 1072 if (skb_unclone(skb, GFP_ATOMIC))
1070 skb_is_nonlinear(skb) &&
1071 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1072 return -ENOMEM; 1073 return -ENOMEM;
1073 1074
1074 /* Get a new skb... force flag on. */ 1075 /* Get a new skb... force flag on. */
@@ -1840,7 +1841,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1840 while ((skb = tcp_send_head(sk))) { 1841 while ((skb = tcp_send_head(sk))) {
1841 unsigned int limit; 1842 unsigned int limit;
1842 1843
1843
1844 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1844 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1845 BUG_ON(!tso_segs); 1845 BUG_ON(!tso_segs);
1846 1846
@@ -1869,13 +1869,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1869 break; 1869 break;
1870 } 1870 }
1871 1871
1872 /* TSQ : sk_wmem_alloc accounts skb truesize, 1872 /* TCP Small Queues :
1873 * including skb overhead. But thats OK. 1873 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1874 * This allows for :
1875 * - better RTT estimation and ACK scheduling
1876 * - faster recovery
1877 * - high rates
1874 */ 1878 */
1875 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { 1879 limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
1880
1881 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1876 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1882 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1877 break; 1883 break;
1878 } 1884 }
1885
1879 limit = mss_now; 1886 limit = mss_now;
1880 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1887 if (tso_segs > 1 && !tcp_urg_mode(tp))
1881 limit = tcp_mss_split_point(sk, skb, mss_now, 1888 limit = tcp_mss_split_point(sk, skb, mss_now,
@@ -2337,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2337 int oldpcount = tcp_skb_pcount(skb); 2344 int oldpcount = tcp_skb_pcount(skb);
2338 2345
2339 if (unlikely(oldpcount > 1)) { 2346 if (unlikely(oldpcount > 1)) {
2347 if (skb_unclone(skb, GFP_ATOMIC))
2348 return -ENOMEM;
2340 tcp_init_tso_segs(sk, skb, cur_mss); 2349 tcp_init_tso_segs(sk, skb, cur_mss);
2341 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2350 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2342 } 2351 }
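The TCP Small Queues hunk above replaces the global sysctl_tcp_limit_output_bytes cap with a per-socket limit derived from the pacing rate: sk_pacing_rate is a bytes-per-second figure, so sk_pacing_rate >> 10 is roughly the number of bytes the flow sends in 1/1024 of a second, floored at one skb's truesize. A quick numeric illustration (the rate and truesize values are examples, not defaults):

#include <stdio.h>

int main(void)
{
        unsigned int pacing_rate = 12500000;          /* ~100 Mbit/s in bytes per second (example) */
        unsigned int truesize    = 2304;              /* example skb truesize */
        unsigned int limit       = pacing_rate >> 10; /* bytes sent in ~1/1024 s */

        if (limit < truesize)
                limit = truesize;

        printf("TSQ limit: %u bytes (~%.2f ms at this rate)\n",
               limit, 1000.0 * limit / pacing_rate);
        return 0;
}

At roughly 100 Mbit/s this works out to about 12 KB queued below the TCP layer, close to the "~1 ms" the comment aims for.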
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 74d2c95db57f..0ca44df51ee9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -658,7 +658,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
658 break; 658 break;
659 case ICMP_REDIRECT: 659 case ICMP_REDIRECT:
660 ipv4_sk_redirect(skb, sk); 660 ipv4_sk_redirect(skb, sk);
661 break; 661 goto out;
662 } 662 }
663 663
664 /* 664 /*
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9a459be24af7..ccde54248c8c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
107 107
108 memset(fl4, 0, sizeof(struct flowi4)); 108 memset(fl4, 0, sizeof(struct flowi4));
109 fl4->flowi4_mark = skb->mark; 109 fl4->flowi4_mark = skb->mark;
110 fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
110 111
111 if (!ip_is_fragment(iph)) { 112 if (!ip_is_fragment(iph)) {
112 switch (iph->protocol) { 113 switch (iph->protocol) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d6ff12617f36..cd3fb301da38 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1499 return false; 1499 return false;
1500} 1500}
1501 1501
1502/* Compares an address/prefix_len with the addresses on device @dev.
1503 * If a matching address is found it returns true.
1504 */
1505bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1506 const unsigned int prefix_len, struct net_device *dev)
1507{
1508 struct inet6_dev *idev;
1509 struct inet6_ifaddr *ifa;
1510 bool ret = false;
1511
1512 rcu_read_lock();
1513 idev = __in6_dev_get(dev);
1514 if (idev) {
1515 read_lock_bh(&idev->lock);
1516 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1517 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1518 if (ret)
1519 break;
1520 }
1521 read_unlock_bh(&idev->lock);
1522 }
1523 rcu_read_unlock();
1524
1525 return ret;
1526}
1527EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1528
1502int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) 1529int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1503{ 1530{
1504 struct inet6_dev *idev; 1531 struct inet6_dev *idev;
@@ -2193,43 +2220,21 @@ ok:
2193 else 2220 else
2194 stored_lft = 0; 2221 stored_lft = 0;
2195 if (!update_lft && !create && stored_lft) { 2222 if (!update_lft && !create && stored_lft) {
2196 if (valid_lft > MIN_VALID_LIFETIME || 2223 const u32 minimum_lft = min(
2197 valid_lft > stored_lft) 2224 stored_lft, (u32)MIN_VALID_LIFETIME);
2198 update_lft = 1; 2225 valid_lft = max(valid_lft, minimum_lft);
2199 else if (stored_lft <= MIN_VALID_LIFETIME) { 2226
2200 /* valid_lft <= stored_lft is always true */ 2227 /* RFC4862 Section 5.5.3e:
2201 /* 2228 * "Note that the preferred lifetime of the
2202 * RFC 4862 Section 5.5.3e: 2229 * corresponding address is always reset to
2203 * "Note that the preferred lifetime of 2230 * the Preferred Lifetime in the received
2204 * the corresponding address is always 2231 * Prefix Information option, regardless of
2205 * reset to the Preferred Lifetime in 2232 * whether the valid lifetime is also reset or
2206 * the received Prefix Information 2233 * ignored."
2207 * option, regardless of whether the 2234 *
2208 * valid lifetime is also reset or 2235 * So we should always update prefered_lft here.
2209 * ignored." 2236 */
2210 * 2237 update_lft = 1;
2211 * So if the preferred lifetime in
2212 * this advertisement is different
2213 * than what we have stored, but the
2214 * valid lifetime is invalid, just
2215 * reset prefered_lft.
2216 *
2217 * We must set the valid lifetime
2218 * to the stored lifetime since we'll
2219 * be updating the timestamp below,
2220 * else we'll set it back to the
2221 * minimum.
2222 */
2223 if (prefered_lft != ifp->prefered_lft) {
2224 valid_lft = stored_lft;
2225 update_lft = 1;
2226 }
2227 } else {
2228 valid_lft = MIN_VALID_LIFETIME;
2229 if (valid_lft < prefered_lft)
2230 prefered_lft = valid_lft;
2231 update_lft = 1;
2232 }
2233 } 2238 }
2234 2239
2235 if (update_lft) { 2240 if (update_lft) {
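The rewritten lifetime handling above reduces the old branching to a single clamp: the valid lifetime accepted from a router advertisement is never allowed to drop below min(stored_lft, MIN_VALID_LIFETIME), and the preferred lifetime is always refreshed per RFC 4862 section 5.5.3e. A small worked example of the clamp; the 7200-second figure assumes MIN_VALID_LIFETIME is the usual two-hour value from RFC 4862:

#include <stdio.h>

static unsigned int clamp_valid_lft(unsigned int advertised,
                                    unsigned int stored,
                                    unsigned int min_valid)
{
        unsigned int minimum = stored < min_valid ? stored : min_valid;

        return advertised > minimum ? advertised : minimum;
}

int main(void)
{
        /* 7200 s: assumed two-hour MIN_VALID_LIFETIME */
        printf("%u\n", clamp_valid_lft(60, 86400, 7200)); /* 7200: cannot shrink below 2h    */
        printf("%u\n", clamp_valid_lft(60, 600, 7200));   /* 600: short stored lifetime wins */
        printf("%u\n", clamp_valid_lft(90000, 600, 7200));/* 90000: extending is always fine */
        return 0;
}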
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 73784c3d4642..82e1da3a40b9 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
618 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); 618 struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
619 struct xfrm_state *x; 619 struct xfrm_state *x;
620 620
621 if (type != ICMPV6_DEST_UNREACH && 621 if (type != ICMPV6_PKT_TOOBIG &&
622 type != ICMPV6_PKT_TOOBIG &&
623 type != NDISC_REDIRECT) 622 type != NDISC_REDIRECT)
624 return; 623 return;
625 624
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index d3618a78fcac..e67e63f9858d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
436 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 436 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
437 struct xfrm_state *x; 437 struct xfrm_state *x;
438 438
439 if (type != ICMPV6_DEST_UNREACH && 439 if (type != ICMPV6_PKT_TOOBIG &&
440 type != ICMPV6_PKT_TOOBIG &&
441 type != NDISC_REDIRECT) 440 type != NDISC_REDIRECT)
442 return; 441 return;
443 442
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 32b4a1675d82..066640e0ba8e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -116,7 +116,7 @@ begintw:
116 } 116 }
117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, 117 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
118 ports, dif))) { 118 ports, dif))) {
119 sock_put(sk); 119 inet_twsk_put(inet_twsk(sk));
120 goto begintw; 120 goto begintw;
121 } 121 }
122 goto out; 122 goto out;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6b26e9feafb9..bf4a9a084de5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
618 struct ip6_tnl *tunnel = netdev_priv(dev); 618 struct ip6_tnl *tunnel = netdev_priv(dev);
619 struct net_device *tdev; /* Device to other host */ 619 struct net_device *tdev; /* Device to other host */
620 struct ipv6hdr *ipv6h; /* Our new IP header */ 620 struct ipv6hdr *ipv6h; /* Our new IP header */
621 unsigned int max_headroom; /* The extra header space needed */ 621 unsigned int max_headroom = 0; /* The extra header space needed */
622 int gre_hlen; 622 int gre_hlen;
623 struct ipv6_tel_txoption opt; 623 struct ipv6_tel_txoption opt;
624 int mtu; 624 int mtu;
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
693 693
694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); 694 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
695 695
696 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; 696 max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
697 697
698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) || 698 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 699 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
976 if (t->parms.o_flags&GRE_SEQ) 976 if (t->parms.o_flags&GRE_SEQ)
977 addend += 4; 977 addend += 4;
978 } 978 }
979 t->hlen = addend;
979 980
980 if (p->flags & IP6_TNL_F_CAP_XMIT) { 981 if (p->flags & IP6_TNL_F_CAP_XMIT) {
981 int strict = (ipv6_addr_type(&p->raddr) & 982 int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1002 } 1003 }
1003 ip6_rt_put(rt); 1004 ip6_rt_put(rt);
1004 } 1005 }
1005
1006 t->hlen = addend;
1007} 1006}
1008 1007
1009static int ip6gre_tnl_change(struct ip6_tnl *t, 1008static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
1173 1172
1174static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1173static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1175{ 1174{
1176 struct ip6_tnl *tunnel = netdev_priv(dev);
1177 if (new_mtu < 68 || 1175 if (new_mtu < 68 ||
1178 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) 1176 new_mtu > 0xFFF8 - dev->hard_header_len)
1179 return -EINVAL; 1177 return -EINVAL;
1180 dev->mtu = new_mtu; 1178 dev->mtu = new_mtu;
1181 return 0; 1179 return 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 3a692d529163..91fb4e8212f5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
105 } 105 }
106 106
107 rcu_read_lock_bh(); 107 rcu_read_lock_bh();
108 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); 108 nexthop = rt6_nexthop((struct rt6_info *)dst);
109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); 109 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
110 if (unlikely(!neigh)) 110 if (unlikely(!neigh))
111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); 111 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
874 */ 874 */
875 rt = (struct rt6_info *) *dst; 875 rt = (struct rt6_info *) *dst;
876 rcu_read_lock_bh(); 876 rcu_read_lock_bh();
877 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); 877 n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
878 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; 878 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
879 rcu_read_unlock_bh(); 879 rcu_read_unlock_bh();
880 880
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1008 1008
1009{ 1009{
1010 struct sk_buff *skb; 1010 struct sk_buff *skb;
1011 struct frag_hdr fhdr;
1011 int err; 1012 int err;
1012 1013
1013 /* There is support for UDP large send offload by network 1014 /* There is support for UDP large send offload by network
@@ -1034,33 +1035,26 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1034 skb->transport_header = skb->network_header + fragheaderlen; 1035 skb->transport_header = skb->network_header + fragheaderlen;
1035 1036
1036 skb->protocol = htons(ETH_P_IPV6); 1037 skb->protocol = htons(ETH_P_IPV6);
1037 skb->ip_summed = CHECKSUM_PARTIAL;
1038 skb->csum = 0; 1038 skb->csum = 0;
1039 }
1040
1041 err = skb_append_datato_frags(sk,skb, getfrag, from,
1042 (length - transhdrlen));
1043 if (!err) {
1044 struct frag_hdr fhdr;
1045 1039
1046 /* Specify the length of each IPv6 datagram fragment.
1047 * It has to be a multiple of 8.
1048 */
1049 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1050 sizeof(struct frag_hdr)) & ~7;
1051 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1052 ipv6_select_ident(&fhdr, rt);
1053 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1054 __skb_queue_tail(&sk->sk_write_queue, skb); 1040 __skb_queue_tail(&sk->sk_write_queue, skb);
1055 1041 } else if (skb_is_gso(skb)) {
1056 return 0; 1042 goto append;
1057 } 1043 }
1058 /* There is not enough support do UPD LSO,
1059 * so follow normal path
1060 */
1061 kfree_skb(skb);
1062 1044
1063 return err; 1045 skb->ip_summed = CHECKSUM_PARTIAL;
1046 /* Specify the length of each IPv6 datagram fragment.
1047 * It has to be a multiple of 8.
1048 */
1049 skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1050 sizeof(struct frag_hdr)) & ~7;
1051 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1052 ipv6_select_ident(&fhdr, rt);
1053 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1054
1055append:
1056 return skb_append_datato_frags(sk, skb, getfrag, from,
1057 (length - transhdrlen));
1064} 1058}
1065 1059
1066static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, 1060static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1227,27 +1221,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1227 * --yoshfuji 1221 * --yoshfuji
1228 */ 1222 */
1229 1223
1230 cork->length += length; 1224 if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1231 if (length > mtu) { 1225 sk->sk_protocol == IPPROTO_RAW)) {
1232 int proto = sk->sk_protocol; 1226 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1233 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ 1227 return -EMSGSIZE;
1234 ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); 1228 }
1235 return -EMSGSIZE;
1236 }
1237
1238 if (proto == IPPROTO_UDP &&
1239 (rt->dst.dev->features & NETIF_F_UFO)) {
1240 1229
1241 err = ip6_ufo_append_data(sk, getfrag, from, length, 1230 skb = skb_peek_tail(&sk->sk_write_queue);
1242 hh_len, fragheaderlen, 1231 cork->length += length;
1243 transhdrlen, mtu, flags, rt); 1232 if (((length > mtu) ||
1244 if (err) 1233 (skb && skb_is_gso(skb))) &&
1245 goto error; 1234 (sk->sk_protocol == IPPROTO_UDP) &&
1246 return 0; 1235 (rt->dst.dev->features & NETIF_F_UFO)) {
1247 } 1236 err = ip6_ufo_append_data(sk, getfrag, from, length,
1237 hh_len, fragheaderlen,
1238 transhdrlen, mtu, flags, rt);
1239 if (err)
1240 goto error;
1241 return 0;
1248 } 1242 }
1249 1243
1250 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 1244 if (!skb)
1251 goto alloc_new_skb; 1245 goto alloc_new_skb;
1252 1246
1253 while (length > 0) { 1247 while (length > 0) {
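The reworked ip6_ufo_append_data() above applies the GSO fields both to a freshly queued skb and to a tail skb that is already GSO (the append: label), instead of only on the first pass. The per-fragment payload it programs is (mtu - fragheaderlen - sizeof(struct frag_hdr)) & ~7, i.e. rounded down to a multiple of 8 as the comment requires. With example values, a 1500-byte MTU and a 40-byte IPv6 header with no extension headers (both assumptions):

#include <stdio.h>

int main(void)
{
        unsigned int mtu           = 1500;  /* example link MTU */
        unsigned int fragheaderlen = 40;    /* example: bare IPv6 header, no extension headers */
        unsigned int frag_hdr_len  = 8;     /* size of the IPv6 fragment header */

        /* per-fragment payload must be a multiple of 8, hence the & ~7 */
        unsigned int gso_size = (mtu - fragheaderlen - frag_hdr_len) & ~7u;

        printf("gso_size = %u\n", gso_size); /* 1448 for these values */
        return 0;
}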
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2d8f4829575b..583b77e2f69b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1430static int 1430static int
1431ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) 1431ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1432{ 1432{
1433 if (new_mtu < IPV6_MIN_MTU) { 1433 struct ip6_tnl *tnl = netdev_priv(dev);
1434 return -EINVAL; 1434
1435 if (tnl->parms.proto == IPPROTO_IPIP) {
1436 if (new_mtu < 68)
1437 return -EINVAL;
1438 } else {
1439 if (new_mtu < IPV6_MIN_MTU)
1440 return -EINVAL;
1435 } 1441 }
1442 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1443 return -EINVAL;
1436 dev->mtu = new_mtu; 1444 dev->mtu = new_mtu;
1437 return 0; 1445 return 0;
1438} 1446}
@@ -1731,8 +1739,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1731 } 1739 }
1732 } 1740 }
1733 1741
1734 t = rtnl_dereference(ip6n->tnls_wc[0]);
1735 unregister_netdevice_queue(t->dev, &list);
1736 unregister_netdevice_many(&list); 1742 unregister_netdevice_many(&list);
1737} 1743}
1738 1744
@@ -1752,6 +1758,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
1752 if (!ip6n->fb_tnl_dev) 1758 if (!ip6n->fb_tnl_dev)
1753 goto err_alloc_dev; 1759 goto err_alloc_dev;
1754 dev_net_set(ip6n->fb_tnl_dev, net); 1760 dev_net_set(ip6n->fb_tnl_dev, net);
1761 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
1755 /* FB netdevice is special: we have one, and only one per netns. 1762 /* FB netdevice is special: we have one, and only one per netns.
1756 * Allowing it to be moved to another netns is clearly unsafe. 1763 * Allowing it to be moved to another netns is clearly unsafe.
1757 */ 1764 */
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5636a912074a..ce507d9e1c90 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
64 (struct ip_comp_hdr *)(skb->data + offset); 64 (struct ip_comp_hdr *)(skb->data + offset);
65 struct xfrm_state *x; 65 struct xfrm_state *x;
66 66
67 if (type != ICMPV6_DEST_UNREACH && 67 if (type != ICMPV6_PKT_TOOBIG &&
68 type != ICMPV6_PKT_TOOBIG &&
69 type != NDISC_REDIRECT) 68 type != NDISC_REDIRECT)
70 return; 69 return;
71 70
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 096cd67b737c..d18f9f903db6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data)
2034 if (idev->mc_dad_count) 2034 if (idev->mc_dad_count)
2035 mld_dad_start_timer(idev, idev->mc_maxdelay); 2035 mld_dad_start_timer(idev, idev->mc_maxdelay);
2036 } 2036 }
2037 __in6_dev_put(idev); 2037 in6_dev_put(idev);
2038} 2038}
2039 2039
2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 2040static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data)
2379 2379
2380 idev->mc_gq_running = 0; 2380 idev->mc_gq_running = 0;
2381 mld_send_report(idev, NULL); 2381 mld_send_report(idev, NULL);
2382 __in6_dev_put(idev); 2382 in6_dev_put(idev);
2383} 2383}
2384 2384
2385static void mld_ifc_timer_expire(unsigned long data) 2385static void mld_ifc_timer_expire(unsigned long data)
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data)
2392 if (idev->mc_ifc_count) 2392 if (idev->mc_ifc_count)
2393 mld_ifc_start_timer(idev, idev->mc_maxdelay); 2393 mld_ifc_start_timer(idev, idev->mc_maxdelay);
2394 } 2394 }
2395 __in6_dev_put(idev); 2395 in6_dev_put(idev);
2396} 2396}
2397 2397
2398static void mld_ifc_event(struct inet6_dev *idev) 2398static void mld_ifc_event(struct inet6_dev *idev)
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 19cfea8dbcaa..2748b042da72 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
282 if (th == NULL) 282 if (th == NULL)
283 return NF_DROP; 283 return NF_DROP;
284 284
285 synproxy_parse_options(skb, par->thoff, th, &opts); 285 if (!synproxy_parse_options(skb, par->thoff, th, &opts))
286 return NF_DROP;
286 287
287 if (th->syn && !(th->ack || th->fin || th->rst)) { 288 if (th->syn && !(th->ack || th->fin || th->rst)) {
288 /* Initial SYN from client */ 289 /* Initial SYN from client */
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
372 373
373 /* fall through */ 374 /* fall through */
374 case TCP_CONNTRACK_SYN_SENT: 375 case TCP_CONNTRACK_SYN_SENT:
375 synproxy_parse_options(skb, thoff, th, &opts); 376 if (!synproxy_parse_options(skb, thoff, th, &opts))
377 return NF_DROP;
376 378
377 if (!th->syn && th->ack && 379 if (!th->syn && th->ack &&
378 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 380 CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
395 if (!th->syn || !th->ack) 397 if (!th->syn || !th->ack)
396 break; 398 break;
397 399
398 synproxy_parse_options(skb, thoff, th, &opts); 400 if (!synproxy_parse_options(skb, thoff, th, &opts))
401 return NF_DROP;
402
399 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) 403 if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
400 synproxy->tsoff = opts.tsval - synproxy->its; 404 synproxy->tsoff = opts.tsval - synproxy->its;
401 405
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 58916bbb1728..a4ed2416399e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -335,8 +335,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
335 ip6_sk_update_pmtu(skb, sk, info); 335 ip6_sk_update_pmtu(skb, sk, info);
336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); 336 harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
337 } 337 }
338 if (type == NDISC_REDIRECT) 338 if (type == NDISC_REDIRECT) {
339 ip6_sk_redirect(skb, sk); 339 ip6_sk_redirect(skb, sk);
340 return;
341 }
340 if (np->recverr) { 342 if (np->recverr) {
341 u8 *payload = skb->data; 343 u8 *payload = skb->data;
342 if (!inet->hdrincl) 344 if (!inet->hdrincl)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c979dd96d82a..f54e3a101098 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -476,6 +476,24 @@ out:
476} 476}
477 477
478#ifdef CONFIG_IPV6_ROUTER_PREF 478#ifdef CONFIG_IPV6_ROUTER_PREF
479struct __rt6_probe_work {
480 struct work_struct work;
481 struct in6_addr target;
482 struct net_device *dev;
483};
484
485static void rt6_probe_deferred(struct work_struct *w)
486{
487 struct in6_addr mcaddr;
488 struct __rt6_probe_work *work =
489 container_of(w, struct __rt6_probe_work, work);
490
491 addrconf_addr_solict_mult(&work->target, &mcaddr);
492 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
493 dev_put(work->dev);
494 kfree(w);
495}
496
479static void rt6_probe(struct rt6_info *rt) 497static void rt6_probe(struct rt6_info *rt)
480{ 498{
481 struct neighbour *neigh; 499 struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
499 517
500 if (!neigh || 518 if (!neigh ||
501 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { 519 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
502 struct in6_addr mcaddr; 520 struct __rt6_probe_work *work;
503 struct in6_addr *target;
504 521
505 if (neigh) { 522 work = kmalloc(sizeof(*work), GFP_ATOMIC);
523
524 if (neigh && work)
506 neigh->updated = jiffies; 525 neigh->updated = jiffies;
526
527 if (neigh)
507 write_unlock(&neigh->lock); 528 write_unlock(&neigh->lock);
508 }
509 529
510 target = (struct in6_addr *)&rt->rt6i_gateway; 530 if (work) {
511 addrconf_addr_solict_mult(target, &mcaddr); 531 INIT_WORK(&work->work, rt6_probe_deferred);
512 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); 532 work->target = rt->rt6i_gateway;
533 dev_hold(rt->dst.dev);
534 work->dev = rt->dst.dev;
535 schedule_work(&work->work);
536 }
513 } else { 537 } else {
514out: 538out:
515 write_unlock(&neigh->lock); 539 write_unlock(&neigh->lock);
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
851 if (ort->rt6i_dst.plen != 128 && 875 if (ort->rt6i_dst.plen != 128 &&
852 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) 876 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
853 rt->rt6i_flags |= RTF_ANYCAST; 877 rt->rt6i_flags |= RTF_ANYCAST;
854 rt->rt6i_gateway = *daddr;
855 } 878 }
856 879
857 rt->rt6i_flags |= RTF_CACHE; 880 rt->rt6i_flags |= RTF_CACHE;
@@ -1338,6 +1361,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1338 rt->dst.flags |= DST_HOST; 1361 rt->dst.flags |= DST_HOST;
1339 rt->dst.output = ip6_output; 1362 rt->dst.output = ip6_output;
1340 atomic_set(&rt->dst.__refcnt, 1); 1363 atomic_set(&rt->dst.__refcnt, 1);
1364 rt->rt6i_gateway = fl6->daddr;
1341 rt->rt6i_dst.addr = fl6->daddr; 1365 rt->rt6i_dst.addr = fl6->daddr;
1342 rt->rt6i_dst.plen = 128; 1366 rt->rt6i_dst.plen = 128;
1343 rt->rt6i_idev = idev; 1367 rt->rt6i_idev = idev;
@@ -1873,7 +1897,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1873 in6_dev_hold(rt->rt6i_idev); 1897 in6_dev_hold(rt->rt6i_idev);
1874 rt->dst.lastuse = jiffies; 1898 rt->dst.lastuse = jiffies;
1875 1899
1876 rt->rt6i_gateway = ort->rt6i_gateway; 1900 if (ort->rt6i_flags & RTF_GATEWAY)
1901 rt->rt6i_gateway = ort->rt6i_gateway;
1902 else
1903 rt->rt6i_gateway = *dest;
1877 rt->rt6i_flags = ort->rt6i_flags; 1904 rt->rt6i_flags = ort->rt6i_flags;
1878 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == 1905 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1879 (RTF_DEFAULT | RTF_ADDRCONF)) 1906 (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2187,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2160 else 2187 else
2161 rt->rt6i_flags |= RTF_LOCAL; 2188 rt->rt6i_flags |= RTF_LOCAL;
2162 2189
2190 rt->rt6i_gateway = *addr;
2163 rt->rt6i_dst.addr = *addr; 2191 rt->rt6i_dst.addr = *addr;
2164 rt->rt6i_dst.plen = 128; 2192 rt->rt6i_dst.plen = 128;
2165 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); 2193 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
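rt6_probe() above no longer calls ndisc_send_ns() directly: it copies the gateway address, takes a reference on the device, and hands a small heap-allocated __rt6_probe_work item to schedule_work(), and rt6_probe_deferred() later performs the send and releases everything. A userspace analogue of that hand-off pattern, using a detached thread instead of a workqueue (the names and the pthread plumbing are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct probe_work {
        char target[64];                    /* copy of everything the deferred step needs */
};

static void *probe_deferred(void *arg)
{
        struct probe_work *w = arg;

        printf("probing %s\n", w->target);  /* the deferred action */
        free(w);                            /* the worker owns and frees the item */
        return NULL;
}

static void schedule_probe(const char *target)
{
        struct probe_work *w = malloc(sizeof(*w));
        pthread_t tid;

        if (!w)
                return;                     /* mirrors the kmalloc(GFP_ATOMIC) failure path */
        snprintf(w->target, sizeof(w->target), "%s", target);

        if (pthread_create(&tid, NULL, probe_deferred, w)) {
                free(w);
                return;
        }
        pthread_detach(tid);
}

int main(void)
{
        schedule_probe("fe80::1");
        pthread_exit(NULL);                 /* let the detached worker finish */
}

The rule the sketch mirrors: copy everything the deferred function needs into the work item before scheduling it, and let the worker be the one that frees it.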
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 7ee5cb96db34..19269453a8ea 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
566 return false; 566 return false;
567} 567}
568 568
569/* Checks if an address matches an address on the tunnel interface.
570 * Used to detect the NAT of proto 41 packets and let them pass the spoofing test.
571 * Long story:
572 * This function is called after we considered the packet as spoofed
573 * in is_spoofed_6rd.
574 * We may have a router that is doing NAT for proto 41 packets
575 * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
576 * will be translated to n.n.n.n/PREFIX:bbbb:bbbb, and the is_spoofed_6rd
577 * check will return true, dropping the packet.
578 * But we can still check whether it is spoofed against the IP
579 * addresses associated with the interface.
580 */
581static bool only_dnatted(const struct ip_tunnel *tunnel,
582 const struct in6_addr *v6dst)
583{
584 int prefix_len;
585
586#ifdef CONFIG_IPV6_SIT_6RD
587 prefix_len = tunnel->ip6rd.prefixlen + 32
588 - tunnel->ip6rd.relay_prefixlen;
589#else
590 prefix_len = 48;
591#endif
592 return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
593}
594
595/* Returns true if a packet is spoofed */
596static bool packet_is_spoofed(struct sk_buff *skb,
597 const struct iphdr *iph,
598 struct ip_tunnel *tunnel)
599{
600 const struct ipv6hdr *ipv6h;
601
602 if (tunnel->dev->priv_flags & IFF_ISATAP) {
603 if (!isatap_chksrc(skb, iph, tunnel))
604 return true;
605
606 return false;
607 }
608
609 if (tunnel->dev->flags & IFF_POINTOPOINT)
610 return false;
611
612 ipv6h = ipv6_hdr(skb);
613
614 if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
615 net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
616 &iph->saddr, &ipv6h->saddr,
617 &iph->daddr, &ipv6h->daddr);
618 return true;
619 }
620
621 if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
622 return false;
623
624 if (only_dnatted(tunnel, &ipv6h->daddr))
625 return false;
626
627 net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
628 &iph->saddr, &ipv6h->saddr,
629 &iph->daddr, &ipv6h->daddr);
630 return true;
631}
632
569static int ipip6_rcv(struct sk_buff *skb) 633static int ipip6_rcv(struct sk_buff *skb)
570{ 634{
571 const struct iphdr *iph = ip_hdr(skb); 635 const struct iphdr *iph = ip_hdr(skb);
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb)
586 IPCB(skb)->flags = 0; 650 IPCB(skb)->flags = 0;
587 skb->protocol = htons(ETH_P_IPV6); 651 skb->protocol = htons(ETH_P_IPV6);
588 652
589 if (tunnel->dev->priv_flags & IFF_ISATAP) { 653 if (packet_is_spoofed(skb, iph, tunnel)) {
590 if (!isatap_chksrc(skb, iph, tunnel)) { 654 tunnel->dev->stats.rx_errors++;
591 tunnel->dev->stats.rx_errors++; 655 goto out;
592 goto out;
593 }
594 } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
595 if (is_spoofed_6rd(tunnel, iph->saddr,
596 &ipv6_hdr(skb)->saddr) ||
597 is_spoofed_6rd(tunnel, iph->daddr,
598 &ipv6_hdr(skb)->daddr)) {
599 tunnel->dev->stats.rx_errors++;
600 goto out;
601 }
602 } 656 }
603 657
604 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net); 658 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
748 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 802 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
749 803
750 if (neigh == NULL) { 804 if (neigh == NULL) {
751 net_dbg_ratelimited("sit: nexthop == NULL\n"); 805 net_dbg_ratelimited("nexthop == NULL\n");
752 goto tx_error; 806 goto tx_error;
753 } 807 }
754 808
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
777 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr); 831 neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
778 832
779 if (neigh == NULL) { 833 if (neigh == NULL) {
780 net_dbg_ratelimited("sit: nexthop == NULL\n"); 834 net_dbg_ratelimited("nexthop == NULL\n");
781 goto tx_error; 835 goto tx_error;
782 } 836 }
783 837
@@ -1612,6 +1666,7 @@ static int __net_init sit_init_net(struct net *net)
1612 goto err_alloc_dev; 1666 goto err_alloc_dev;
1613 } 1667 }
1614 dev_net_set(sitn->fb_tunnel_dev, net); 1668 dev_net_set(sitn->fb_tunnel_dev, net);
1669 sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
1615 /* FB netdevice is special: we have one, and only one per netns. 1670 /* FB netdevice is special: we have one, and only one per netns.
1616 * Allowing it to be moved to another netns is clearly unsafe. 1671 * Allowing it to be moved to another netns is clearly unsafe.
1617 */ 1672 */
@@ -1646,7 +1701,6 @@ static void __net_exit sit_exit_net(struct net *net)
1646 1701
1647 rtnl_lock(); 1702 rtnl_lock();
1648 sit_destroy_tunnels(sitn, &list); 1703 sit_destroy_tunnels(sitn, &list);
1649 unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1650 unregister_netdevice_many(&list); 1704 unregister_netdevice_many(&list);
1651 rtnl_unlock(); 1705 rtnl_unlock();
1652} 1706}
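only_dnatted() above compares the inner destination against the tunnel interface's own addresses using a prefix length of ip6rd.prefixlen + 32 - ip6rd.relay_prefixlen when 6rd is configured (48 otherwise): the delegated 6rd prefix plus however many IPv4 bits are embedded beyond the relay prefix. A tiny arithmetic example, with the 6rd parameters chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int ip6rd_prefixlen = 32;  /* example: /32 delegated 6rd prefix */
        unsigned int relay_prefixlen = 8;   /* example: /8 IPv4 relay prefix     */

        /* 6rd prefix bits plus the embedded IPv4 bits not covered by the relay prefix */
        unsigned int prefix_len = ip6rd_prefixlen + 32 - relay_prefixlen;

        printf("compare the first %u bits against the interface addresses\n", prefix_len);
        return 0;  /* 56 for these example values; 48 in the non-6rd case */
}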
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f4058150262b..18786098fd41 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,8 +525,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
525 525
526 if (type == ICMPV6_PKT_TOOBIG) 526 if (type == ICMPV6_PKT_TOOBIG)
527 ip6_sk_update_pmtu(skb, sk, info); 527 ip6_sk_update_pmtu(skb, sk, info);
528 if (type == NDISC_REDIRECT) 528 if (type == NDISC_REDIRECT) {
529 ip6_sk_redirect(skb, sk); 529 ip6_sk_redirect(skb, sk);
530 goto out;
531 }
530 532
531 np = inet6_sk(sk); 533 np = inet6_sk(sk);
532 534
@@ -1223,9 +1225,6 @@ do_udp_sendmsg:
1223 if (tclass < 0) 1225 if (tclass < 0)
1224 tclass = np->tclass; 1226 tclass = np->tclass;
1225 1227
1226 if (dontfrag < 0)
1227 dontfrag = np->dontfrag;
1228
1229 if (msg->msg_flags&MSG_CONFIRM) 1228 if (msg->msg_flags&MSG_CONFIRM)
1230 goto do_confirm; 1229 goto do_confirm;
1231back_from_confirm: 1230back_from_confirm:
@@ -1244,6 +1243,8 @@ back_from_confirm:
1244 up->pending = AF_INET6; 1243 up->pending = AF_INET6;
1245 1244
1246do_append_data: 1245do_append_data:
1246 if (dontfrag < 0)
1247 dontfrag = np->dontfrag;
1247 up->len += ulen; 1248 up->len += ulen;
1248 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 1249 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1249 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, 1250 err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 23ed03d786c8..08ed2772b7aa 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
138 138
139 memset(fl6, 0, sizeof(struct flowi6)); 139 memset(fl6, 0, sizeof(struct flowi6));
140 fl6->flowi6_mark = skb->mark; 140 fl6->flowi6_mark = skb->mark;
141 fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
141 142
142 fl6->daddr = reverse ? hdr->saddr : hdr->daddr; 143 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
143 fl6->saddr = reverse ? hdr->daddr : hdr->saddr; 144 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9d585370c5b4..911ef03bf8fb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1098 1098
1099 x->id.proto = proto; 1099 x->id.proto = proto;
1100 x->id.spi = sa->sadb_sa_spi; 1100 x->id.spi = sa->sadb_sa_spi;
1101 x->props.replay_window = sa->sadb_sa_replay; 1101 x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
1102 (sizeof(x->replay.bitmap) * 8));
1102 if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) 1103 if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
1103 x->props.flags |= XFRM_STATE_NOECN; 1104 x->props.flags |= XFRM_STATE_NOECN;
1104 if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP) 1105 if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index feae495a0a30..b076e8309bc2 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -115,6 +115,11 @@ struct l2tp_net {
115static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 115static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 116static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117 117
118static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
119{
120 return sk->sk_user_data;
121}
122
118static inline struct l2tp_net *l2tp_pernet(struct net *net) 123static inline struct l2tp_net *l2tp_pernet(struct net *net)
119{ 124{
120 BUG_ON(!net); 125 BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
504 return 0; 509 return 0;
505 510
506#if IS_ENABLED(CONFIG_IPV6) 511#if IS_ENABLED(CONFIG_IPV6)
507 if (sk->sk_family == PF_INET6) { 512 if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
508 if (!uh->check) { 513 if (!uh->check) {
509 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); 514 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
510 return 1; 515 return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1128 /* Queue the packet to IP for output */ 1133 /* Queue the packet to IP for output */
1129 skb->local_df = 1; 1134 skb->local_df = 1;
1130#if IS_ENABLED(CONFIG_IPV6) 1135#if IS_ENABLED(CONFIG_IPV6)
1131 if (skb->sk->sk_family == PF_INET6) 1136 if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1132 error = inet6_csk_xmit(skb, NULL); 1137 error = inet6_csk_xmit(skb, NULL);
1133 else 1138 else
1134#endif 1139#endif
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1255 1260
1256 /* Calculate UDP checksum if configured to do so */ 1261 /* Calculate UDP checksum if configured to do so */
1257#if IS_ENABLED(CONFIG_IPV6) 1262#if IS_ENABLED(CONFIG_IPV6)
1258 if (sk->sk_family == PF_INET6) 1263 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1259 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1264 l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1260 else 1265 else
1261#endif 1266#endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1304 */ 1309 */
1305static void l2tp_tunnel_destruct(struct sock *sk) 1310static void l2tp_tunnel_destruct(struct sock *sk)
1306{ 1311{
1307 struct l2tp_tunnel *tunnel; 1312 struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1308 struct l2tp_net *pn; 1313 struct l2tp_net *pn;
1309 1314
1310 tunnel = sk->sk_user_data;
1311 if (tunnel == NULL) 1315 if (tunnel == NULL)
1312 goto end; 1316 goto end;
1313 1317
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1675 } 1679 }
1676 1680
1677 /* Check if this socket has already been prepped */ 1681 /* Check if this socket has already been prepped */
1678 tunnel = (struct l2tp_tunnel *)sk->sk_user_data; 1682 tunnel = l2tp_tunnel(sk);
1679 if (tunnel != NULL) { 1683 if (tunnel != NULL) {
1680 /* This socket has already been prepped */ 1684 /* This socket has already been prepped */
1681 err = -EBUSY; 1685 err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1704 if (cfg != NULL) 1708 if (cfg != NULL)
1705 tunnel->debug = cfg->debug; 1709 tunnel->debug = cfg->debug;
1706 1710
1711#if IS_ENABLED(CONFIG_IPV6)
1712 if (sk->sk_family == PF_INET6) {
1713 struct ipv6_pinfo *np = inet6_sk(sk);
1714
1715 if (ipv6_addr_v4mapped(&np->saddr) &&
1716 ipv6_addr_v4mapped(&np->daddr)) {
1717 struct inet_sock *inet = inet_sk(sk);
1718
1719 tunnel->v4mapped = true;
1720 inet->inet_saddr = np->saddr.s6_addr32[3];
1721 inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
1722 inet->inet_daddr = np->daddr.s6_addr32[3];
1723 } else {
1724 tunnel->v4mapped = false;
1725 }
1726 }
1727#endif
1728
1707 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1729 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1708 tunnel->encap = encap; 1730 tunnel->encap = encap;
1709 if (encap == L2TP_ENCAPTYPE_UDP) { 1731 if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1712 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1734 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1713 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; 1735 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1714#if IS_ENABLED(CONFIG_IPV6) 1736#if IS_ENABLED(CONFIG_IPV6)
1715 if (sk->sk_family == PF_INET6) 1737 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1716 udpv6_encap_enable(); 1738 udpv6_encap_enable();
1717 else 1739 else
1718#endif 1740#endif
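The l2tp_core.c hunks above make an AF_INET6 tunnel socket whose two endpoints are IPv4-mapped addresses (::ffff:a.b.c.d) behave as IPv4 for checksumming and transmit, pulling the embedded IPv4 address out of s6_addr32[3]. As a minimal userspace sketch of that detection, assuming only the standard IN6_IS_ADDR_V4MAPPED macro and inet_pton/inet_ntop (the kernel patch itself uses ipv6_addr_v4mapped(); nothing below is kernel code):

/* Sketch: detect an IPv4-mapped IPv6 address and recover the IPv4 part.
 * Build: cc -o v4mapped v4mapped.c ; run: ./v4mapped ::ffff:192.0.2.1
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(int argc, char **argv)
{
    struct in6_addr a6;
    struct in_addr a4;
    char buf[INET_ADDRSTRLEN];

    if (argc < 2 || inet_pton(AF_INET6, argv[1], &a6) != 1) {
        fprintf(stderr, "usage: %s <ipv6-address>\n", argv[0]);
        return 1;
    }
    if (!IN6_IS_ADDR_V4MAPPED(&a6)) {
        printf("%s is a native IPv6 address\n", argv[1]);
        return 0;
    }
    /* The IPv4 address lives in the last 32 bits, as in s6_addr32[3]. */
    memcpy(&a4, &a6.s6_addr[12], sizeof(a4));
    printf("%s maps to IPv4 %s\n", argv[1],
           inet_ntop(AF_INET, &a4, buf, sizeof(buf)));
    return 0;
}

Reading s6_addr[12..15] instead of s6_addr32[3] keeps the sketch portable, since the 32-bit accessor is not mandated by POSIX.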
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 66a559b104b6..6f251cbc2ed7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
194 struct sock *sock; /* Parent socket */ 194 struct sock *sock; /* Parent socket */
195 int fd; /* Parent fd, if tunnel socket 195 int fd; /* Parent fd, if tunnel socket
196 * was created by userspace */ 196 * was created by userspace */
197#if IS_ENABLED(CONFIG_IPV6)
198 bool v4mapped;
199#endif
197 200
198 struct work_struct del_work; 201 struct work_struct del_work;
199 202
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 5ebee2ded9e9..8c46b271064a 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
353 goto error_put_sess_tun; 353 goto error_put_sess_tun;
354 } 354 }
355 355
356 local_bh_disable();
356 l2tp_xmit_skb(session, skb, session->hdr_len); 357 l2tp_xmit_skb(session, skb, session->hdr_len);
358 local_bh_enable();
357 359
358 sock_put(ps->tunnel_sock); 360 sock_put(ps->tunnel_sock);
359 sock_put(sk); 361 sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
422 skb->data[0] = ppph[0]; 424 skb->data[0] = ppph[0];
423 skb->data[1] = ppph[1]; 425 skb->data[1] = ppph[1];
424 426
427 local_bh_disable();
425 l2tp_xmit_skb(session, skb, session->hdr_len); 428 l2tp_xmit_skb(session, skb, session->hdr_len);
429 local_bh_enable();
426 430
427 sock_put(sk_tun); 431 sock_put(sk_tun);
428 sock_put(sk); 432 sock_put(sk);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index 54563ad8aeb1..355cc3b6fa4d 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param)
154 } else { 154 } else {
155 lapb->n2count++; 155 lapb->n2count++;
156 lapb_requeue_frames(lapb); 156 lapb_requeue_frames(lapb);
157 lapb_kick(lapb);
157 } 158 }
158 break; 159 break;
159 160
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2e7855a1b10d..629dee7ec9bf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3518,7 +3518,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3518 return -EINVAL; 3518 return -EINVAL;
3519 } 3519 }
3520 band = chanctx_conf->def.chan->band; 3520 band = chanctx_conf->def.chan->band;
3521 sta = sta_info_get(sdata, peer); 3521 sta = sta_info_get_bss(sdata, peer);
3522 if (sta) { 3522 if (sta) {
3523 qos = test_sta_flag(sta, WLAN_STA_WME); 3523 qos = test_sta_flag(sta, WLAN_STA_WME);
3524 } else { 3524 } else {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b6186517ec56..611abfcfb5eb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -893,6 +893,8 @@ struct tpt_led_trigger {
893 * that the scan completed. 893 * that the scan completed.
894 * @SCAN_ABORTED: Set for our scan work function when the driver reported 894 * @SCAN_ABORTED: Set for our scan work function when the driver reported
895 * a scan complete for an aborted scan. 895 * a scan complete for an aborted scan.
896 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
897 * cancelled.
896 */ 898 */
897enum { 899enum {
898 SCAN_SW_SCANNING, 900 SCAN_SW_SCANNING,
@@ -900,6 +902,7 @@ enum {
900 SCAN_ONCHANNEL_SCANNING, 902 SCAN_ONCHANNEL_SCANNING,
901 SCAN_COMPLETED, 903 SCAN_COMPLETED,
902 SCAN_ABORTED, 904 SCAN_ABORTED,
905 SCAN_HW_CANCELLED,
903}; 906};
904 907
905/** 908/**
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index acd1f71adc03..0c2a29484c07 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
394 394
395 if (started) 395 if (started)
396 ieee80211_start_next_roc(local); 396 ieee80211_start_next_roc(local);
397 else if (list_empty(&local->roc_list))
398 ieee80211_run_deferred_scan(local);
397 } 399 }
398 400
399 out_unlock: 401 out_unlock:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54395d7583ba..674eac1f996c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
3056 case NL80211_IFTYPE_ADHOC: 3056 case NL80211_IFTYPE_ADHOC:
3057 if (!bssid) 3057 if (!bssid)
3058 return 0; 3058 return 0;
3059 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3060 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3061 return 0;
3059 if (ieee80211_is_beacon(hdr->frame_control)) { 3062 if (ieee80211_is_beacon(hdr->frame_control)) {
3060 return 1; 3063 return 1;
3061 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 3064 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 08afe74b98f4..d2d17a449224 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
238 enum ieee80211_band band; 238 enum ieee80211_band band;
239 int i, ielen, n_chans; 239 int i, ielen, n_chans;
240 240
241 if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
242 return false;
243
241 do { 244 do {
242 if (local->hw_scan_band == IEEE80211_NUM_BANDS) 245 if (local->hw_scan_band == IEEE80211_NUM_BANDS)
243 return false; 246 return false;
@@ -940,7 +943,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
940 if (!local->scan_req) 943 if (!local->scan_req)
941 goto out; 944 goto out;
942 945
946 /*
947 * We have a scan running and the driver already reported completion,
948 * but the worker hasn't run yet or is stuck on the mutex - mark it as
949 * cancelled.
950 */
951 if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
952 test_bit(SCAN_COMPLETED, &local->scanning)) {
953 set_bit(SCAN_HW_CANCELLED, &local->scanning);
954 goto out;
955 }
956
943 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { 957 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
958 /*
959 * Make sure that __ieee80211_scan_completed doesn't trigger a
960 * scan on another band.
961 */
962 set_bit(SCAN_HW_CANCELLED, &local->scanning);
944 if (local->ops->cancel_hw_scan) 963 if (local->ops->cancel_hw_scan)
945 drv_cancel_hw_scan(local, 964 drv_cancel_hw_scan(local,
946 rcu_dereference_protected(local->scan_sdata, 965 rcu_dereference_protected(local->scan_sdata,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 368837fe3b80..78dc2e99027e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
180 struct ieee80211_local *local = sta->local; 180 struct ieee80211_local *local = sta->local;
181 struct ieee80211_sub_if_data *sdata = sta->sdata; 181 struct ieee80211_sub_if_data *sdata = sta->sdata;
182 182
183 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
184 sta->last_rx = jiffies;
185
183 if (ieee80211_is_data_qos(mgmt->frame_control)) { 186 if (ieee80211_is_data_qos(mgmt->frame_control)) {
184 struct ieee80211_hdr *hdr = (void *) skb->data; 187 struct ieee80211_hdr *hdr = (void *) skb->data;
185 u8 *qc = ieee80211_get_qos_ctl(hdr); 188 u8 *qc = ieee80211_get_qos_ctl(hdr);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3456c0486b48..70b5a05c0a4e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1120 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1120 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1121 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) 1121 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1122 return TX_DROP; 1122 return TX_DROP;
1123 } else if (info->flags & IEEE80211_TX_CTL_INJECTED || 1123 } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
1124 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
1124 tx->sdata->control_port_protocol == tx->skb->protocol) { 1125 tx->sdata->control_port_protocol == tx->skb->protocol) {
1125 tx->sta = sta_info_get_bss(sdata, hdr->addr1); 1126 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1126 } 1127 }
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e1b34a18b243..69e4ef5348a0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
2103{ 2103{
2104 struct ieee80211_local *local = sdata->local; 2104 struct ieee80211_local *local = sdata->local;
2105 struct ieee80211_supported_band *sband; 2105 struct ieee80211_supported_band *sband;
2106 int rate, skip, shift; 2106 int rate, shift;
2107 u8 i, exrates, *pos; 2107 u8 i, exrates, *pos;
2108 u32 basic_rates = sdata->vif.bss_conf.basic_rates; 2108 u32 basic_rates = sdata->vif.bss_conf.basic_rates;
2109 u32 rate_flags; 2109 u32 rate_flags;
@@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
2131 pos = skb_put(skb, exrates + 2); 2131 pos = skb_put(skb, exrates + 2);
2132 *pos++ = WLAN_EID_EXT_SUPP_RATES; 2132 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2133 *pos++ = exrates; 2133 *pos++ = exrates;
2134 skip = 0;
2135 for (i = 8; i < sband->n_bitrates; i++) { 2134 for (i = 8; i < sband->n_bitrates; i++) {
2136 u8 basic = 0; 2135 u8 basic = 0;
2137 if ((rate_flags & sband->bitrates[i].flags) 2136 if ((rate_flags & sband->bitrates[i].flags)
2138 != rate_flags) 2137 != rate_flags)
2139 continue; 2138 continue;
2140 if (skip++ < 8)
2141 continue;
2142 if (need_basic && basic_rates & BIT(i)) 2139 if (need_basic && basic_rates & BIT(i))
2143 basic = 0x80; 2140 basic = 0x80;
2144 rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 2141 rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2241,6 +2238,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2241 } 2238 }
2242 2239
2243 rate = cfg80211_calculate_bitrate(&ri); 2240 rate = cfg80211_calculate_bitrate(&ri);
2241 if (WARN_ONCE(!rate,
2242 "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
2243 status->flag, status->rate_idx, status->vht_nss))
2244 return 0;
2244 2245
2245 /* rewind from end of MPDU */ 2246 /* rewind from end of MPDU */
2246 if (status->flag & RX_FLAG_MACTIME_END) 2247 if (status->flag & RX_FLAG_MACTIME_END)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f69e83ff836..74fd00c27210 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
116 116
117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 117 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 struct ip_vs_cpu_stats *s; 118 struct ip_vs_cpu_stats *s;
119 struct ip_vs_service *svc;
119 120
120 s = this_cpu_ptr(dest->stats.cpustats); 121 s = this_cpu_ptr(dest->stats.cpustats);
121 s->ustats.inpkts++; 122 s->ustats.inpkts++;
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
123 s->ustats.inbytes += skb->len; 124 s->ustats.inbytes += skb->len;
124 u64_stats_update_end(&s->syncp); 125 u64_stats_update_end(&s->syncp);
125 126
126 s = this_cpu_ptr(dest->svc->stats.cpustats); 127 rcu_read_lock();
128 svc = rcu_dereference(dest->svc);
129 s = this_cpu_ptr(svc->stats.cpustats);
127 s->ustats.inpkts++; 130 s->ustats.inpkts++;
128 u64_stats_update_begin(&s->syncp); 131 u64_stats_update_begin(&s->syncp);
129 s->ustats.inbytes += skb->len; 132 s->ustats.inbytes += skb->len;
130 u64_stats_update_end(&s->syncp); 133 u64_stats_update_end(&s->syncp);
134 rcu_read_unlock();
131 135
132 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 136 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
133 s->ustats.inpkts++; 137 s->ustats.inpkts++;
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
146 150
147 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 151 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
148 struct ip_vs_cpu_stats *s; 152 struct ip_vs_cpu_stats *s;
153 struct ip_vs_service *svc;
149 154
150 s = this_cpu_ptr(dest->stats.cpustats); 155 s = this_cpu_ptr(dest->stats.cpustats);
151 s->ustats.outpkts++; 156 s->ustats.outpkts++;
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
153 s->ustats.outbytes += skb->len; 158 s->ustats.outbytes += skb->len;
154 u64_stats_update_end(&s->syncp); 159 u64_stats_update_end(&s->syncp);
155 160
156 s = this_cpu_ptr(dest->svc->stats.cpustats); 161 rcu_read_lock();
162 svc = rcu_dereference(dest->svc);
163 s = this_cpu_ptr(svc->stats.cpustats);
157 s->ustats.outpkts++; 164 s->ustats.outpkts++;
158 u64_stats_update_begin(&s->syncp); 165 u64_stats_update_begin(&s->syncp);
159 s->ustats.outbytes += skb->len; 166 s->ustats.outbytes += skb->len;
160 u64_stats_update_end(&s->syncp); 167 u64_stats_update_end(&s->syncp);
168 rcu_read_unlock();
161 169
162 s = this_cpu_ptr(ipvs->tot_stats.cpustats); 170 s = this_cpu_ptr(ipvs->tot_stats.cpustats);
163 s->ustats.outpkts++; 171 s->ustats.outpkts++;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c8148e487386..a3df9bddc4f7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -460,7 +460,7 @@ static inline void
460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc) 460__ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
461{ 461{
462 atomic_inc(&svc->refcnt); 462 atomic_inc(&svc->refcnt);
463 dest->svc = svc; 463 rcu_assign_pointer(dest->svc, svc);
464} 464}
465 465
466static void ip_vs_service_free(struct ip_vs_service *svc) 466static void ip_vs_service_free(struct ip_vs_service *svc)
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
470 kfree(svc); 470 kfree(svc);
471} 471}
472 472
473static void 473static void ip_vs_service_rcu_free(struct rcu_head *head)
474__ip_vs_unbind_svc(struct ip_vs_dest *dest)
475{ 474{
476 struct ip_vs_service *svc = dest->svc; 475 struct ip_vs_service *svc;
476
477 svc = container_of(head, struct ip_vs_service, rcu_head);
478 ip_vs_service_free(svc);
479}
477 480
478 dest->svc = NULL; 481static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
482{
479 if (atomic_dec_and_test(&svc->refcnt)) { 483 if (atomic_dec_and_test(&svc->refcnt)) {
480 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n", 484 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
481 svc->fwmark, 485 svc->fwmark,
482 IP_VS_DBG_ADDR(svc->af, &svc->addr), 486 IP_VS_DBG_ADDR(svc->af, &svc->addr),
483 ntohs(svc->port)); 487 ntohs(svc->port));
484 ip_vs_service_free(svc); 488 if (do_delay)
489 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
490 else
491 ip_vs_service_free(svc);
485 } 492 }
486} 493}
487 494
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
667 IP_VS_DBG_ADDR(svc->af, &dest->addr), 674 IP_VS_DBG_ADDR(svc->af, &dest->addr),
668 ntohs(dest->port), 675 ntohs(dest->port),
669 atomic_read(&dest->refcnt)); 676 atomic_read(&dest->refcnt));
670 /* We can not reuse dest while in grace period
671 * because conns still can use dest->svc
672 */
673 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
674 continue;
675 if (dest->af == svc->af && 677 if (dest->af == svc->af &&
676 ip_vs_addr_equal(svc->af, &dest->addr, daddr) && 678 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
677 dest->port == dport && 679 dest->port == dport &&
@@ -697,8 +699,10 @@ out:
697 699
698static void ip_vs_dest_free(struct ip_vs_dest *dest) 700static void ip_vs_dest_free(struct ip_vs_dest *dest)
699{ 701{
702 struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
703
700 __ip_vs_dst_cache_reset(dest); 704 __ip_vs_dst_cache_reset(dest);
701 __ip_vs_unbind_svc(dest); 705 __ip_vs_svc_put(svc, false);
702 free_percpu(dest->stats.cpustats); 706 free_percpu(dest->stats.cpustats);
703 kfree(dest); 707 kfree(dest);
704} 708}
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
771 struct ip_vs_dest_user_kern *udest, int add) 775 struct ip_vs_dest_user_kern *udest, int add)
772{ 776{
773 struct netns_ipvs *ipvs = net_ipvs(svc->net); 777 struct netns_ipvs *ipvs = net_ipvs(svc->net);
778 struct ip_vs_service *old_svc;
774 struct ip_vs_scheduler *sched; 779 struct ip_vs_scheduler *sched;
775 int conn_flags; 780 int conn_flags;
776 781
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
792 atomic_set(&dest->conn_flags, conn_flags); 797 atomic_set(&dest->conn_flags, conn_flags);
793 798
794 /* bind the service */ 799 /* bind the service */
795 if (!dest->svc) { 800 old_svc = rcu_dereference_protected(dest->svc, 1);
801 if (!old_svc) {
796 __ip_vs_bind_svc(dest, svc); 802 __ip_vs_bind_svc(dest, svc);
797 } else { 803 } else {
798 if (dest->svc != svc) { 804 if (old_svc != svc) {
799 __ip_vs_unbind_svc(dest);
800 ip_vs_zero_stats(&dest->stats); 805 ip_vs_zero_stats(&dest->stats);
801 __ip_vs_bind_svc(dest, svc); 806 __ip_vs_bind_svc(dest, svc);
807 __ip_vs_svc_put(old_svc, true);
802 } 808 }
803 } 809 }
804 810
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
998 return 0; 1004 return 0;
999} 1005}
1000 1006
1001static void ip_vs_dest_wait_readers(struct rcu_head *head)
1002{
1003 struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
1004 rcu_head);
1005
1006 /* End of grace period after unlinking */
1007 clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1008}
1009
1010
1011/* 1007/*
1012 * Delete a destination (must be already unlinked from the service) 1008 * Delete a destination (must be already unlinked from the service)
1013 */ 1009 */
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
1023 */ 1019 */
1024 ip_vs_rs_unhash(dest); 1020 ip_vs_rs_unhash(dest);
1025 1021
1026 if (!cleanup) {
1027 set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1028 call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
1029 }
1030
1031 spin_lock_bh(&ipvs->dest_trash_lock); 1022 spin_lock_bh(&ipvs->dest_trash_lock);
1032 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n", 1023 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
1033 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), 1024 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
1034 atomic_read(&dest->refcnt)); 1025 atomic_read(&dest->refcnt));
1035 if (list_empty(&ipvs->dest_trash) && !cleanup) 1026 if (list_empty(&ipvs->dest_trash) && !cleanup)
1036 mod_timer(&ipvs->dest_trash_timer, 1027 mod_timer(&ipvs->dest_trash_timer,
1037 jiffies + IP_VS_DEST_TRASH_PERIOD); 1028 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1038 /* dest lives in trash without reference */ 1029 /* dest lives in trash without reference */
1039 list_add(&dest->t_list, &ipvs->dest_trash); 1030 list_add(&dest->t_list, &ipvs->dest_trash);
1031 dest->idle_start = 0;
1040 spin_unlock_bh(&ipvs->dest_trash_lock); 1032 spin_unlock_bh(&ipvs->dest_trash_lock);
1041 ip_vs_dest_put(dest); 1033 ip_vs_dest_put(dest);
1042} 1034}
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
1108 struct net *net = (struct net *) data; 1100 struct net *net = (struct net *) data;
1109 struct netns_ipvs *ipvs = net_ipvs(net); 1101 struct netns_ipvs *ipvs = net_ipvs(net);
1110 struct ip_vs_dest *dest, *next; 1102 struct ip_vs_dest *dest, *next;
1103 unsigned long now = jiffies;
1111 1104
1112 spin_lock(&ipvs->dest_trash_lock); 1105 spin_lock(&ipvs->dest_trash_lock);
1113 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) { 1106 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
1114 /* Skip if dest is in grace period */
1115 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
1116 continue;
1117 if (atomic_read(&dest->refcnt) > 0) 1107 if (atomic_read(&dest->refcnt) > 0)
1118 continue; 1108 continue;
1109 if (dest->idle_start) {
1110 if (time_before(now, dest->idle_start +
1111 IP_VS_DEST_TRASH_PERIOD))
1112 continue;
1113 } else {
1114 dest->idle_start = max(1UL, now);
1115 continue;
1116 }
1119 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n", 1117 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
1120 dest->vfwmark, 1118 dest->vfwmark,
1121 IP_VS_DBG_ADDR(dest->svc->af, &dest->addr), 1119 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1122 ntohs(dest->port)); 1120 ntohs(dest->port));
1123 list_del(&dest->t_list); 1121 list_del(&dest->t_list);
1124 ip_vs_dest_free(dest); 1122 ip_vs_dest_free(dest);
1125 } 1123 }
1126 if (!list_empty(&ipvs->dest_trash)) 1124 if (!list_empty(&ipvs->dest_trash))
1127 mod_timer(&ipvs->dest_trash_timer, 1125 mod_timer(&ipvs->dest_trash_timer,
1128 jiffies + IP_VS_DEST_TRASH_PERIOD); 1126 jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
1129 spin_unlock(&ipvs->dest_trash_lock); 1127 spin_unlock(&ipvs->dest_trash_lock);
1130} 1128}
1131 1129
@@ -1320,14 +1318,6 @@ out:
1320 return ret; 1318 return ret;
1321} 1319}
1322 1320
1323static void ip_vs_service_rcu_free(struct rcu_head *head)
1324{
1325 struct ip_vs_service *svc;
1326
1327 svc = container_of(head, struct ip_vs_service, rcu_head);
1328 ip_vs_service_free(svc);
1329}
1330
1331/* 1321/*
1332 * Delete a service from the service list 1322 * Delete a service from the service list
1333 * - The service must be unlinked, unlocked and not referenced! 1323 * - The service must be unlinked, unlocked and not referenced!
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
1376 /* 1366 /*
1377 * Free the service if nobody refers to it 1367 * Free the service if nobody refers to it
1378 */ 1368 */
1379 if (atomic_dec_and_test(&svc->refcnt)) { 1369 __ip_vs_svc_put(svc, true);
1380 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
1381 svc->fwmark,
1382 IP_VS_DBG_ADDR(svc->af, &svc->addr),
1383 ntohs(svc->port));
1384 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
1385 }
1386 1370
1387 /* decrease the module use count */ 1371 /* decrease the module use count */
1388 ip_vs_use_count_dec(); 1372 ip_vs_use_count_dec();
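Taken together, the ip_vs_core.c and ip_vs_ctl.c hunks turn dest->svc into an RCU-managed pointer: the stats paths read it under rcu_read_lock()/rcu_dereference(), writers publish a new binding with rcu_assign_pointer(), and the old service is retired through __ip_vs_svc_put() and call_rcu(). Below is a compact userspace sketch of the same publish/read/retire pattern, written against liburcu's default flavour and its legacy urcu.h API; struct cfg, active_cfg, reader() and update_cfg() are invented names, and the sketch blocks in synchronize_rcu() where the kernel defers the free with call_rcu():

/* Sketch of RCU-protected pointer replacement with liburcu.
 * Build: cc -o rcu_demo rcu_demo.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>               /* rcu_read_lock, rcu_assign_pointer, ... */

struct cfg {                    /* stands in for the service a dest points to */
    unsigned long generation;
};

static struct cfg *active_cfg;  /* only ever accessed through RCU */

static void reader(void)
{
    struct cfg *c;

    rcu_read_lock();
    c = rcu_dereference(active_cfg);    /* like rcu_dereference(dest->svc) */
    printf("reader sees generation %lu\n", c ? c->generation : 0);
    rcu_read_unlock();
}

static void update_cfg(unsigned long gen)
{
    struct cfg *newc = calloc(1, sizeof(*newc));
    struct cfg *oldc = active_cfg;          /* single writer assumed */

    if (!newc)
        return;
    newc->generation = gen;
    rcu_assign_pointer(active_cfg, newc);   /* publish, like __ip_vs_bind_svc */
    synchronize_rcu();                      /* wait out readers of oldc */
    free(oldc);                             /* kernel defers this via call_rcu */
}

int main(void)
{
    rcu_register_thread();
    update_cfg(1);
    reader();
    update_cfg(2);
    reader();
    rcu_unregister_thread();
    return 0;
}

The point of the pattern is that a reader which already fetched the old pointer may keep using it until it leaves its read-side critical section; freeing is only safe after that grace period.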
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 6bee6d0c73a5..1425e9a924c4 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
59 struct ip_vs_cpu_stats __percpu *stats) 59 struct ip_vs_cpu_stats __percpu *stats)
60{ 60{
61 int i; 61 int i;
62 bool add = false;
62 63
63 for_each_possible_cpu(i) { 64 for_each_possible_cpu(i) {
64 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); 65 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
65 unsigned int start; 66 unsigned int start;
66 __u64 inbytes, outbytes; 67 __u64 inbytes, outbytes;
67 if (i) { 68 if (add) {
68 sum->conns += s->ustats.conns; 69 sum->conns += s->ustats.conns;
69 sum->inpkts += s->ustats.inpkts; 70 sum->inpkts += s->ustats.inpkts;
70 sum->outpkts += s->ustats.outpkts; 71 sum->outpkts += s->ustats.outpkts;
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
76 sum->inbytes += inbytes; 77 sum->inbytes += inbytes;
77 sum->outbytes += outbytes; 78 sum->outbytes += outbytes;
78 } else { 79 } else {
80 add = true;
79 sum->conns = s->ustats.conns; 81 sum->conns = s->ustats.conns;
80 sum->inpkts = s->ustats.inpkts; 82 sum->inpkts = s->ustats.inpkts;
81 sum->outpkts = s->ustats.outpkts; 83 sum->outpkts = s->ustats.outpkts;
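The ip_vs_est.c change replaces the "if (i)" test with an explicit add flag, so the summation no longer assumes that CPU 0 is the first (or even a) possible CPU: the first slot actually visited initialises the totals and every later one accumulates. A trivial illustration of that first-seen-flag accumulation over a sparse index set (all names and values below are made up):

/* Sketch: summing per-slot counters when slot 0 may not exist.
 * Build: cc -o sparse_sum sparse_sum.c
 */
#include <stdbool.h>
#include <stdio.h>

struct stats { unsigned long pkts; };

int main(void)
{
    /* Pretend only slots 2 and 5 are "possible CPUs". */
    struct stats percpu[8] = { [2] = { 10 }, [5] = { 32 } };
    int possible[] = { 2, 5 };
    struct stats sum = { 0 };
    bool add = false;

    for (unsigned i = 0; i < sizeof(possible) / sizeof(possible[0]); i++) {
        struct stats *s = &percpu[possible[i]];

        if (add) {
            sum.pkts += s->pkts;    /* later slots: accumulate */
        } else {
            add = true;
            sum.pkts = s->pkts;     /* first slot seen: initialise */
        }
    }
    printf("total pkts = %lu\n", sum.pkts);     /* 42 */
    return 0;
}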
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 1383b0eadc0e..eff13c94498e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
93 struct hlist_node list; 93 struct hlist_node list;
94 int af; /* address family */ 94 int af; /* address family */
95 union nf_inet_addr addr; /* destination IP address */ 95 union nf_inet_addr addr; /* destination IP address */
96 struct ip_vs_dest __rcu *dest; /* real server (cache) */ 96 struct ip_vs_dest *dest; /* real server (cache) */
97 unsigned long lastuse; /* last used time */ 97 unsigned long lastuse; /* last used time */
98 struct rcu_head rcu_head; 98 struct rcu_head rcu_head;
99}; 99};
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
130}; 130};
131#endif 131#endif
132 132
133static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 133static void ip_vs_lblc_rcu_free(struct rcu_head *head)
134{ 134{
135 struct ip_vs_dest *dest; 135 struct ip_vs_lblc_entry *en = container_of(head,
136 struct ip_vs_lblc_entry,
137 rcu_head);
136 138
137 hlist_del_rcu(&en->list); 139 ip_vs_dest_put(en->dest);
138 /* 140 kfree(en);
139 * We don't kfree dest because it is referred either by its service
140 * or the trash dest list.
141 */
142 dest = rcu_dereference_protected(en->dest, 1);
143 ip_vs_dest_put(dest);
144 kfree_rcu(en, rcu_head);
145} 141}
146 142
143static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
144{
145 hlist_del_rcu(&en->list);
146 call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
147}
147 148
148/* 149/*
149 * Returns hash value for IPVS LBLC entry 150 * Returns hash value for IPVS LBLC entry
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
203 struct ip_vs_lblc_entry *en; 204 struct ip_vs_lblc_entry *en;
204 205
205 en = ip_vs_lblc_get(dest->af, tbl, daddr); 206 en = ip_vs_lblc_get(dest->af, tbl, daddr);
206 if (!en) { 207 if (en) {
207 en = kmalloc(sizeof(*en), GFP_ATOMIC); 208 if (en->dest == dest)
208 if (!en) 209 return en;
209 return NULL; 210 ip_vs_lblc_del(en);
210 211 }
211 en->af = dest->af; 212 en = kmalloc(sizeof(*en), GFP_ATOMIC);
212 ip_vs_addr_copy(dest->af, &en->addr, daddr); 213 if (!en)
213 en->lastuse = jiffies; 214 return NULL;
214 215
215 ip_vs_dest_hold(dest); 216 en->af = dest->af;
216 RCU_INIT_POINTER(en->dest, dest); 217 ip_vs_addr_copy(dest->af, &en->addr, daddr);
218 en->lastuse = jiffies;
217 219
218 ip_vs_lblc_hash(tbl, en); 220 ip_vs_dest_hold(dest);
219 } else { 221 en->dest = dest;
220 struct ip_vs_dest *old_dest;
221 222
222 old_dest = rcu_dereference_protected(en->dest, 1); 223 ip_vs_lblc_hash(tbl, en);
223 if (old_dest != dest) {
224 ip_vs_dest_put(old_dest);
225 ip_vs_dest_hold(dest);
226 /* No ordering constraints for refcnt */
227 RCU_INIT_POINTER(en->dest, dest);
228 }
229 }
230 224
231 return en; 225 return en;
232} 226}
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
246 tbl->dead = 1; 240 tbl->dead = 1;
247 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 241 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { 242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
249 ip_vs_lblc_free(en); 243 ip_vs_lblc_del(en);
250 atomic_dec(&tbl->entries); 244 atomic_dec(&tbl->entries);
251 } 245 }
252 } 246 }
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
281 sysctl_lblc_expiration(svc))) 275 sysctl_lblc_expiration(svc)))
282 continue; 276 continue;
283 277
284 ip_vs_lblc_free(en); 278 ip_vs_lblc_del(en);
285 atomic_dec(&tbl->entries); 279 atomic_dec(&tbl->entries);
286 } 280 }
287 spin_unlock(&svc->sched_lock); 281 spin_unlock(&svc->sched_lock);
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
335 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 329 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
336 continue; 330 continue;
337 331
338 ip_vs_lblc_free(en); 332 ip_vs_lblc_del(en);
339 atomic_dec(&tbl->entries); 333 atomic_dec(&tbl->entries);
340 goal--; 334 goal--;
341 } 335 }
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
443 continue; 437 continue;
444 438
445 doh = ip_vs_dest_conn_overhead(dest); 439 doh = ip_vs_dest_conn_overhead(dest);
446 if (loh * atomic_read(&dest->weight) > 440 if ((__s64)loh * atomic_read(&dest->weight) >
447 doh * atomic_read(&least->weight)) { 441 (__s64)doh * atomic_read(&least->weight)) {
448 least = dest; 442 least = dest;
449 loh = doh; 443 loh = doh;
450 } 444 }
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
511 * free up entries from the trash at any time. 505 * free up entries from the trash at any time.
512 */ 506 */
513 507
514 dest = rcu_dereference(en->dest); 508 dest = en->dest;
515 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) && 509 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
516 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) 510 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
517 goto out; 511 goto out;
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
631{ 625{
632 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); 626 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
633 unregister_pernet_subsys(&ip_vs_lblc_ops); 627 unregister_pernet_subsys(&ip_vs_lblc_ops);
634 synchronize_rcu(); 628 rcu_barrier();
635} 629}
636 630
637 631
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 5199448697f6..0b8550089a2e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -89,7 +89,7 @@
89 */ 89 */
90struct ip_vs_dest_set_elem { 90struct ip_vs_dest_set_elem {
91 struct list_head list; /* list link */ 91 struct list_head list; /* list link */
92 struct ip_vs_dest __rcu *dest; /* destination server */ 92 struct ip_vs_dest *dest; /* destination server */
93 struct rcu_head rcu_head; 93 struct rcu_head rcu_head;
94}; 94};
95 95
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
107 107
108 if (check) { 108 if (check) {
109 list_for_each_entry(e, &set->list, list) { 109 list_for_each_entry(e, &set->list, list) {
110 struct ip_vs_dest *d; 110 if (e->dest == dest)
111
112 d = rcu_dereference_protected(e->dest, 1);
113 if (d == dest)
114 /* already existed */
115 return; 111 return;
116 } 112 }
117 } 113 }
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
121 return; 117 return;
122 118
123 ip_vs_dest_hold(dest); 119 ip_vs_dest_hold(dest);
124 RCU_INIT_POINTER(e->dest, dest); 120 e->dest = dest;
125 121
126 list_add_rcu(&e->list, &set->list); 122 list_add_rcu(&e->list, &set->list);
127 atomic_inc(&set->size); 123 atomic_inc(&set->size);
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
129 set->lastmod = jiffies; 125 set->lastmod = jiffies;
130} 126}
131 127
128static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
129{
130 struct ip_vs_dest_set_elem *e;
131
132 e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
133 ip_vs_dest_put(e->dest);
134 kfree(e);
135}
136
132static void 137static void
133ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 138ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
134{ 139{
135 struct ip_vs_dest_set_elem *e; 140 struct ip_vs_dest_set_elem *e;
136 141
137 list_for_each_entry(e, &set->list, list) { 142 list_for_each_entry(e, &set->list, list) {
138 struct ip_vs_dest *d; 143 if (e->dest == dest) {
139
140 d = rcu_dereference_protected(e->dest, 1);
141 if (d == dest) {
142 /* HIT */ 144 /* HIT */
143 atomic_dec(&set->size); 145 atomic_dec(&set->size);
144 set->lastmod = jiffies; 146 set->lastmod = jiffies;
145 ip_vs_dest_put(dest);
146 list_del_rcu(&e->list); 147 list_del_rcu(&e->list);
147 kfree_rcu(e, rcu_head); 148 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
148 break; 149 break;
149 } 150 }
150 } 151 }
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
155 struct ip_vs_dest_set_elem *e, *ep; 156 struct ip_vs_dest_set_elem *e, *ep;
156 157
157 list_for_each_entry_safe(e, ep, &set->list, list) { 158 list_for_each_entry_safe(e, ep, &set->list, list) {
158 struct ip_vs_dest *d;
159
160 d = rcu_dereference_protected(e->dest, 1);
161 /*
162 * We don't kfree dest because it is referred either
163 * by its service or by the trash dest list.
164 */
165 ip_vs_dest_put(d);
166 list_del_rcu(&e->list); 159 list_del_rcu(&e->list);
167 kfree_rcu(e, rcu_head); 160 call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
168 } 161 }
169} 162}
170 163
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
175 struct ip_vs_dest *dest, *least; 168 struct ip_vs_dest *dest, *least;
176 int loh, doh; 169 int loh, doh;
177 170
178 if (set == NULL)
179 return NULL;
180
181 /* select the first destination server, whose weight > 0 */ 171 /* select the first destination server, whose weight > 0 */
182 list_for_each_entry_rcu(e, &set->list, list) { 172 list_for_each_entry_rcu(e, &set->list, list) {
183 least = rcu_dereference(e->dest); 173 least = e->dest;
184 if (least->flags & IP_VS_DEST_F_OVERLOAD) 174 if (least->flags & IP_VS_DEST_F_OVERLOAD)
185 continue; 175 continue;
186 176
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
195 /* find the destination with the weighted least load */ 185 /* find the destination with the weighted least load */
196 nextstage: 186 nextstage:
197 list_for_each_entry_continue_rcu(e, &set->list, list) { 187 list_for_each_entry_continue_rcu(e, &set->list, list) {
198 dest = rcu_dereference(e->dest); 188 dest = e->dest;
199 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 189 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
200 continue; 190 continue;
201 191
202 doh = ip_vs_dest_conn_overhead(dest); 192 doh = ip_vs_dest_conn_overhead(dest);
203 if ((loh * atomic_read(&dest->weight) > 193 if (((__s64)loh * atomic_read(&dest->weight) >
204 doh * atomic_read(&least->weight)) 194 (__s64)doh * atomic_read(&least->weight))
205 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 195 && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
206 least = dest; 196 least = dest;
207 loh = doh; 197 loh = doh;
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
232 222
233 /* select the first destination server, whose weight > 0 */ 223 /* select the first destination server, whose weight > 0 */
234 list_for_each_entry(e, &set->list, list) { 224 list_for_each_entry(e, &set->list, list) {
235 most = rcu_dereference_protected(e->dest, 1); 225 most = e->dest;
236 if (atomic_read(&most->weight) > 0) { 226 if (atomic_read(&most->weight) > 0) {
237 moh = ip_vs_dest_conn_overhead(most); 227 moh = ip_vs_dest_conn_overhead(most);
238 goto nextstage; 228 goto nextstage;
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
243 /* find the destination with the weighted most load */ 233 /* find the destination with the weighted most load */
244 nextstage: 234 nextstage:
245 list_for_each_entry_continue(e, &set->list, list) { 235 list_for_each_entry_continue(e, &set->list, list) {
246 dest = rcu_dereference_protected(e->dest, 1); 236 dest = e->dest;
247 doh = ip_vs_dest_conn_overhead(dest); 237 doh = ip_vs_dest_conn_overhead(dest);
248 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 238 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
249 if ((moh * atomic_read(&dest->weight) < 239 if (((__s64)moh * atomic_read(&dest->weight) <
250 doh * atomic_read(&most->weight)) 240 (__s64)doh * atomic_read(&most->weight))
251 && (atomic_read(&dest->weight) > 0)) { 241 && (atomic_read(&dest->weight) > 0)) {
252 most = dest; 242 most = dest;
253 moh = doh; 243 moh = doh;
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
611 continue; 601 continue;
612 602
613 doh = ip_vs_dest_conn_overhead(dest); 603 doh = ip_vs_dest_conn_overhead(dest);
614 if (loh * atomic_read(&dest->weight) > 604 if ((__s64)loh * atomic_read(&dest->weight) >
615 doh * atomic_read(&least->weight)) { 605 (__s64)doh * atomic_read(&least->weight)) {
616 least = dest; 606 least = dest;
617 loh = doh; 607 loh = doh;
618 } 608 }
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
819{ 809{
820 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 810 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
821 unregister_pernet_subsys(&ip_vs_lblcr_ops); 811 unregister_pernet_subsys(&ip_vs_lblcr_ops);
822 synchronize_rcu(); 812 rcu_barrier();
823} 813}
824 814
825 815
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index d8d9860934fe..961a6de9bb29 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -40,7 +40,7 @@
40#include <net/ip_vs.h> 40#include <net/ip_vs.h>
41 41
42 42
43static inline unsigned int 43static inline int
44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 44ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
45{ 45{
46 /* 46 /*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
59 struct ip_vs_iphdr *iph) 59 struct ip_vs_iphdr *iph)
60{ 60{
61 struct ip_vs_dest *dest, *least = NULL; 61 struct ip_vs_dest *dest, *least = NULL;
62 unsigned int loh = 0, doh; 62 int loh = 0, doh;
63 63
64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
65 65
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
92 } 92 }
93 93
94 if (!least || 94 if (!least ||
95 (loh * atomic_read(&dest->weight) > 95 ((__s64)loh * atomic_read(&dest->weight) >
96 doh * atomic_read(&least->weight))) { 96 (__s64)doh * atomic_read(&least->weight))) {
97 least = dest; 97 least = dest;
98 loh = doh; 98 loh = doh;
99 } 99 }
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index a5284cc3d882..e446b9fa7424 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -44,7 +44,7 @@
44#include <net/ip_vs.h> 44#include <net/ip_vs.h>
45 45
46 46
47static inline unsigned int 47static inline int
48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 48ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
49{ 49{
50 /* 50 /*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
63 struct ip_vs_iphdr *iph) 63 struct ip_vs_iphdr *iph)
64{ 64{
65 struct ip_vs_dest *dest, *least; 65 struct ip_vs_dest *dest, *least;
66 unsigned int loh, doh; 66 int loh, doh;
67 67
68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 68 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
69 69
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
99 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 99 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
100 continue; 100 continue;
101 doh = ip_vs_sed_dest_overhead(dest); 101 doh = ip_vs_sed_dest_overhead(dest);
102 if (loh * atomic_read(&dest->weight) > 102 if ((__s64)loh * atomic_read(&dest->weight) >
103 doh * atomic_read(&least->weight)) { 103 (__s64)doh * atomic_read(&least->weight)) {
104 least = dest; 104 least = dest;
105 loh = doh; 105 loh = doh;
106 } 106 }
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 6dc1fa128840..b5b4650d50a9 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
35 struct ip_vs_iphdr *iph) 35 struct ip_vs_iphdr *iph)
36{ 36{
37 struct ip_vs_dest *dest, *least; 37 struct ip_vs_dest *dest, *least;
38 unsigned int loh, doh; 38 int loh, doh;
39 39
40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n"); 40 IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
41 41
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
71 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 71 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
72 continue; 72 continue;
73 doh = ip_vs_dest_conn_overhead(dest); 73 doh = ip_vs_dest_conn_overhead(dest);
74 if (loh * atomic_read(&dest->weight) > 74 if ((__s64)loh * atomic_read(&dest->weight) >
75 doh * atomic_read(&least->weight)) { 75 (__s64)doh * atomic_read(&least->weight)) {
76 least = dest; 76 least = dest;
77 loh = doh; 77 loh = doh;
78 } 78 }
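The lblc, lblcr, nq, sed and wlc hunks above all widen the weighted-overhead comparison to a signed 64-bit multiply: loh and doh are scaled connection counts and the weights are user-configurable, so the 32-bit products can wrap and make the scheduler prefer the wrong real server. A self-contained demonstration of why the (__s64) cast matters, with made-up numbers chosen so the true products straddle 2^32:

/* Shows 32-bit wraparound in a weighted least-load comparison and the
 * 64-bit fix used by the IPVS schedulers.  Build: cc -o wlc_cmp wlc_cmp.c
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Overhead of the current best server and of a candidate, plus weights. */
    uint32_t loh = 65536, least_weight = 65535;
    uint32_t doh = 65535, dest_weight  = 65538;

    /* 32-bit products wrap modulo 2^32, so the comparison can lie. */
    uint32_t lhs32 = loh * dest_weight;
    uint32_t rhs32 = doh * least_weight;
    printf("32-bit: %llu > %llu -> %s (wrong)\n",
           (unsigned long long)lhs32, (unsigned long long)rhs32,
           lhs32 > rhs32 ? "switch" : "keep");

    /* Widening first, as in (__s64)loh * weight, keeps the full product. */
    int64_t lhs64 = (int64_t)loh * dest_weight;
    int64_t rhs64 = (int64_t)doh * least_weight;
    printf("64-bit: %lld > %lld -> %s\n",
           (long long)lhs64, (long long)rhs64,
           lhs64 > rhs64 ? "switch" : "keep");
    return 0;
}

Here the true products are 4,295,098,368 versus 4,294,836,225, so the candidate should win; the truncated 32-bit left-hand side wraps to 131,072 and loses.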
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bdebd03bc8cd..70866d192efc 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
778 flowi6_to_flowi(&fl1), false)) { 778 flowi6_to_flowi(&fl1), false)) {
779 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, 779 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
780 flowi6_to_flowi(&fl2), false)) { 780 flowi6_to_flowi(&fl2), false)) {
781 if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, 781 if (ipv6_addr_equal(rt6_nexthop(rt1),
782 sizeof(rt1->rt6i_gateway)) && 782 rt6_nexthop(rt2)) &&
783 rt1->dst.dev == rt2->dst.dev) 783 rt1->dst.dev == rt2->dst.dev)
784 ret = 1; 784 ret = 1;
785 dst_release(&rt2->dst); 785 dst_release(&rt2->dst);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 6fd967c6278c..cdf4567ba9b3 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -24,7 +24,7 @@
24int synproxy_net_id; 24int synproxy_net_id;
25EXPORT_SYMBOL_GPL(synproxy_net_id); 25EXPORT_SYMBOL_GPL(synproxy_net_id);
26 26
27void 27bool
28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, 28synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
29 const struct tcphdr *th, struct synproxy_options *opts) 29 const struct tcphdr *th, struct synproxy_options *opts)
30{ 30{
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
32 u8 buf[40], *ptr; 32 u8 buf[40], *ptr;
33 33
34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); 34 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
35 BUG_ON(ptr == NULL); 35 if (ptr == NULL)
36 return false;
36 37
37 opts->options = 0; 38 opts->options = 0;
38 while (length > 0) { 39 while (length > 0) {
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
41 42
42 switch (opcode) { 43 switch (opcode) {
43 case TCPOPT_EOL: 44 case TCPOPT_EOL:
44 return; 45 return true;
45 case TCPOPT_NOP: 46 case TCPOPT_NOP:
46 length--; 47 length--;
47 continue; 48 continue;
48 default: 49 default:
49 opsize = *ptr++; 50 opsize = *ptr++;
50 if (opsize < 2) 51 if (opsize < 2)
51 return; 52 return true;
52 if (opsize > length) 53 if (opsize > length)
53 return; 54 return true;
54 55
55 switch (opcode) { 56 switch (opcode) {
56 case TCPOPT_MSS: 57 case TCPOPT_MSS:
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
84 length -= opsize; 85 length -= opsize;
85 } 86 }
86 } 87 }
88 return true;
87} 89}
88EXPORT_SYMBOL_GPL(synproxy_parse_options); 90EXPORT_SYMBOL_GPL(synproxy_parse_options);
89 91
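synproxy_parse_options() now returns bool so that a failed skb_header_pointer() (truncated TCP options) is reported to the caller instead of tripping BUG_ON. The shape of such an option walk is easy to get subtly wrong, so here is a standalone sketch of the same kind of loop over a plain byte buffer; parse_tcp_opts() and struct tcp_opts are invented, and the sketch returns false on malformed lengths, which is slightly stricter than the kernel routine (option kinds and lengths per RFC 793/1323):

/* Sketch: walk TCP options from a raw buffer, failing soft on bad input.
 * Build: cc -o tcpopt tcpopt.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tcp_opts {
    uint16_t mss;
    uint8_t  wscale;
    bool     sack_ok;
    bool     tstamp_ok;
};

static bool parse_tcp_opts(const uint8_t *ptr, int length, struct tcp_opts *o)
{
    memset(o, 0, sizeof(*o));

    while (length > 0) {
        uint8_t opcode = *ptr++;
        uint8_t opsize;

        switch (opcode) {
        case 0:                         /* TCPOPT_EOL */
            return true;
        case 1:                         /* TCPOPT_NOP */
            length--;
            continue;
        default:
            if (length < 2)
                return false;           /* truncated option header */
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)
                return false;           /* malformed option length */

            switch (opcode) {
            case 2:                     /* MSS */
                if (opsize == 4)
                    o->mss = (uint16_t)(ptr[0] << 8 | ptr[1]);
                break;
            case 3:                     /* window scale */
                if (opsize == 3)
                    o->wscale = ptr[0];
                break;
            case 4:                     /* SACK permitted */
                o->sack_ok = true;
                break;
            case 8:                     /* timestamps */
                o->tstamp_ok = true;
                break;
            }
            ptr += opsize - 2;
            length -= opsize;
        }
    }
    return true;
}

int main(void)
{
    /* MSS 1460, NOP, window scale 7, then EOL padding. */
    const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 3, 3, 7, 0, 0, 0, 0 };
    struct tcp_opts o;

    if (parse_tcp_opts(opts, sizeof(opts), &o))
        printf("mss=%u wscale=%u sack=%d ts=%d\n",
               o.mss, o.wscale, o.sack_ok, o.tstamp_ok);
    return 0;
}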
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 32ad015ee8ce..a9dfdda9ed1d 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
285 285
286 286
287/* remove one skb from head of flow queue */ 287/* remove one skb from head of flow queue */
288static struct sk_buff *fq_dequeue_head(struct fq_flow *flow) 288static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
289{ 289{
290 struct sk_buff *skb = flow->head; 290 struct sk_buff *skb = flow->head;
291 291
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
293 flow->head = skb->next; 293 flow->head = skb->next;
294 skb->next = NULL; 294 skb->next = NULL;
295 flow->qlen--; 295 flow->qlen--;
296 sch->qstats.backlog -= qdisc_pkt_len(skb);
297 sch->q.qlen--;
296 } 298 }
297 return skb; 299 return skb;
298} 300}
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
418 struct fq_flow_head *head; 420 struct fq_flow_head *head;
419 struct sk_buff *skb; 421 struct sk_buff *skb;
420 struct fq_flow *f; 422 struct fq_flow *f;
423 u32 rate;
421 424
422 skb = fq_dequeue_head(&q->internal); 425 skb = fq_dequeue_head(sch, &q->internal);
423 if (skb) 426 if (skb)
424 goto out; 427 goto out;
425 fq_check_throttled(q, now); 428 fq_check_throttled(q, now);
@@ -449,7 +452,7 @@ begin:
449 goto begin; 452 goto begin;
450 } 453 }
451 454
452 skb = fq_dequeue_head(f); 455 skb = fq_dequeue_head(sch, f);
453 if (!skb) { 456 if (!skb) {
454 head->first = f->next; 457 head->first = f->next;
455 /* force a pass through old_flows to prevent starvation */ 458 /* force a pass through old_flows to prevent starvation */
@@ -466,43 +469,70 @@ begin:
466 f->time_next_packet = now; 469 f->time_next_packet = now;
467 f->credit -= qdisc_pkt_len(skb); 470 f->credit -= qdisc_pkt_len(skb);
468 471
469 if (f->credit <= 0 && 472 if (f->credit > 0 || !q->rate_enable)
470 q->rate_enable && 473 goto out;
471 skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
472 u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
473 474
474 rate = min(rate, q->flow_max_rate); 475 rate = q->flow_max_rate;
475 if (rate) { 476 if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
476 u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC; 477 rate = min(skb->sk->sk_pacing_rate, rate);
477 478
478 do_div(len, rate); 479 if (rate != ~0U) {
479 /* Since socket rate can change later, 480 u32 plen = max(qdisc_pkt_len(skb), q->quantum);
480 * clamp the delay to 125 ms. 481 u64 len = (u64)plen * NSEC_PER_SEC;
481 * TODO: maybe segment the too big skb, as in commit
482 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
483 */
484 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
485 len = 125 * NSEC_PER_MSEC;
486 q->stat_pkts_too_long++;
487 }
488 482
489 f->time_next_packet = now + len; 483 if (likely(rate))
484 do_div(len, rate);
485 /* Since socket rate can change later,
486 * clamp the delay to 125 ms.
487 * TODO: maybe segment the too big skb, as in commit
488 * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
489 */
490 if (unlikely(len > 125 * NSEC_PER_MSEC)) {
491 len = 125 * NSEC_PER_MSEC;
492 q->stat_pkts_too_long++;
490 } 493 }
494
495 f->time_next_packet = now + len;
491 } 496 }
492out: 497out:
493 sch->qstats.backlog -= qdisc_pkt_len(skb);
494 qdisc_bstats_update(sch, skb); 498 qdisc_bstats_update(sch, skb);
495 sch->q.qlen--;
496 qdisc_unthrottled(sch); 499 qdisc_unthrottled(sch);
497 return skb; 500 return skb;
498} 501}
499 502
500static void fq_reset(struct Qdisc *sch) 503static void fq_reset(struct Qdisc *sch)
501{ 504{
505 struct fq_sched_data *q = qdisc_priv(sch);
506 struct rb_root *root;
502 struct sk_buff *skb; 507 struct sk_buff *skb;
508 struct rb_node *p;
509 struct fq_flow *f;
510 unsigned int idx;
503 511
504 while ((skb = fq_dequeue(sch)) != NULL) 512 while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
505 kfree_skb(skb); 513 kfree_skb(skb);
514
515 if (!q->fq_root)
516 return;
517
518 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
519 root = &q->fq_root[idx];
520 while ((p = rb_first(root)) != NULL) {
521 f = container_of(p, struct fq_flow, fq_node);
522 rb_erase(p, root);
523
524 while ((skb = fq_dequeue_head(sch, f)) != NULL)
525 kfree_skb(skb);
526
527 kmem_cache_free(fq_flow_cachep, f);
528 }
529 }
530 q->new_flows.first = NULL;
531 q->old_flows.first = NULL;
532 q->delayed = RB_ROOT;
533 q->flows = 0;
534 q->inactive_flows = 0;
535 q->throttled_flows = 0;
506} 536}
507 537
508static void fq_rehash(struct fq_sched_data *q, 538static void fq_rehash(struct fq_sched_data *q,
@@ -622,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
622 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); 652 q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
623 653
624 if (tb[TCA_FQ_INITIAL_QUANTUM]) 654 if (tb[TCA_FQ_INITIAL_QUANTUM])
625 q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); 655 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
626 656
627 if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) 657 if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
628 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); 658 q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -645,6 +675,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
645 while (sch->q.qlen > sch->limit) { 675 while (sch->q.qlen > sch->limit) {
646 struct sk_buff *skb = fq_dequeue(sch); 676 struct sk_buff *skb = fq_dequeue(sch);
647 677
678 if (!skb)
679 break;
648 kfree_skb(skb); 680 kfree_skb(skb);
649 drop_count++; 681 drop_count++;
650 } 682 }
@@ -657,21 +689,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
657static void fq_destroy(struct Qdisc *sch) 689static void fq_destroy(struct Qdisc *sch)
658{ 690{
659 struct fq_sched_data *q = qdisc_priv(sch); 691 struct fq_sched_data *q = qdisc_priv(sch);
660 struct rb_root *root;
661 struct rb_node *p;
662 unsigned int idx;
663 692
664 if (q->fq_root) { 693 fq_reset(sch);
665 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) { 694 kfree(q->fq_root);
666 root = &q->fq_root[idx];
667 while ((p = rb_first(root)) != NULL) {
668 rb_erase(p, root);
669 kmem_cache_free(fq_flow_cachep,
670 container_of(p, struct fq_flow, fq_node));
671 }
672 }
673 kfree(q->fq_root);
674 }
675 qdisc_watchdog_cancel(&q->watchdog); 695 qdisc_watchdog_cancel(&q->watchdog);
676} 696}
677 697
@@ -711,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
711 if (opts == NULL) 731 if (opts == NULL)
712 goto nla_put_failure; 732 goto nla_put_failure;
713 733
734 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
735 * do not bother giving its value
736 */
714 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || 737 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
715 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || 738 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
716 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || 739 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
717 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || 740 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
718 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || 741 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
719 nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
720 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || 742 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
721 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) 743 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
722 goto nla_put_failure; 744 goto nla_put_failure;
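In the reworked fq_dequeue() above, pacing only applies when a finite rate is configured (~0U meaning no limit): the gap to the next packet is max(packet length, quantum) * NSEC_PER_SEC / rate, clamped to 125 ms because the socket's pacing rate may change later. A small arithmetic sketch of that computation in plain C, with invented values; it follows the shape of the qdisc code but is not the qdisc code:

/* Sketch: next-packet delay for rate-based pacing, clamped to 125 ms.
 * Build: cc -o pacing pacing.c
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_MSEC 1000000ULL

/* Returns the delay in ns before the next packet of this flow may leave. */
static uint64_t pacing_delay_ns(uint32_t pkt_len, uint32_t quantum,
                                uint32_t rate_Bps)
{
    uint32_t plen = pkt_len > quantum ? pkt_len : quantum;
    uint64_t len;

    if (rate_Bps == ~0U)                /* "no maximum rate" sentinel */
        return 0;

    len = (uint64_t)plen * NSEC_PER_SEC;
    if (rate_Bps)
        len /= rate_Bps;

    /* The socket rate may change later, so cap the programmed delay. */
    if (len > 125 * NSEC_PER_MSEC)
        len = 125 * NSEC_PER_MSEC;
    return len;
}

int main(void)
{
    /* 1514-byte packet, 3028-byte quantum, 1 MB/s rate: about 3 ms. */
    printf("%llu ns\n",
           (unsigned long long)pacing_delay_ns(1514, 3028, 1000000));
    /* A pathologically low 1 kB/s rate hits the 125 ms clamp. */
    printf("%llu ns\n",
           (unsigned long long)pacing_delay_ns(1514, 3028, 1000));
    return 0;
}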
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a6d788d45216..b87e83d07478 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
358 return PSCHED_NS2TICKS(ticks); 358 return PSCHED_NS2TICKS(ticks);
359} 359}
360 360
361static void tfifo_reset(struct Qdisc *sch)
362{
363 struct netem_sched_data *q = qdisc_priv(sch);
364 struct rb_node *p;
365
366 while ((p = rb_first(&q->t_root))) {
367 struct sk_buff *skb = netem_rb_to_skb(p);
368
369 rb_erase(p, &q->t_root);
370 skb->next = NULL;
371 skb->prev = NULL;
372 kfree_skb(skb);
373 }
374}
375
361static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 376static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
362{ 377{
363 struct netem_sched_data *q = qdisc_priv(sch); 378 struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
520 skb->next = NULL; 535 skb->next = NULL;
521 skb->prev = NULL; 536 skb->prev = NULL;
522 len = qdisc_pkt_len(skb); 537 len = qdisc_pkt_len(skb);
538 sch->qstats.backlog -= len;
523 kfree_skb(skb); 539 kfree_skb(skb);
524 } 540 }
525 } 541 }
@@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch)
609 struct netem_sched_data *q = qdisc_priv(sch); 625 struct netem_sched_data *q = qdisc_priv(sch);
610 626
611 qdisc_reset_queue(sch); 627 qdisc_reset_queue(sch);
628 tfifo_reset(sch);
612 if (q->qdisc) 629 if (q->qdisc)
613 qdisc_reset(q->qdisc); 630 qdisc_reset(q->qdisc);
614 qdisc_watchdog_cancel(&q->watchdog); 631 qdisc_watchdog_cancel(&q->watchdog);
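[Editor's note] The new tfifo_reset() above plugs a leak: netem keeps delayed skbs on an rb-tree (t_root), and qdisc_reset_queue() alone never touched that tree. The drain it adds is the standard rb_first()/rb_erase() walk; a generic kernel-style helper for the same pattern might look like the sketch below (illustrative only, with a hypothetical element type, not the netem code).

#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical element kept in an rb-tree. */
struct queued_item {
	struct rb_node node;
	/* payload would live here */
};

/* Repeatedly take the leftmost node, unlink it, then free the
 * containing object -- the same walk tfifo_reset() performs over the
 * delayed skbs in netem's t_root. */
static void drain_queued_items(struct rb_root *root)
{
	struct rb_node *p;

	while ((p = rb_first(root)) != NULL) {
		struct queued_item *item =
			rb_entry(p, struct queued_item, node);

		rb_erase(p, root);
		kfree(item);
	}
}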
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0ac3a65daccb..319137340d15 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
536 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 536 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
537 */ 537 */
538 if (!sctp_checksum_disable) { 538 if (!sctp_checksum_disable) {
539 if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { 539 if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
540 (dst_xfrm(dst) != NULL) || packet->ipfragok) {
540 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 541 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
541 542
542 /* 3) Put the resultant value into the checksum field in the 543 /* 3) Put the resultant value into the checksum field in the
diff --git a/net/socket.c b/net/socket.c
index ebed4b68f768..c226aceee65b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1964,6 +1964,16 @@ struct used_address {
1964 unsigned int name_len; 1964 unsigned int name_len;
1965}; 1965};
1966 1966
1967static int copy_msghdr_from_user(struct msghdr *kmsg,
1968 struct msghdr __user *umsg)
1969{
1970 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
1971 return -EFAULT;
1972 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1973 return -EINVAL;
1974 return 0;
1975}
1976
1967static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1977static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1968 struct msghdr *msg_sys, unsigned int flags, 1978 struct msghdr *msg_sys, unsigned int flags,
1969 struct used_address *used_address) 1979 struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1982 if (MSG_CMSG_COMPAT & flags) { 1992 if (MSG_CMSG_COMPAT & flags) {
1983 if (get_compat_msghdr(msg_sys, msg_compat)) 1993 if (get_compat_msghdr(msg_sys, msg_compat))
1984 return -EFAULT; 1994 return -EFAULT;
1985 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 1995 } else {
1986 return -EFAULT; 1996 err = copy_msghdr_from_user(msg_sys, msg);
1997 if (err)
1998 return err;
1999 }
1987 2000
1988 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2001 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1989 err = -EMSGSIZE; 2002 err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2191 if (MSG_CMSG_COMPAT & flags) { 2204 if (MSG_CMSG_COMPAT & flags) {
2192 if (get_compat_msghdr(msg_sys, msg_compat)) 2205 if (get_compat_msghdr(msg_sys, msg_compat))
2193 return -EFAULT; 2206 return -EFAULT;
2194 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 2207 } else {
2195 return -EFAULT; 2208 err = copy_msghdr_from_user(msg_sys, msg);
2209 if (err)
2210 return err;
2211 }
2196 2212
2197 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2213 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2198 err = -EMSGSIZE; 2214 err = -EMSGSIZE;
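[Editor's note] copy_msghdr_from_user() above centralizes the msghdr copy for both sendmsg and recvmsg and adds the missing sanity check: msg_namelen is user-controlled and is later used as a copy length into a struct sockaddr_storage, so it must never exceed that buffer. A tiny user-space analogue of the same bound check, with a made-up helper name:

#include <errno.h>
#include <string.h>
#include <sys/socket.h>

/* Validate a caller-supplied address length before it is ever used as
 * a copy size into a fixed sockaddr_storage buffer -- the same limit
 * the kernel helper now enforces on msg->msg_namelen. */
static int copy_bounded_name(struct sockaddr_storage *dst,
			     const void *src, size_t name_len)
{
	if (name_len > sizeof(*dst))
		return -EINVAL;
	memcpy(dst, src, name_len);
	return 0;
}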
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 9bc6db04be3e..e7000be321b0 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -47,12 +47,12 @@ static int net_ctl_permissions(struct ctl_table_header *head,
47 47
48 /* Allow network administrator to have same access as root. */ 48 /* Allow network administrator to have same access as root. */
49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) || 49 if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
50 uid_eq(root_uid, current_uid())) { 50 uid_eq(root_uid, current_euid())) {
51 int mode = (table->mode >> 6) & 7; 51 int mode = (table->mode >> 6) & 7;
52 return (mode << 6) | (mode << 3) | mode; 52 return (mode << 6) | (mode << 3) | mode;
53 } 53 }
54 /* Allow netns root group to have the same access as the root group */ 54 /* Allow netns root group to have the same access as the root group */
55 if (gid_eq(root_gid, current_gid())) { 55 if (in_egroup_p(root_gid)) {
56 int mode = (table->mode >> 3) & 7; 56 int mode = (table->mode >> 3) & 7;
57 return (mode << 3) | mode; 57 return (mode << 3) | mode;
58 } 58 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 86de99ad2976..c1f403bed683 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
1246 return 0; 1246 return 0;
1247} 1247}
1248 1248
1249static void unix_sock_inherit_flags(const struct socket *old,
1250 struct socket *new)
1251{
1252 if (test_bit(SOCK_PASSCRED, &old->flags))
1253 set_bit(SOCK_PASSCRED, &new->flags);
1254 if (test_bit(SOCK_PASSSEC, &old->flags))
1255 set_bit(SOCK_PASSSEC, &new->flags);
1256}
1257
1249static int unix_accept(struct socket *sock, struct socket *newsock, int flags) 1258static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1250{ 1259{
1251 struct sock *sk = sock->sk; 1260 struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1280 /* attach accepted sock to socket */ 1289 /* attach accepted sock to socket */
1281 unix_state_lock(tsk); 1290 unix_state_lock(tsk);
1282 newsock->state = SS_CONNECTED; 1291 newsock->state = SS_CONNECTED;
1292 unix_sock_inherit_flags(sock, newsock);
1283 sock_graft(tsk, newsock); 1293 sock_graft(tsk, newsock);
1284 unix_state_unlock(tsk); 1294 unix_state_unlock(tsk);
1285 return 0; 1295 return 0;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index d591091603bf..86fa0f3b2caf 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
124 rep->udiag_family = AF_UNIX; 124 rep->udiag_family = AF_UNIX;
125 rep->udiag_type = sk->sk_type; 125 rep->udiag_type = sk->sk_type;
126 rep->udiag_state = sk->sk_state; 126 rep->udiag_state = sk->sk_state;
127 rep->pad = 0;
127 rep->udiag_ino = sk_ino; 128 rep->udiag_ino = sk_ino;
128 sock_diag_save_cookie(sk, rep->udiag_cookie); 129 sock_diag_save_cookie(sk, rep->udiag_cookie);
129 130
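[Editor's note] The one-liner in unix/diag.c above is an information-leak fix: the reply is copied to userspace over netlink, and its pad byte was never initialized, so it carried stale kernel memory. Setting it to 0 (or zeroing the whole structure before filling it) closes the leak. A hedged illustration with an invented reply layout:

#include <string.h>

/* Hypothetical wire-format reply; any field or padding left unset
 * would otherwise expose whatever was previously in that memory. */
struct diag_reply {
	unsigned char family;
	unsigned char type;
	unsigned char state;
	unsigned char pad;
	unsigned int  ino;
};

static void fill_diag_reply(struct diag_reply *rep, unsigned int ino)
{
	memset(rep, 0, sizeof(*rep));	/* nothing uninitialized escapes */
	rep->family = 1;
	rep->type = 2;
	rep->state = 3;
	rep->ino = ino;
}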
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 67153964aad2..aff959e5a1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
566 /* check and set up bitrates */ 566 /* check and set up bitrates */
567 ieee80211_set_bitrate_flags(wiphy); 567 ieee80211_set_bitrate_flags(wiphy);
568 568
569 569 rtnl_lock();
570 res = device_add(&rdev->wiphy.dev); 570 res = device_add(&rdev->wiphy.dev);
571 if (res)
572 return res;
573
574 res = rfkill_register(rdev->rfkill);
575 if (res) { 571 if (res) {
576 device_del(&rdev->wiphy.dev); 572 rtnl_unlock();
577 return res; 573 return res;
578 } 574 }
579 575
580 rtnl_lock();
581 /* set up regulatory info */ 576 /* set up regulatory info */
582 wiphy_regulatory_register(wiphy); 577 wiphy_regulatory_register(wiphy);
583 578
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
606 601
607 rdev->wiphy.registered = true; 602 rdev->wiphy.registered = true;
608 rtnl_unlock(); 603 rtnl_unlock();
604
605 res = rfkill_register(rdev->rfkill);
606 if (res) {
607 rfkill_destroy(rdev->rfkill);
608 rdev->rfkill = NULL;
609 wiphy_unregister(&rdev->wiphy);
610 return res;
611 }
612
609 return 0; 613 return 0;
610} 614}
611EXPORT_SYMBOL(wiphy_register); 615EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
640 rtnl_unlock(); 644 rtnl_unlock();
641 __count == 0; })); 645 __count == 0; }));
642 646
643 rfkill_unregister(rdev->rfkill); 647 if (rdev->rfkill)
648 rfkill_unregister(rdev->rfkill);
644 649
645 rtnl_lock(); 650 rtnl_lock();
646 rdev->wiphy.registered = false; 651 rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
953 case NETDEV_PRE_UP: 958 case NETDEV_PRE_UP:
954 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 959 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
955 return notifier_from_errno(-EOPNOTSUPP); 960 return notifier_from_errno(-EOPNOTSUPP);
956 if (rfkill_blocked(rdev->rfkill))
957 return notifier_from_errno(-ERFKILL);
958 ret = cfg80211_can_add_interface(rdev, wdev->iftype); 961 ret = cfg80211_can_add_interface(rdev, wdev->iftype);
959 if (ret) 962 if (ret)
960 return notifier_from_errno(ret); 963 return notifier_from_errno(ret);
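[Editor's note] The wiphy_register()/wiphy_unregister() changes above are about unwind ordering: device_add() failure now only needs an rtnl_unlock(), rfkill_register() moves to the very end so its failure can fall back on a full wiphy_unregister(), and the unregister path tolerates a NULL rfkill. The general shape of that discipline, as a self-contained sketch with invented names (not the cfg80211 code):

#include <stdio.h>

/* Toy device with two sub-registrations; all names are hypothetical. */
struct toy_dev { int core_up; int aux_up; };

static int register_core(struct toy_dev *d) { d->core_up = 1; return 0; }
static void unregister_core(struct toy_dev *d) { d->core_up = 0; }
static int register_aux(struct toy_dev *d)  { d->aux_up = 1; return 0; }
static void unregister_aux(struct toy_dev *d) { d->aux_up = 0; }

/* Register the piece whose failure is hardest to unwind last, and on
 * each error path tear down exactly what is already up. */
static int toy_register(struct toy_dev *d)
{
	int err;

	err = register_core(d);		/* plays the role of device_add() */
	if (err)
		return err;

	err = register_aux(d);		/* plays the role of rfkill_register() */
	if (err) {
		unregister_core(d);	/* full unwind of the earlier step */
		return err;
	}
	return 0;
}

static void toy_unregister(struct toy_dev *d)
{
	if (d->aux_up)			/* aux may never have registered */
		unregister_aux(d);
	unregister_core(d);
}

int main(void)
{
	struct toy_dev d = { 0, 0 };

	if (!toy_register(&d))
		toy_unregister(&d);
	printf("core=%d aux=%d\n", d.core_up, d.aux_up);
	return 0;
}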
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 9ad43c619c54..3159e9c284c5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -411,6 +411,9 @@ static inline int
411cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, 411cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
412 enum nl80211_iftype iftype) 412 enum nl80211_iftype iftype)
413{ 413{
414 if (rfkill_blocked(rdev->rfkill))
415 return -ERFKILL;
416
414 return cfg80211_can_change_interface(rdev, NULL, iftype); 417 return cfg80211_can_change_interface(rdev, NULL, iftype);
415} 418}
416 419
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39bff7d36768..403fe29c024d 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
263 if (chan->flags & IEEE80211_CHAN_DISABLED) 263 if (chan->flags & IEEE80211_CHAN_DISABLED)
264 continue; 264 continue;
265 wdev->wext.ibss.chandef.chan = chan; 265 wdev->wext.ibss.chandef.chan = chan;
266 wdev->wext.ibss.chandef.center_freq1 =
267 chan->center_freq;
266 break; 268 break;
267 } 269 }
268 270
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
347 if (chan) { 349 if (chan) {
348 wdev->wext.ibss.chandef.chan = chan; 350 wdev->wext.ibss.chandef.chan = chan;
349 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; 351 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
352 wdev->wext.ibss.chandef.center_freq1 = freq;
350 wdev->wext.ibss.channel_fixed = true; 353 wdev->wext.ibss.channel_fixed = true;
351 } else { 354 } else {
352 /* cfg80211_ibss_wext_join will pick one if needed */ 355 /* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index af8d84a4a5b2..626dc3b5fd8d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
2421 change = true; 2421 change = true;
2422 } 2422 }
2423 2423
2424 if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && 2424 if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
2425 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2425 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2426 return -EOPNOTSUPP; 2426 return -EOPNOTSUPP;
2427 2427
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
2483 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, 2483 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
2484 &flags); 2484 &flags);
2485 2485
2486 if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && 2486 if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
2487 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) 2487 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
2488 return -EOPNOTSUPP; 2488 return -EOPNOTSUPP;
2489 2489
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 7d604c06c3dc..a271c27fac77 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
97 struct ieee80211_radiotap_header *radiotap_header, 97 struct ieee80211_radiotap_header *radiotap_header,
98 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) 98 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
99{ 99{
100 /* check the radiotap header can actually be present */
101 if (max_length < sizeof(struct ieee80211_radiotap_header))
102 return -EINVAL;
103
100 /* Linux only supports version 0 radiotap format */ 104 /* Linux only supports version 0 radiotap format */
101 if (radiotap_header->it_version) 105 if (radiotap_header->it_version)
102 return -EINVAL; 106 return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
131 */ 135 */
132 136
133 if ((unsigned long)iterator->_arg - 137 if ((unsigned long)iterator->_arg -
134 (unsigned long)iterator->_rtheader > 138 (unsigned long)iterator->_rtheader +
139 sizeof(uint32_t) >
135 (unsigned long)iterator->_max_length) 140 (unsigned long)iterator->_max_length)
136 return -EINVAL; 141 return -EINVAL;
137 } 142 }
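[Editor's note] The radiotap change above adds the obvious but previously missing precondition: before reading any field of the header, make sure the buffer can hold the fixed-size header at all, and account for the full 32-bit "present" word when checking the argument pointer. A small user-space sketch of the same "length before dereference" guard, with a stand-in header type:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for ieee80211_radiotap_header: version, pad, length,
 * and the first "present" bitmap word. */
struct rt_hdr {
	uint8_t  version;
	uint8_t  pad;
	uint16_t len;
	uint32_t present;
};

/* Reject the buffer before touching any header field if it cannot
 * even hold the fixed header -- the guard the iterator gains above. */
static int rt_parse(const void *buf, size_t buf_len, struct rt_hdr *out)
{
	if (buf_len < sizeof(struct rt_hdr))
		return -EINVAL;
	memcpy(out, buf, sizeof(*out));
	if (out->version != 0)		/* only version 0 is understood */
		return -EINVAL;
	return 0;
}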
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ed38d5d81f9e..76e1873811d4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
334 334
335 atomic_inc(&policy->genid); 335 atomic_inc(&policy->genid);
336 336
337 del_timer(&policy->polq.hold_timer); 337 if (del_timer(&policy->polq.hold_timer))
338 xfrm_pol_put(policy);
338 xfrm_queue_purge(&policy->polq.hold_queue); 339 xfrm_queue_purge(&policy->polq.hold_queue);
339 340
340 if (del_timer(&policy->timer)) 341 if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
589 590
590 spin_lock_bh(&pq->hold_queue.lock); 591 spin_lock_bh(&pq->hold_queue.lock);
591 skb_queue_splice_init(&pq->hold_queue, &list); 592 skb_queue_splice_init(&pq->hold_queue, &list);
592 del_timer(&pq->hold_timer); 593 if (del_timer(&pq->hold_timer))
594 xfrm_pol_put(old);
593 spin_unlock_bh(&pq->hold_queue.lock); 595 spin_unlock_bh(&pq->hold_queue.lock);
594 596
595 if (skb_queue_empty(&list)) 597 if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
600 spin_lock_bh(&pq->hold_queue.lock); 602 spin_lock_bh(&pq->hold_queue.lock);
601 skb_queue_splice(&list, &pq->hold_queue); 603 skb_queue_splice(&list, &pq->hold_queue);
602 pq->timeout = XFRM_QUEUE_TMO_MIN; 604 pq->timeout = XFRM_QUEUE_TMO_MIN;
603 mod_timer(&pq->hold_timer, jiffies); 605 if (!mod_timer(&pq->hold_timer, jiffies))
606 xfrm_pol_hold(new);
604 spin_unlock_bh(&pq->hold_queue.lock); 607 spin_unlock_bh(&pq->hold_queue.lock);
605} 608}
606 609
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
1769 1772
1770 spin_lock(&pq->hold_queue.lock); 1773 spin_lock(&pq->hold_queue.lock);
1771 skb = skb_peek(&pq->hold_queue); 1774 skb = skb_peek(&pq->hold_queue);
1775 if (!skb) {
1776 spin_unlock(&pq->hold_queue.lock);
1777 goto out;
1778 }
1772 dst = skb_dst(skb); 1779 dst = skb_dst(skb);
1773 sk = skb->sk; 1780 sk = skb->sk;
1774 xfrm_decode_session(skb, &fl, dst->ops->family); 1781 xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
1787 goto purge_queue; 1794 goto purge_queue;
1788 1795
1789 pq->timeout = pq->timeout << 1; 1796 pq->timeout = pq->timeout << 1;
1790 mod_timer(&pq->hold_timer, jiffies + pq->timeout); 1797 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1791 return; 1798 xfrm_pol_hold(pol);
1799 goto out;
1792 } 1800 }
1793 1801
1794 dst_release(dst); 1802 dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
1819 err = dst_output(skb); 1827 err = dst_output(skb);
1820 } 1828 }
1821 1829
1830out:
1831 xfrm_pol_put(pol);
1822 return; 1832 return;
1823 1833
1824purge_queue: 1834purge_queue:
1825 pq->timeout = 0; 1835 pq->timeout = 0;
1826 xfrm_queue_purge(&pq->hold_queue); 1836 xfrm_queue_purge(&pq->hold_queue);
1837 xfrm_pol_put(pol);
1827} 1838}
1828 1839
1829static int xdst_queue_output(struct sk_buff *skb) 1840static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,8 @@ static int xdst_queue_output(struct sk_buff *skb)
1831 unsigned long sched_next; 1842 unsigned long sched_next;
1832 struct dst_entry *dst = skb_dst(skb); 1843 struct dst_entry *dst = skb_dst(skb);
1833 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 1844 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1834 struct xfrm_policy_queue *pq = &xdst->pols[0]->polq; 1845 struct xfrm_policy *pol = xdst->pols[0];
1846 struct xfrm_policy_queue *pq = &pol->polq;
1835 1847
1836 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 1848 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1837 kfree_skb(skb); 1849 kfree_skb(skb);
@@ -1850,10 +1862,12 @@ static int xdst_queue_output(struct sk_buff *skb)
1850 if (del_timer(&pq->hold_timer)) { 1862 if (del_timer(&pq->hold_timer)) {
1851 if (time_before(pq->hold_timer.expires, sched_next)) 1863 if (time_before(pq->hold_timer.expires, sched_next))
1852 sched_next = pq->hold_timer.expires; 1864 sched_next = pq->hold_timer.expires;
1865 xfrm_pol_put(pol);
1853 } 1866 }
1854 1867
1855 __skb_queue_tail(&pq->hold_queue, skb); 1868 __skb_queue_tail(&pq->hold_queue, skb);
1856 mod_timer(&pq->hold_timer, sched_next); 1869 if (!mod_timer(&pq->hold_timer, sched_next))
1870 xfrm_pol_hold(pol);
1857 1871
1858 spin_unlock_bh(&pq->hold_queue.lock); 1872 spin_unlock_bh(&pq->hold_queue.lock);
1859 1873
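[Editor's note] The xfrm_policy changes above all implement one rule: a pending hold_timer must own a reference on the policy it will dereference. mod_timer() returns 0 when the timer was not already pending (arming it created a new user, so take a reference), del_timer() returns nonzero when it removed a pending timer (drop the reference that timer held), and the handler itself puts its reference on every exit path. A hedged kernel-style sketch of that pairing with an invented object type, using the 3.12-era timer callback signature:

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/timer.h>

/* Hypothetical refcounted object with a deferred-work timer; the
 * hold/put discipline mirrors the xfrm_pol_hold()/xfrm_pol_put()
 * calls paired with mod_timer()/del_timer() above. */
struct tobj {
	atomic_t refcnt;
	struct timer_list timer;
};

static void tobj_hold(struct tobj *o) { atomic_inc(&o->refcnt); }

static void tobj_put(struct tobj *o)
{
	if (atomic_dec_and_test(&o->refcnt))
		kfree(o);
}

/* The handler owns the reference the pending timer held and must drop
 * it on every exit path (cf. the new out: label in the code above). */
static void tobj_timer_fn(unsigned long data)
{
	struct tobj *o = (struct tobj *)data;

	/* ... process queued work, possibly re-arm via tobj_arm() ... */
	tobj_put(o);
}

/* mod_timer() returns 0 when the timer was not already pending, i.e.
 * arming it created a new user of the object: take a reference. */
static void tobj_arm(struct tobj *o, unsigned long expires)
{
	if (!mod_timer(&o->timer, expires))
		tobj_hold(o);
}

/* del_timer() returns nonzero when it removed a pending timer, i.e.
 * the reference that pending timer held must be dropped here. */
static void tobj_disarm(struct tobj *o)
{
	if (del_timer(&o->timer))
		tobj_put(o);
}

static void tobj_init(struct tobj *o)
{
	atomic_set(&o->refcnt, 1);
	setup_timer(&o->timer, tobj_timer_fn, (unsigned long)o);
}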
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8dafe6d3c6e4..dab57daae408 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
61 61
62 switch (event) { 62 switch (event) {
63 case XFRM_REPLAY_UPDATE: 63 case XFRM_REPLAY_UPDATE:
64 if (x->replay_maxdiff && 64 if (!x->replay_maxdiff ||
65 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && 65 ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
66 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { 66 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
67 if (x->xflags & XFRM_TIME_DEFER) 67 if (x->xflags & XFRM_TIME_DEFER)
68 event = XFRM_REPLAY_TIMEOUT; 68 event = XFRM_REPLAY_TIMEOUT;
69 else 69 else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
129 return 0; 129 return 0;
130 130
131 diff = x->replay.seq - seq; 131 diff = x->replay.seq - seq;
132 if (diff >= min_t(unsigned int, x->props.replay_window, 132 if (diff >= x->props.replay_window) {
133 sizeof(x->replay.bitmap) * 8)) {
134 x->stats.replay_window++; 133 x->stats.replay_window++;
135 goto err; 134 goto err;
136 } 135 }
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
302 301
303 switch (event) { 302 switch (event) {
304 case XFRM_REPLAY_UPDATE: 303 case XFRM_REPLAY_UPDATE:
305 if (x->replay_maxdiff && 304 if (!x->replay_maxdiff ||
306 (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && 305 ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
307 (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { 306 (replay_esn->oseq - preplay_esn->oseq
307 < x->replay_maxdiff))) {
308 if (x->xflags & XFRM_TIME_DEFER) 308 if (x->xflags & XFRM_TIME_DEFER)
309 event = XFRM_REPLAY_TIMEOUT; 309 event = XFRM_REPLAY_TIMEOUT;
310 else 310 else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
353 353
354 switch (event) { 354 switch (event) {
355 case XFRM_REPLAY_UPDATE: 355 case XFRM_REPLAY_UPDATE:
356 if (!x->replay_maxdiff) 356 if (x->replay_maxdiff) {
357 break; 357 if (replay_esn->seq_hi == preplay_esn->seq_hi)
358 358 seq_diff = replay_esn->seq - preplay_esn->seq;
359 if (replay_esn->seq_hi == preplay_esn->seq_hi) 359 else
360 seq_diff = replay_esn->seq - preplay_esn->seq; 360 seq_diff = ~preplay_esn->seq + replay_esn->seq
361 else 361 + 1;
362 seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
363
364 if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
365 oseq_diff = replay_esn->oseq - preplay_esn->oseq;
366 else
367 oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
368
369 if (seq_diff < x->replay_maxdiff &&
370 oseq_diff < x->replay_maxdiff) {
371 362
372 if (x->xflags & XFRM_TIME_DEFER) 363 if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
373 event = XFRM_REPLAY_TIMEOUT; 364 oseq_diff = replay_esn->oseq
365 - preplay_esn->oseq;
374 else 366 else
375 return; 367 oseq_diff = ~preplay_esn->oseq
368 + replay_esn->oseq + 1;
369
370 if (seq_diff >= x->replay_maxdiff ||
371 oseq_diff >= x->replay_maxdiff)
372 break;
376 } 373 }
377 374
375 if (x->xflags & XFRM_TIME_DEFER)
376 event = XFRM_REPLAY_TIMEOUT;
377 else
378 return;
379
378 break; 380 break;
379 381
380 case XFRM_REPLAY_TIMEOUT: 382 case XFRM_REPLAY_TIMEOUT:
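[Editor's note] All three notify variants above invert the batching test the same way: replay_maxdiff == 0 used to suppress notifications entirely, while the intended meaning is "no batching, notify on every update"; only when maxdiff is non-zero and both sequence deltas are still below it may the notification be deferred. A tiny helper expressing that predicate (hypothetical name, not part of the patch):

#include <linux/types.h>

/* True when the update may be deferred: a zero max-diff means "never
 * defer", otherwise defer only while both deltas stay below it. */
static bool replay_notify_may_defer(u32 maxdiff, u32 seq_diff, u32 oseq_diff)
{
	if (!maxdiff)
		return false;
	return seq_diff < maxdiff && oseq_diff < maxdiff;
}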
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3f565e495ac6..f964d4c00ffb 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
446 memcpy(&x->sel, &p->sel, sizeof(x->sel)); 446 memcpy(&x->sel, &p->sel, sizeof(x->sel));
447 memcpy(&x->lft, &p->lft, sizeof(x->lft)); 447 memcpy(&x->lft, &p->lft, sizeof(x->lft));
448 x->props.mode = p->mode; 448 x->props.mode = p->mode;
449 x->props.replay_window = p->replay_window; 449 x->props.replay_window = min_t(unsigned int, p->replay_window,
450 sizeof(x->replay.bitmap) * 8);
450 x->props.reqid = p->reqid; 451 x->props.reqid = p->reqid;
451 x->props.family = p->family; 452 x->props.family = p->family;
452 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); 453 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1856 if (x->km.state != XFRM_STATE_VALID) 1857 if (x->km.state != XFRM_STATE_VALID)
1857 goto out; 1858 goto out;
1858 1859
1859 err = xfrm_replay_verify_len(x->replay_esn, rp); 1860 err = xfrm_replay_verify_len(x->replay_esn, re);
1860 if (err) 1861 if (err)
1861 goto out; 1862 goto out;
1862 1863
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 47016c304c84..66cad506b8a2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3975,8 +3975,8 @@ sub string_find_replace {
3975# check for new externs in .h files. 3975# check for new externs in .h files.
3976 if ($realfile =~ /\.h$/ && 3976 if ($realfile =~ /\.h$/ &&
3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) { 3977 $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
3978 if (WARN("AVOID_EXTERNS", 3978 if (CHK("AVOID_EXTERNS",
3979 "extern prototypes should be avoided in .h files\n" . $herecurr) && 3979 "extern prototypes should be avoided in .h files\n" . $herecurr) &&
3980 $fix) { 3980 $fix) {
3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/; 3981 $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
3982 } 3982 }
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 95c2b2689a03..7db9954f1af2 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -580,15 +580,13 @@ static struct aa_namespace *__next_namespace(struct aa_namespace *root,
580 580
581 /* check if the next ns is a sibling, parent, gp, .. */ 581 /* check if the next ns is a sibling, parent, gp, .. */
582 parent = ns->parent; 582 parent = ns->parent;
583 while (parent) { 583 while (ns != root) {
584 mutex_unlock(&ns->lock); 584 mutex_unlock(&ns->lock);
585 next = list_entry_next(ns, base.list); 585 next = list_entry_next(ns, base.list);
586 if (!list_entry_is_head(next, &parent->sub_ns, base.list)) { 586 if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
587 mutex_lock(&next->lock); 587 mutex_lock(&next->lock);
588 return next; 588 return next;
589 } 589 }
590 if (parent == root)
591 return NULL;
592 ns = parent; 590 ns = parent;
593 parent = parent->parent; 591 parent = parent->parent;
594 } 592 }
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index d6222ba4e919..532471d0b3a0 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -15,14 +15,14 @@
15 * it should be. 15 * it should be.
16 */ 16 */
17 17
18#include <linux/crypto.h> 18#include <crypto/hash.h>
19 19
20#include "include/apparmor.h" 20#include "include/apparmor.h"
21#include "include/crypto.h" 21#include "include/crypto.h"
22 22
23static unsigned int apparmor_hash_size; 23static unsigned int apparmor_hash_size;
24 24
25static struct crypto_hash *apparmor_tfm; 25static struct crypto_shash *apparmor_tfm;
26 26
27unsigned int aa_hash_size(void) 27unsigned int aa_hash_size(void)
28{ 28{
@@ -32,35 +32,33 @@ unsigned int aa_hash_size(void)
32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, 32int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
33 size_t len) 33 size_t len)
34{ 34{
35 struct scatterlist sg[2]; 35 struct {
36 struct hash_desc desc = { 36 struct shash_desc shash;
37 .tfm = apparmor_tfm, 37 char ctx[crypto_shash_descsize(apparmor_tfm)];
38 .flags = 0 38 } desc;
39 };
40 int error = -ENOMEM; 39 int error = -ENOMEM;
41 u32 le32_version = cpu_to_le32(version); 40 u32 le32_version = cpu_to_le32(version);
42 41
43 if (!apparmor_tfm) 42 if (!apparmor_tfm)
44 return 0; 43 return 0;
45 44
46 sg_init_table(sg, 2);
47 sg_set_buf(&sg[0], &le32_version, 4);
48 sg_set_buf(&sg[1], (u8 *) start, len);
49
50 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); 45 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
51 if (!profile->hash) 46 if (!profile->hash)
52 goto fail; 47 goto fail;
53 48
54 error = crypto_hash_init(&desc); 49 desc.shash.tfm = apparmor_tfm;
50 desc.shash.flags = 0;
51
52 error = crypto_shash_init(&desc.shash);
55 if (error) 53 if (error)
56 goto fail; 54 goto fail;
57 error = crypto_hash_update(&desc, &sg[0], 4); 55 error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
58 if (error) 56 if (error)
59 goto fail; 57 goto fail;
60 error = crypto_hash_update(&desc, &sg[1], len); 58 error = crypto_shash_update(&desc.shash, (u8 *) start, len);
61 if (error) 59 if (error)
62 goto fail; 60 goto fail;
63 error = crypto_hash_final(&desc, profile->hash); 61 error = crypto_shash_final(&desc.shash, profile->hash);
64 if (error) 62 if (error)
65 goto fail; 63 goto fail;
66 64
@@ -75,19 +73,19 @@ fail:
75 73
76static int __init init_profile_hash(void) 74static int __init init_profile_hash(void)
77{ 75{
78 struct crypto_hash *tfm; 76 struct crypto_shash *tfm;
79 77
80 if (!apparmor_initialized) 78 if (!apparmor_initialized)
81 return 0; 79 return 0;
82 80
83 tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); 81 tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
84 if (IS_ERR(tfm)) { 82 if (IS_ERR(tfm)) {
85 int error = PTR_ERR(tfm); 83 int error = PTR_ERR(tfm);
86 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error); 84 AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
87 return error; 85 return error;
88 } 86 }
89 apparmor_tfm = tfm; 87 apparmor_tfm = tfm;
90 apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm); 88 apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
91 89
92 aa_info_message("AppArmor sha1 policy hashing enabled"); 90 aa_info_message("AppArmor sha1 policy hashing enabled");
93 91
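[Editor's note] The AppArmor change above converts profile hashing from the scatterlist-based crypto_hash interface to the simpler synchronous shash API: one crypto_shash transform, a descriptor sized by crypto_shash_descsize(), then init/update/final over plain buffers. A hedged, self-contained sketch of that flow with an invented helper name (the patch itself places the descriptor on the stack; here it is allocated to avoid the variable-length struct):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative SHA-1 helper using the shash API; "out" must hold at
 * least crypto_shash_digestsize() bytes (20 for sha1). Sketch only. */
static int sha1_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* descriptor = fixed header + transform-specific context */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc);
	if (!err)
		err = crypto_shash_update(desc, data, len);
	if (!err)
		err = crypto_shash_final(desc, out);

	kfree(desc);
out_tfm:
	crypto_free_shash(tfm);
	return err;
}

When the data is one contiguous buffer, crypto_shash_digest() can collapse the init/update/final triple into a single call.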
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index f2d4b6348cbc..c28b0f20ab53 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -360,7 +360,9 @@ static inline void aa_put_replacedby(struct aa_replacedby *p)
360static inline void __aa_update_replacedby(struct aa_profile *orig, 360static inline void __aa_update_replacedby(struct aa_profile *orig,
361 struct aa_profile *new) 361 struct aa_profile *new)
362{ 362{
363 struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile); 363 struct aa_profile *tmp;
364 tmp = rcu_dereference_protected(orig->replacedby->profile,
365 mutex_is_locked(&orig->ns->lock));
364 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new)); 366 rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
365 orig->flags |= PFLAG_INVALID; 367 orig->flags |= PFLAG_INVALID;
366 aa_put_profile(tmp); 368 aa_put_profile(tmp);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 6172509fa2b7..705c2879d3a9 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -563,7 +563,8 @@ void __init aa_free_root_ns(void)
563static void free_replacedby(struct aa_replacedby *r) 563static void free_replacedby(struct aa_replacedby *r)
564{ 564{
565 if (r) { 565 if (r) {
566 aa_put_profile(rcu_dereference(r->profile)); 566 /* r->profile will not be updated any more as r is dead */
567 aa_put_profile(rcu_dereference_protected(r->profile, true));
567 kzfree(r); 568 kzfree(r);
568 } 569 }
569} 570}
@@ -609,6 +610,7 @@ void aa_free_profile(struct aa_profile *profile)
609 aa_put_dfa(profile->policy.dfa); 610 aa_put_dfa(profile->policy.dfa);
610 aa_put_replacedby(profile->replacedby); 611 aa_put_replacedby(profile->replacedby);
611 612
613 kzfree(profile->hash);
612 kzfree(profile); 614 kzfree(profile);
613} 615}
614 616
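[Editor's note] The two AppArmor RCU fixes above replace rcu_dereference() on the update side with rcu_dereference_protected(), whose second argument documents (and lets lockdep verify) why the plain dereference is safe: the namespace mutex is held in __aa_update_replacedby(), and in free_replacedby() the object is already unreachable. A generic update-side sketch of that annotation with invented types; unlike the AppArmor code, which relies on reference counts, this toy frees the old object after a grace period:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int val; };

struct holder {
	struct foo __rcu *ptr;
	struct mutex lock;
};

/* Writer path: the caller holds holder->lock, so say so explicitly
 * instead of pretending this is a read-side rcu_dereference(). */
static void holder_replace(struct holder *h, struct foo *new)
{
	struct foo *old;

	old = rcu_dereference_protected(h->ptr,
					mutex_is_locked(&h->lock));
	rcu_assign_pointer(h->ptr, new);
	synchronize_rcu();		/* wait out existing readers */
	kfree(old);
}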
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index dad36a6ab45f..fc3e6628a864 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -746,7 +746,6 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
746 * @tclass: target security class 746 * @tclass: target security class
747 * @requested: requested permissions, interpreted based on @tclass 747 * @requested: requested permissions, interpreted based on @tclass
748 * @auditdata: auxiliary audit data 748 * @auditdata: auxiliary audit data
749 * @flags: VFS walk flags
750 * 749 *
751 * Check the AVC to determine whether the @requested permissions are granted 750 * Check the AVC to determine whether the @requested permissions are granted
752 * for the SID pair (@ssid, @tsid), interpreting the permissions 751 * for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -756,17 +755,15 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
756 * permissions are granted, -%EACCES if any permissions are denied, or 755 * permissions are granted, -%EACCES if any permissions are denied, or
757 * another -errno upon other errors. 756 * another -errno upon other errors.
758 */ 757 */
759int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, 758int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
760 u32 requested, struct common_audit_data *auditdata, 759 u32 requested, struct common_audit_data *auditdata)
761 unsigned flags)
762{ 760{
763 struct av_decision avd; 761 struct av_decision avd;
764 int rc, rc2; 762 int rc, rc2;
765 763
766 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); 764 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
767 765
768 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 766 rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
769 flags);
770 if (rc2) 767 if (rc2)
771 return rc2; 768 return rc2;
772 return rc; 769 return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a5091ec06aa6..5b5231068516 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1502,7 +1502,7 @@ static int cred_has_capability(const struct cred *cred,
1502 1502
1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); 1503 rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
1504 if (audit == SECURITY_CAP_AUDIT) { 1504 if (audit == SECURITY_CAP_AUDIT) {
1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0); 1505 int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
1506 if (rc2) 1506 if (rc2)
1507 return rc2; 1507 return rc2;
1508 } 1508 }
@@ -1525,8 +1525,7 @@ static int task_has_system(struct task_struct *tsk,
1525static int inode_has_perm(const struct cred *cred, 1525static int inode_has_perm(const struct cred *cred,
1526 struct inode *inode, 1526 struct inode *inode,
1527 u32 perms, 1527 u32 perms,
1528 struct common_audit_data *adp, 1528 struct common_audit_data *adp)
1529 unsigned flags)
1530{ 1529{
1531 struct inode_security_struct *isec; 1530 struct inode_security_struct *isec;
1532 u32 sid; 1531 u32 sid;
@@ -1539,7 +1538,7 @@ static int inode_has_perm(const struct cred *cred,
1539 sid = cred_sid(cred); 1538 sid = cred_sid(cred);
1540 isec = inode->i_security; 1539 isec = inode->i_security;
1541 1540
1542 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1541 return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
1543} 1542}
1544 1543
1545/* Same as inode_has_perm, but pass explicit audit data containing 1544/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1554,7 +1553,7 @@ static inline int dentry_has_perm(const struct cred *cred,
1554 1553
1555 ad.type = LSM_AUDIT_DATA_DENTRY; 1554 ad.type = LSM_AUDIT_DATA_DENTRY;
1556 ad.u.dentry = dentry; 1555 ad.u.dentry = dentry;
1557 return inode_has_perm(cred, inode, av, &ad, 0); 1556 return inode_has_perm(cred, inode, av, &ad);
1558} 1557}
1559 1558
1560/* Same as inode_has_perm, but pass explicit audit data containing 1559/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1569,7 +1568,7 @@ static inline int path_has_perm(const struct cred *cred,
1569 1568
1570 ad.type = LSM_AUDIT_DATA_PATH; 1569 ad.type = LSM_AUDIT_DATA_PATH;
1571 ad.u.path = *path; 1570 ad.u.path = *path;
1572 return inode_has_perm(cred, inode, av, &ad, 0); 1571 return inode_has_perm(cred, inode, av, &ad);
1573} 1572}
1574 1573
1575/* Same as path_has_perm, but uses the inode from the file struct. */ 1574/* Same as path_has_perm, but uses the inode from the file struct. */
@@ -1581,7 +1580,7 @@ static inline int file_path_has_perm(const struct cred *cred,
1581 1580
1582 ad.type = LSM_AUDIT_DATA_PATH; 1581 ad.type = LSM_AUDIT_DATA_PATH;
1583 ad.u.path = file->f_path; 1582 ad.u.path = file->f_path;
1584 return inode_has_perm(cred, file_inode(file), av, &ad, 0); 1583 return inode_has_perm(cred, file_inode(file), av, &ad);
1585} 1584}
1586 1585
1587/* Check whether a task can use an open file descriptor to 1586/* Check whether a task can use an open file descriptor to
@@ -1617,7 +1616,7 @@ static int file_has_perm(const struct cred *cred,
1617 /* av is zero if only checking access to the descriptor. */ 1616 /* av is zero if only checking access to the descriptor. */
1618 rc = 0; 1617 rc = 0;
1619 if (av) 1618 if (av)
1620 rc = inode_has_perm(cred, inode, av, &ad, 0); 1619 rc = inode_has_perm(cred, inode, av, &ad);
1621 1620
1622out: 1621out:
1623 return rc; 1622 return rc;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 92d0ab561db8..f53ee3c58d0f 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -130,7 +130,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
130 u16 tclass, u32 requested, 130 u16 tclass, u32 requested,
131 struct av_decision *avd, 131 struct av_decision *avd,
132 int result, 132 int result,
133 struct common_audit_data *a, unsigned flags) 133 struct common_audit_data *a)
134{ 134{
135 u32 audited, denied; 135 u32 audited, denied;
136 audited = avc_audit_required(requested, avd, result, 0, &denied); 136 audited = avc_audit_required(requested, avd, result, 0, &denied);
@@ -138,7 +138,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
138 return 0; 138 return 0;
139 return slow_avc_audit(ssid, tsid, tclass, 139 return slow_avc_audit(ssid, tsid, tclass,
140 requested, audited, denied, 140 requested, audited, denied,
141 a, flags); 141 a, 0);
142} 142}
143 143
144#define AVC_STRICT 1 /* Ignore permissive mode. */ 144#define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -147,17 +147,9 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
147 unsigned flags, 147 unsigned flags,
148 struct av_decision *avd); 148 struct av_decision *avd);
149 149
150int avc_has_perm_flags(u32 ssid, u32 tsid, 150int avc_has_perm(u32 ssid, u32 tsid,
151 u16 tclass, u32 requested, 151 u16 tclass, u32 requested,
152 struct common_audit_data *auditdata, 152 struct common_audit_data *auditdata);
153 unsigned);
154
155static inline int avc_has_perm(u32 ssid, u32 tsid,
156 u16 tclass, u32 requested,
157 struct common_audit_data *auditdata)
158{
159 return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
160}
161 153
162u32 avc_policy_seqno(void); 154u32 avc_policy_seqno(void);
163 155
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 98969541cbcc..bea523a5d852 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
139static int snd_compr_free(struct inode *inode, struct file *f) 139static int snd_compr_free(struct inode *inode, struct file *f)
140{ 140{
141 struct snd_compr_file *data = f->private_data; 141 struct snd_compr_file *data = f->private_data;
142 struct snd_compr_runtime *runtime = data->stream.runtime;
143
144 switch (runtime->state) {
145 case SNDRV_PCM_STATE_RUNNING:
146 case SNDRV_PCM_STATE_DRAINING:
147 case SNDRV_PCM_STATE_PAUSED:
148 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
149 break;
150 default:
151 break;
152 }
153
142 data->stream.ops->free(&data->stream); 154 data->stream.ops->free(&data->stream);
143 kfree(data->stream.runtime->buffer); 155 kfree(data->stream.runtime->buffer);
144 kfree(data->stream.runtime); 156 kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
837 struct snd_compr *compr; 849 struct snd_compr *compr;
838 850
839 compr = device->device_data; 851 compr = device->device_data;
840 snd_unregister_device(compr->direction, compr->card, compr->device); 852 snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
853 compr->device);
841 return 0; 854 return 0;
842} 855}
843 856
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 445ca481d8d3..bf578ba2677e 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -175,6 +175,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
175{ 0x54524106, 0xffffffff, "TR28026", NULL, NULL }, 175{ 0x54524106, 0xffffffff, "TR28026", NULL, NULL },
176{ 0x54524108, 0xffffffff, "TR28028", patch_tritech_tr28028, NULL }, // added by xin jin [07/09/99] 176{ 0x54524108, 0xffffffff, "TR28028", patch_tritech_tr28028, NULL }, // added by xin jin [07/09/99]
177{ 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)] 177{ 0x54524123, 0xffffffff, "TR28602", NULL, NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
178{ 0x54584e03, 0xffffffff, "TLV320AIC27", NULL, NULL },
178{ 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL }, 179{ 0x54584e20, 0xffffffff, "TLC320AD9xC", NULL, NULL },
179{ 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF 180{ 0x56494161, 0xffffffff, "VIA1612A", NULL, NULL }, // modified ICE1232 with S/PDIF
180{ 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF 181{ 0x56494170, 0xffffffff, "VIA1617A", patch_vt1617a, NULL }, // modified VT1616 with S/PDIF
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac41e9cdc976..26ad4f0aade3 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3531,7 +3531,7 @@ static int create_capture_mixers(struct hda_codec *codec)
3531 if (!multi) 3531 if (!multi)
3532 err = create_single_cap_vol_ctl(codec, n, vol, sw, 3532 err = create_single_cap_vol_ctl(codec, n, vol, sw,
3533 inv_dmic); 3533 inv_dmic);
3534 else if (!multi_cap_vol) 3534 else if (!multi_cap_vol && !inv_dmic)
3535 err = create_bind_cap_vol_ctl(codec, n, vol, sw); 3535 err = create_bind_cap_vol_ctl(codec, n, vol, sw);
3536 else 3536 else
3537 err = create_multi_cap_vol_ctl(codec); 3537 err = create_multi_cap_vol_ctl(codec);
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index b524f89a1f13..18d972501585 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -111,6 +111,9 @@ enum {
111/* 0x0009 - 0x0014 -> 12 test regs */ 111/* 0x0009 - 0x0014 -> 12 test regs */
112/* 0x0015 - visibility reg */ 112/* 0x0015 - visibility reg */
113 113
114/* Cirrus Logic CS4208 */
115#define CS4208_VENDOR_NID 0x24
116
114/* 117/*
115 * Cirrus Logic CS4210 118 * Cirrus Logic CS4210
116 * 119 *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
223 {} /* terminator */ 226 {} /* terminator */
224}; 227};
225 228
229static const struct hda_verb cs4208_coef_init_verbs[] = {
230 {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
231 {0x24, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
232 {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
233 {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
234 {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
235 {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
236 {} /* terminator */
237};
238
226/* Errata: CS4207 rev C0/C1/C2 Silicon 239/* Errata: CS4207 rev C0/C1/C2 Silicon
227 * 240 *
228 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf 241 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
295 /* init_verb sequence for C0/C1/C2 errata*/ 308 /* init_verb sequence for C0/C1/C2 errata*/
296 snd_hda_sequence_write(codec, cs_errata_init_verbs); 309 snd_hda_sequence_write(codec, cs_errata_init_verbs);
297 snd_hda_sequence_write(codec, cs_coef_init_verbs); 310 snd_hda_sequence_write(codec, cs_coef_init_verbs);
311 } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
312 snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
298 } 313 }
299 314
300 snd_hda_gen_init(codec); 315 snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
434 {} /* terminator */ 449 {} /* terminator */
435}; 450};
436 451
452static const struct hda_pintbl mba6_pincfgs[] = {
453 { 0x10, 0x032120f0 }, /* HP */
454 { 0x11, 0x500000f0 },
455 { 0x12, 0x90100010 }, /* Speaker */
456 { 0x13, 0x500000f0 },
457 { 0x14, 0x500000f0 },
458 { 0x15, 0x770000f0 },
459 { 0x16, 0x770000f0 },
460 { 0x17, 0x430000f0 },
461 { 0x18, 0x43ab9030 }, /* Mic */
462 { 0x19, 0x770000f0 },
463 { 0x1a, 0x770000f0 },
464 { 0x1b, 0x770000f0 },
465 { 0x1c, 0x90a00090 },
466 { 0x1d, 0x500000f0 },
467 { 0x1e, 0x500000f0 },
468 { 0x1f, 0x500000f0 },
469 { 0x20, 0x500000f0 },
470 { 0x21, 0x430000f0 },
471 { 0x22, 0x430000f0 },
472 {} /* terminator */
473};
474
437static void cs420x_fixup_gpio_13(struct hda_codec *codec, 475static void cs420x_fixup_gpio_13(struct hda_codec *codec,
438 const struct hda_fixup *fix, int action) 476 const struct hda_fixup *fix, int action)
439{ 477{
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
556 594
557/* 595/*
558 * CS4208 support: 596 * CS4208 support:
559 * Its layout is no longer compatible with CS4206/CS4207, and the generic 597 * Its layout is no longer compatible with CS4206/CS4207
560 * parser seems working fairly well, except for trivial fixups.
561 */ 598 */
562enum { 599enum {
600 CS4208_MBA6,
563 CS4208_GPIO0, 601 CS4208_GPIO0,
564}; 602};
565 603
566static const struct hda_model_fixup cs4208_models[] = { 604static const struct hda_model_fixup cs4208_models[] = {
567 { .id = CS4208_GPIO0, .name = "gpio0" }, 605 { .id = CS4208_GPIO0, .name = "gpio0" },
606 { .id = CS4208_MBA6, .name = "mba6" },
568 {} 607 {}
569}; 608};
570 609
571static const struct snd_pci_quirk cs4208_fixup_tbl[] = { 610static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
572 /* codec SSID */ 611 /* codec SSID */
573 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0), 612 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
574 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0), 613 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
575 {} /* terminator */ 614 {} /* terminator */
576}; 615};
577 616
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
588} 627}
589 628
590static const struct hda_fixup cs4208_fixups[] = { 629static const struct hda_fixup cs4208_fixups[] = {
630 [CS4208_MBA6] = {
631 .type = HDA_FIXUP_PINS,
632 .v.pins = mba6_pincfgs,
633 .chained = true,
634 .chain_id = CS4208_GPIO0,
635 },
591 [CS4208_GPIO0] = { 636 [CS4208_GPIO0] = {
592 .type = HDA_FIXUP_FUNC, 637 .type = HDA_FIXUP_FUNC,
593 .v.func = cs4208_fixup_gpio0, 638 .v.func = cs4208_fixup_gpio0,
594 }, 639 },
595}; 640};
596 641
642/* correct the 0dB offset of input pins */
643static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
644{
645 unsigned int caps;
646
647 caps = query_amp_caps(codec, adc, HDA_INPUT);
648 caps &= ~(AC_AMPCAP_OFFSET);
649 caps |= 0x02;
650 snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
651}
652
597static int patch_cs4208(struct hda_codec *codec) 653static int patch_cs4208(struct hda_codec *codec)
598{ 654{
599 struct cs_spec *spec; 655 struct cs_spec *spec;
600 int err; 656 int err;
601 657
602 spec = cs_alloc_spec(codec, 0); /* no specific w/a */ 658 spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
603 if (!spec) 659 if (!spec)
604 return -ENOMEM; 660 return -ENOMEM;
605 661
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
609 cs4208_fixups); 665 cs4208_fixups);
610 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); 666 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
611 667
668 snd_hda_override_wcaps(codec, 0x18,
669 get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
670 cs4208_fix_amp_caps(codec, 0x18);
671 cs4208_fix_amp_caps(codec, 0x1b);
672 cs4208_fix_amp_caps(codec, 0x1c);
673
612 err = cs_parse_auto_config(codec); 674 err = cs_parse_auto_config(codec);
613 if (err < 0) 675 if (err < 0)
614 goto error; 676 goto error;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4edd2d0f9a3c..ec68eaea0336 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3231,6 +3231,7 @@ enum {
3231 CXT_FIXUP_INC_MIC_BOOST, 3231 CXT_FIXUP_INC_MIC_BOOST,
3232 CXT_FIXUP_HEADPHONE_MIC_PIN, 3232 CXT_FIXUP_HEADPHONE_MIC_PIN,
3233 CXT_FIXUP_HEADPHONE_MIC, 3233 CXT_FIXUP_HEADPHONE_MIC,
3234 CXT_FIXUP_GPIO1,
3234}; 3235};
3235 3236
3236static void cxt_fixup_stereo_dmic(struct hda_codec *codec, 3237static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3375,6 +3376,15 @@ static const struct hda_fixup cxt_fixups[] = {
3375 .type = HDA_FIXUP_FUNC, 3376 .type = HDA_FIXUP_FUNC,
3376 .v.func = cxt_fixup_headphone_mic, 3377 .v.func = cxt_fixup_headphone_mic,
3377 }, 3378 },
3379 [CXT_FIXUP_GPIO1] = {
3380 .type = HDA_FIXUP_VERBS,
3381 .v.verbs = (const struct hda_verb[]) {
3382 { 0x01, AC_VERB_SET_GPIO_MASK, 0x01 },
3383 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01 },
3384 { 0x01, AC_VERB_SET_GPIO_DATA, 0x01 },
3385 { }
3386 },
3387 },
3378}; 3388};
3379 3389
3380static const struct snd_pci_quirk cxt5051_fixups[] = { 3390static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3384,6 +3394,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
3384 3394
3385static const struct snd_pci_quirk cxt5066_fixups[] = { 3395static const struct snd_pci_quirk cxt5066_fixups[] = {
3386 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), 3396 SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
3397 SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1),
3387 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 3398 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
3388 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), 3399 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
3389 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), 3400 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3d8cd04455a6..50173d412ac5 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -937,6 +937,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
937 } 937 }
938 938
939 /* 939 /*
940 * always configure channel mapping, it may have been changed by the
941 * user in the meantime
942 */
943 hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
944 channels, per_pin->chmap,
945 per_pin->chmap_set);
946
947 /*
940 * sizeof(ai) is used instead of sizeof(*hdmi_ai) or 948 * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
941 * sizeof(*dp_ai) to avoid partial match/update problems when 949 * sizeof(*dp_ai) to avoid partial match/update problems when
942 * the user switches between HDMI/DP monitors. 950 * the user switches between HDMI/DP monitors.
@@ -947,20 +955,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
947 "pin=%d channels=%d\n", 955 "pin=%d channels=%d\n",
948 pin_nid, 956 pin_nid,
949 channels); 957 channels);
950 hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
951 channels, per_pin->chmap,
952 per_pin->chmap_set);
953 hdmi_stop_infoframe_trans(codec, pin_nid); 958 hdmi_stop_infoframe_trans(codec, pin_nid);
954 hdmi_fill_audio_infoframe(codec, pin_nid, 959 hdmi_fill_audio_infoframe(codec, pin_nid,
955 ai.bytes, sizeof(ai)); 960 ai.bytes, sizeof(ai));
956 hdmi_start_infoframe_trans(codec, pin_nid); 961 hdmi_start_infoframe_trans(codec, pin_nid);
957 } else {
958 /* For non-pcm audio switch, setup new channel mapping
959 * accordingly */
960 if (per_pin->non_pcm != non_pcm)
961 hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
962 channels, per_pin->chmap,
963 per_pin->chmap_set);
964 } 962 }
965 963
966 per_pin->non_pcm = non_pcm; 964 per_pin->non_pcm = non_pcm;
@@ -1149,32 +1147,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
1149} 1147}
1150 1148
1151static void haswell_config_cvts(struct hda_codec *codec, 1149static void haswell_config_cvts(struct hda_codec *codec,
1152 int pin_id, int mux_id) 1150 hda_nid_t pin_nid, int mux_idx)
1153{ 1151{
1154 struct hdmi_spec *spec = codec->spec; 1152 struct hdmi_spec *spec = codec->spec;
1155 struct hdmi_spec_per_pin *per_pin; 1153 hda_nid_t nid, end_nid;
1156 int pin_idx, mux_idx; 1154 int cvt_idx, curr;
1157 int curr; 1155 struct hdmi_spec_per_cvt *per_cvt;
1158 int err;
1159 1156
1160 for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { 1157 /* configure all pins, including "no physical connection" ones */
1161 per_pin = get_pin(spec, pin_idx); 1158 end_nid = codec->start_nid + codec->num_nodes;
1159 for (nid = codec->start_nid; nid < end_nid; nid++) {
1160 unsigned int wid_caps = get_wcaps(codec, nid);
1161 unsigned int wid_type = get_wcaps_type(wid_caps);
1162
1163 if (wid_type != AC_WID_PIN)
1164 continue;
1162 1165
1163 if (pin_idx == pin_id) 1166 if (nid == pin_nid)
1164 continue; 1167 continue;
1165 1168
1166 curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0, 1169 curr = snd_hda_codec_read(codec, nid, 0,
1167 AC_VERB_GET_CONNECT_SEL, 0); 1170 AC_VERB_GET_CONNECT_SEL, 0);
1171 if (curr != mux_idx)
1172 continue;
1168 1173
1169 /* Choose another unused converter */ 1174 /* choose an unassigned converter. The conveters in the
1170 if (curr == mux_id) { 1175 * connection list are in the same order as in the codec.
1171 err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx); 1176 */
1172 if (err < 0) 1177 for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
1173 return; 1178 per_cvt = get_cvt(spec, cvt_idx);
1174 snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx); 1179 if (!per_cvt->assigned) {
1175 snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0, 1180 snd_printdd("choose cvt %d for pin nid %d\n",
1181 cvt_idx, nid);
1182 snd_hda_codec_write_cache(codec, nid, 0,
1176 AC_VERB_SET_CONNECT_SEL, 1183 AC_VERB_SET_CONNECT_SEL,
1177 mux_idx); 1184 cvt_idx);
1185 break;
1186 }
1178 } 1187 }
1179 } 1188 }
1180} 1189}
@@ -1216,7 +1225,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
1216 1225
1217 /* configure unused pins to choose other converters */ 1226 /* configure unused pins to choose other converters */
1218 if (is_haswell(codec)) 1227 if (is_haswell(codec))
1219 haswell_config_cvts(codec, pin_idx, mux_idx); 1228 haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
1220 1229
1221 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid); 1230 snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
1222 1231
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc07d369fac4..bf313bea7085 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2819,6 +2819,15 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
2819 alc_write_coef_idx(codec, 0x1e, coef | 0x80); 2819 alc_write_coef_idx(codec, 0x1e, coef | 0x80);
2820} 2820}
2821 2821
2822static void alc269_fixup_headset_mic(struct hda_codec *codec,
2823 const struct hda_fixup *fix, int action)
2824{
2825 struct alc_spec *spec = codec->spec;
2826
2827 if (action == HDA_FIXUP_ACT_PRE_PROBE)
2828 spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
2829}
2830
2822static void alc271_fixup_dmic(struct hda_codec *codec, 2831static void alc271_fixup_dmic(struct hda_codec *codec,
2823 const struct hda_fixup *fix, int action) 2832 const struct hda_fixup *fix, int action)
2824{ 2833{
@@ -3439,6 +3448,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
3439 /* Set to manual mode */ 3448 /* Set to manual mode */
3440 val = alc_read_coef_idx(codec, 0x06); 3449 val = alc_read_coef_idx(codec, 0x06);
3441 alc_write_coef_idx(codec, 0x06, val & ~0x000c); 3450 alc_write_coef_idx(codec, 0x06, val & ~0x000c);
3451 /* Enable Line1 input control by verb */
3452 val = alc_read_coef_idx(codec, 0x1a);
3453 alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
3442 break; 3454 break;
3443 } 3455 }
3444} 3456}
@@ -3493,6 +3505,15 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
3493 } 3505 }
3494} 3506}
3495 3507
3508static void alc290_fixup_mono_speakers(struct hda_codec *codec,
3509 const struct hda_fixup *fix, int action)
3510{
3511 if (action == HDA_FIXUP_ACT_PRE_PROBE)
3512 /* Remove DAC node 0x03, as it seems to be
3513 giving mono output */
3514 snd_hda_override_wcaps(codec, 0x03, 0);
3515}
3516
3496enum { 3517enum {
3497 ALC269_FIXUP_SONY_VAIO, 3518 ALC269_FIXUP_SONY_VAIO,
3498 ALC275_FIXUP_SONY_VAIO_GPIO2, 3519 ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -3504,6 +3525,7 @@ enum {
3504 ALC271_FIXUP_DMIC, 3525 ALC271_FIXUP_DMIC,
3505 ALC269_FIXUP_PCM_44K, 3526 ALC269_FIXUP_PCM_44K,
3506 ALC269_FIXUP_STEREO_DMIC, 3527 ALC269_FIXUP_STEREO_DMIC,
3528 ALC269_FIXUP_HEADSET_MIC,
3507 ALC269_FIXUP_QUANTA_MUTE, 3529 ALC269_FIXUP_QUANTA_MUTE,
3508 ALC269_FIXUP_LIFEBOOK, 3530 ALC269_FIXUP_LIFEBOOK,
3509 ALC269_FIXUP_AMIC, 3531 ALC269_FIXUP_AMIC,
@@ -3516,9 +3538,11 @@ enum {
3516 ALC269_FIXUP_HP_GPIO_LED, 3538 ALC269_FIXUP_HP_GPIO_LED,
3517 ALC269_FIXUP_INV_DMIC, 3539 ALC269_FIXUP_INV_DMIC,
3518 ALC269_FIXUP_LENOVO_DOCK, 3540 ALC269_FIXUP_LENOVO_DOCK,
3541 ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
3519 ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, 3542 ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
3520 ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 3543 ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
3521 ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, 3544 ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
3545 ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
3522 ALC269_FIXUP_HEADSET_MODE, 3546 ALC269_FIXUP_HEADSET_MODE,
3523 ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, 3547 ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
3524 ALC269_FIXUP_ASUS_X101_FUNC, 3548 ALC269_FIXUP_ASUS_X101_FUNC,
@@ -3531,6 +3555,8 @@ enum {
3531 ALC269VB_FIXUP_ORDISSIMO_EVE2, 3555 ALC269VB_FIXUP_ORDISSIMO_EVE2,
3532 ALC283_FIXUP_CHROME_BOOK, 3556 ALC283_FIXUP_CHROME_BOOK,
3533 ALC282_FIXUP_ASUS_TX300, 3557 ALC282_FIXUP_ASUS_TX300,
3558 ALC283_FIXUP_INT_MIC,
3559 ALC290_FIXUP_MONO_SPEAKERS,
3534}; 3560};
3535 3561
3536static const struct hda_fixup alc269_fixups[] = { 3562static const struct hda_fixup alc269_fixups[] = {
@@ -3599,6 +3625,10 @@ static const struct hda_fixup alc269_fixups[] = {
3599 .type = HDA_FIXUP_FUNC, 3625 .type = HDA_FIXUP_FUNC,
3600 .v.func = alc269_fixup_stereo_dmic, 3626 .v.func = alc269_fixup_stereo_dmic,
3601 }, 3627 },
3628 [ALC269_FIXUP_HEADSET_MIC] = {
3629 .type = HDA_FIXUP_FUNC,
3630 .v.func = alc269_fixup_headset_mic,
3631 },
3602 [ALC269_FIXUP_QUANTA_MUTE] = { 3632 [ALC269_FIXUP_QUANTA_MUTE] = {
3603 .type = HDA_FIXUP_FUNC, 3633 .type = HDA_FIXUP_FUNC,
3604 .v.func = alc269_fixup_quanta_mute, 3634 .v.func = alc269_fixup_quanta_mute,
@@ -3708,6 +3738,15 @@ static const struct hda_fixup alc269_fixups[] = {
3708 .chained = true, 3738 .chained = true,
3709 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 3739 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
3710 }, 3740 },
3741 [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
3742 .type = HDA_FIXUP_PINS,
3743 .v.pins = (const struct hda_pintbl[]) {
3744 { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
3745 { }
3746 },
3747 .chained = true,
3748 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
3749 },
3711 [ALC269_FIXUP_HEADSET_MODE] = { 3750 [ALC269_FIXUP_HEADSET_MODE] = {
3712 .type = HDA_FIXUP_FUNC, 3751 .type = HDA_FIXUP_FUNC,
3713 .v.func = alc_fixup_headset_mode, 3752 .v.func = alc_fixup_headset_mode,
@@ -3716,6 +3755,15 @@ static const struct hda_fixup alc269_fixups[] = {
3716 .type = HDA_FIXUP_FUNC, 3755 .type = HDA_FIXUP_FUNC,
3717 .v.func = alc_fixup_headset_mode_no_hp_mic, 3756 .v.func = alc_fixup_headset_mode_no_hp_mic,
3718 }, 3757 },
3758 [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
3759 .type = HDA_FIXUP_PINS,
3760 .v.pins = (const struct hda_pintbl[]) {
3761 { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
3762 { }
3763 },
3764 .chained = true,
3765 .chain_id = ALC269_FIXUP_HEADSET_MIC
3766 },
3719 [ALC269_FIXUP_ASUS_X101_FUNC] = { 3767 [ALC269_FIXUP_ASUS_X101_FUNC] = {
3720 .type = HDA_FIXUP_FUNC, 3768 .type = HDA_FIXUP_FUNC,
3721 .v.func = alc269_fixup_x101_headset_mic, 3769 .v.func = alc269_fixup_x101_headset_mic,
@@ -3790,6 +3838,22 @@ static const struct hda_fixup alc269_fixups[] = {
3790 .type = HDA_FIXUP_FUNC, 3838 .type = HDA_FIXUP_FUNC,
3791 .v.func = alc282_fixup_asus_tx300, 3839 .v.func = alc282_fixup_asus_tx300,
3792 }, 3840 },
3841 [ALC283_FIXUP_INT_MIC] = {
3842 .type = HDA_FIXUP_VERBS,
3843 .v.verbs = (const struct hda_verb[]) {
3844 {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
3845 {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
3846 { }
3847 },
3848 .chained = true,
3849 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
3850 },
3851 [ALC290_FIXUP_MONO_SPEAKERS] = {
3852 .type = HDA_FIXUP_FUNC,
3853 .v.func = alc290_fixup_mono_speakers,
3854 .chained = true,
3855 .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
3856 },
3793}; 3857};
3794 3858
3795static const struct snd_pci_quirk alc269_fixup_tbl[] = { 3859static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3831,6 +3895,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3831 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3895 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3832 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3896 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3833 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3897 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3898 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
3834 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3899 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3835 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3900 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3836 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 3901 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -3853,6 +3918,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3853 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), 3918 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
3854 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), 3919 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
3855 SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), 3920 SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
3921 SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
3856 SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), 3922 SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
3857 SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 3923 SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
3858 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 3924 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -3874,7 +3940,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3874 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3940 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3875 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3941 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3876 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3942 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3877 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3943 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
3878 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3944 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3879 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 3945 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3880 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 3946 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -3938,6 +4004,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3938 {.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"}, 4004 {.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"},
3939 {.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"}, 4005 {.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"},
3940 {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"}, 4006 {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
4007 {.id = ALC269_FIXUP_HEADSET_MIC, .name = "headset-mic"},
3941 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, 4008 {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
3942 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, 4009 {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
3943 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, 4010 {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -4555,6 +4622,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4555 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4622 SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4556 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 4623 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
4557 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 4624 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
4625 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
4558 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 4626 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
4559 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), 4627 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
4560 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 4628 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 4f255dfee450..f59a321a6d6a 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4845,6 +4845,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
4845 if ((err = hdsp_get_iobox_version(hdsp)) < 0) 4845 if ((err = hdsp_get_iobox_version(hdsp)) < 0)
4846 return err; 4846 return err;
4847 } 4847 }
4848 memset(&hdsp_version, 0, sizeof(hdsp_version));
4848 hdsp_version.io_type = hdsp->io_type; 4849 hdsp_version.io_type = hdsp->io_type;
4849 hdsp_version.firmware_rev = hdsp->firmware_rev; 4850 hdsp_version.firmware_rev = hdsp->firmware_rev;
4850 if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version)))) 4851 if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version))))
diff --git a/sound/soc/blackfin/bf6xx-i2s.c b/sound/soc/blackfin/bf6xx-i2s.c
index c02405cc007d..5810a0603f2f 100644
--- a/sound/soc/blackfin/bf6xx-i2s.c
+++ b/sound/soc/blackfin/bf6xx-i2s.c
@@ -88,6 +88,7 @@ static int bfin_i2s_hw_params(struct snd_pcm_substream *substream,
88 case SNDRV_PCM_FORMAT_S8: 88 case SNDRV_PCM_FORMAT_S8:
89 param.spctl |= 0x70; 89 param.spctl |= 0x70;
90 sport->wdsize = 1; 90 sport->wdsize = 1;
91 break;
91 case SNDRV_PCM_FORMAT_S16_LE: 92 case SNDRV_PCM_FORMAT_S16_LE:
92 param.spctl |= 0xf0; 93 param.spctl |= 0xf0;
93 sport->wdsize = 2; 94 sport->wdsize = 2;
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 8af04343cc1a..259d1ac4492f 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -349,6 +349,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
349 val = ucontrol->value.integer.value[0]; 349 val = ucontrol->value.integer.value[0];
350 val2 = ucontrol->value.integer.value[1]; 350 val2 = ucontrol->value.integer.value[1];
351 351
352 if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
353 return -EINVAL;
354
352 err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m); 355 err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
353 if (err < 0) 356 if (err < 0)
354 return err; 357 return err;
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index b8ba0adacfce..80555d7551e6 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1225,13 +1225,18 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
1225 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev); 1225 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
1226 struct device *dev = codec->dev; 1226 struct device *dev = codec->dev;
1227 bool apply_fir, apply_iir; 1227 bool apply_fir, apply_iir;
1228 int req, status; 1228 unsigned int req;
1229 int status;
1229 1230
1230 dev_dbg(dev, "%s: Enter.\n", __func__); 1231 dev_dbg(dev, "%s: Enter.\n", __func__);
1231 1232
1232 mutex_lock(&drvdata->anc_lock); 1233 mutex_lock(&drvdata->anc_lock);
1233 1234
1234 req = ucontrol->value.integer.value[0]; 1235 req = ucontrol->value.integer.value[0];
1236 if (req >= ARRAY_SIZE(enum_anc_state)) {
1237 status = -EINVAL;
1238 goto cleanup;
1239 }
1235 if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR && 1240 if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
1236 req != ANC_APPLY_IIR) { 1241 req != ANC_APPLY_IIR) {
1237 dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n", 1242 dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 41cdd1642970..8dbcacd44e6a 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -1863,7 +1863,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
1863 struct max98095_pdata *pdata = max98095->pdata; 1863 struct max98095_pdata *pdata = max98095->pdata;
1864 int channel = max98095_get_eq_channel(kcontrol->id.name); 1864 int channel = max98095_get_eq_channel(kcontrol->id.name);
1865 struct max98095_cdata *cdata; 1865 struct max98095_cdata *cdata;
1866 int sel = ucontrol->value.integer.value[0]; 1866 unsigned int sel = ucontrol->value.integer.value[0];
1867 struct max98095_eq_cfg *coef_set; 1867 struct max98095_eq_cfg *coef_set;
1868 int fs, best, best_val, i; 1868 int fs, best, best_val, i;
1869 int regmask, regsave; 1869 int regmask, regsave;
@@ -2016,7 +2016,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
2016 struct max98095_pdata *pdata = max98095->pdata; 2016 struct max98095_pdata *pdata = max98095->pdata;
2017 int channel = max98095_get_bq_channel(codec, kcontrol->id.name); 2017 int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
2018 struct max98095_cdata *cdata; 2018 struct max98095_cdata *cdata;
2019 int sel = ucontrol->value.integer.value[0]; 2019 unsigned int sel = ucontrol->value.integer.value[0];
2020 struct max98095_biquad_cfg *coef_set; 2020 struct max98095_biquad_cfg *coef_set;
2021 int fs, best, best_val, i; 2021 int fs, best, best_val, i;
2022 int regmask, regsave; 2022 int regmask, regsave;
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 651ce0923675..c91eba504f92 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -270,7 +270,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
270static const struct regmap_config pcm1681_regmap = { 270static const struct regmap_config pcm1681_regmap = {
271 .reg_bits = 8, 271 .reg_bits = 8,
272 .val_bits = 8, 272 .val_bits = 8,
273 .max_register = ARRAY_SIZE(pcm1681_reg_defaults) + 1, 273 .max_register = 0x13,
274 .reg_defaults = pcm1681_reg_defaults, 274 .reg_defaults = pcm1681_reg_defaults,
275 .num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults), 275 .num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults),
276 .writeable_reg = pcm1681_writeable_reg, 276 .writeable_reg = pcm1681_writeable_reg,
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
index 2a8eccf64c76..7613181123fe 100644
--- a/sound/soc/codecs/pcm1792a.c
+++ b/sound/soc/codecs/pcm1792a.c
@@ -188,7 +188,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
188static const struct regmap_config pcm1792a_regmap = { 188static const struct regmap_config pcm1792a_regmap = {
189 .reg_bits = 8, 189 .reg_bits = 8,
190 .val_bits = 8, 190 .val_bits = 8,
191 .max_register = 24, 191 .max_register = 23,
192 .reg_defaults = pcm1792a_reg_defaults, 192 .reg_defaults = pcm1792a_reg_defaults,
193 .num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults), 193 .num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults),
194 .writeable_reg = pcm1792a_writeable_reg, 194 .writeable_reg = pcm1792a_writeable_reg,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6e3f269243e0..64ad84d8a306 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -674,6 +674,8 @@ static const struct snd_soc_dapm_route intercon[] = {
674 /* Left Input */ 674 /* Left Input */
675 {"Left Line1L Mux", "single-ended", "LINE1L"}, 675 {"Left Line1L Mux", "single-ended", "LINE1L"},
676 {"Left Line1L Mux", "differential", "LINE1L"}, 676 {"Left Line1L Mux", "differential", "LINE1L"},
677 {"Left Line1R Mux", "single-ended", "LINE1R"},
678 {"Left Line1R Mux", "differential", "LINE1R"},
677 679
678 {"Left Line2L Mux", "single-ended", "LINE2L"}, 680 {"Left Line2L Mux", "single-ended", "LINE2L"},
679 {"Left Line2L Mux", "differential", "LINE2L"}, 681 {"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +692,8 @@ static const struct snd_soc_dapm_route intercon[] = {
690 /* Right Input */ 692 /* Right Input */
691 {"Right Line1R Mux", "single-ended", "LINE1R"}, 693 {"Right Line1R Mux", "single-ended", "LINE1R"},
692 {"Right Line1R Mux", "differential", "LINE1R"}, 694 {"Right Line1R Mux", "differential", "LINE1R"},
695 {"Right Line1L Mux", "single-ended", "LINE1L"},
696 {"Right Line1L Mux", "differential", "LINE1L"},
693 697
694 {"Right Line2R Mux", "single-ended", "LINE2R"}, 698 {"Right Line2R Mux", "single-ended", "LINE2R"},
695 {"Right Line2R Mux", "differential", "LINE2R"}, 699 {"Right Line2R Mux", "differential", "LINE2R"},
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c6b743978d5e..6b81d0ce2c44 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -936,7 +936,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
936 ssi_private->ssi_phys = res.start; 936 ssi_private->ssi_phys = res.start;
937 937
938 ssi_private->irq = irq_of_parse_and_map(np, 0); 938 ssi_private->irq = irq_of_parse_and_map(np, 0);
939 if (ssi_private->irq == NO_IRQ) { 939 if (ssi_private->irq == 0) {
940 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); 940 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
941 return -ENXIO; 941 return -ENXIO;
942 } 942 }
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index a3d60d4bea4c..a2fd7321b5a9 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
112 return ret; 112 return ret;
113 } 113 }
114 114
115 if (machine_is_mx31_3ds()) { 115 if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
116 imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4, 116 imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
117 IMX_AUDMUX_V2_PTCR_SYN, 117 IMX_AUDMUX_V2_PTCR_SYN,
118 IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) | 118 IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 46c5b4fdfc52..ca1be1d9dcf0 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -62,7 +62,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
62 struct device_node *ssi_np, *codec_np; 62 struct device_node *ssi_np, *codec_np;
63 struct platform_device *ssi_pdev; 63 struct platform_device *ssi_pdev;
64 struct i2c_client *codec_dev; 64 struct i2c_client *codec_dev;
65 struct imx_sgtl5000_data *data; 65 struct imx_sgtl5000_data *data = NULL;
66 int int_port, ext_port; 66 int int_port, ext_port;
67 int ret; 67 int ret;
68 68
@@ -128,7 +128,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
128 goto fail; 128 goto fail;
129 } 129 }
130 130
131 data->codec_clk = devm_clk_get(&codec_dev->dev, NULL); 131 data->codec_clk = clk_get(&codec_dev->dev, NULL);
132 if (IS_ERR(data->codec_clk)) { 132 if (IS_ERR(data->codec_clk)) {
133 ret = PTR_ERR(data->codec_clk); 133 ret = PTR_ERR(data->codec_clk);
134 goto fail; 134 goto fail;
@@ -172,6 +172,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
172 return 0; 172 return 0;
173 173
174fail: 174fail:
175 if (data && !IS_ERR(data->codec_clk))
176 clk_put(data->codec_clk);
175 if (ssi_np) 177 if (ssi_np)
176 of_node_put(ssi_np); 178 of_node_put(ssi_np);
177 if (codec_np) 179 if (codec_np)
@@ -185,6 +187,7 @@ static int imx_sgtl5000_remove(struct platform_device *pdev)
185 struct imx_sgtl5000_data *data = platform_get_drvdata(pdev); 187 struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
186 188
187 snd_soc_unregister_card(&data->card); 189 snd_soc_unregister_card(&data->card);
190 clk_put(data->codec_clk);
188 191
189 return 0; 192 return 0;
190} 193}
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index f58bcd85c07f..57d6941676ff 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -600,19 +600,17 @@ static int imx_ssi_probe(struct platform_device *pdev)
600 ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx; 600 ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
601 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx; 601 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
602 602
603 ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params); 603 ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
604 if (ret) 604 ssi->dma_init = imx_pcm_dma_init(pdev);
605 goto failed_pcm_fiq;
606 605
607 ret = imx_pcm_dma_init(pdev); 606 if (ssi->fiq_init && ssi->dma_init) {
608 if (ret) 607 ret = ssi->fiq_init;
609 goto failed_pcm_dma; 608 goto failed_pcm;
609 }
610 610
611 return 0; 611 return 0;
612 612
613failed_pcm_dma: 613failed_pcm:
614 imx_pcm_fiq_exit(pdev);
615failed_pcm_fiq:
616 snd_soc_unregister_component(&pdev->dev); 614 snd_soc_unregister_component(&pdev->dev);
617failed_register: 615failed_register:
618 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
@@ -628,8 +626,11 @@ static int imx_ssi_remove(struct platform_device *pdev)
628 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 626 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
629 struct imx_ssi *ssi = platform_get_drvdata(pdev); 627 struct imx_ssi *ssi = platform_get_drvdata(pdev);
630 628
631 imx_pcm_dma_exit(pdev); 629 if (!ssi->dma_init)
632 imx_pcm_fiq_exit(pdev); 630 imx_pcm_dma_exit(pdev);
631
632 if (!ssi->fiq_init)
633 imx_pcm_fiq_exit(pdev);
633 634
634 snd_soc_unregister_component(&pdev->dev); 635 snd_soc_unregister_component(&pdev->dev);
635 636
diff --git a/sound/soc/fsl/imx-ssi.h b/sound/soc/fsl/imx-ssi.h
index fb1616ba8c59..560c40fc9ebb 100644
--- a/sound/soc/fsl/imx-ssi.h
+++ b/sound/soc/fsl/imx-ssi.h
@@ -211,6 +211,8 @@ struct imx_ssi {
211 struct imx_dma_data filter_data_rx; 211 struct imx_dma_data filter_data_rx;
212 struct imx_pcm_fiq_params fiq_params; 212 struct imx_pcm_fiq_params fiq_params;
213 213
214 int fiq_init;
215 int dma_init;
214 int enabled; 216 int enabled;
215}; 217};
216 218
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index daa78a0095fa..4a07f7179690 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -1,6 +1,6 @@
1config SND_OMAP_SOC 1config SND_OMAP_SOC
2 tristate "SoC Audio for the Texas Instruments OMAP chips" 2 tristate "SoC Audio for the Texas Instruments OMAP chips"
3 depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST) 3 depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
4 select SND_DMAENGINE_PCM 4 select SND_DMAENGINE_PCM
5 5
6config SND_OMAP_SOC_DMIC 6config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
26 26
27config SND_OMAP_SOC_RX51 27config SND_OMAP_SOC_RX51
28 tristate "SoC Audio support for Nokia RX-51" 28 tristate "SoC Audio support for Nokia RX-51"
29 depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST) 29 depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
30 select SND_OMAP_SOC_MCBSP 30 select SND_OMAP_SOC_MCBSP
31 select SND_SOC_TLV320AIC3X 31 select SND_SOC_TLV320AIC3X
32 select SND_SOC_TPA6130A2 32 select SND_SOC_TPA6130A2
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 9cc6986a8cfb..5dd87f4c919e 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -220,8 +220,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
220void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv, 220void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
221 struct rsnd_mod *mod, 221 struct rsnd_mod *mod,
222 enum rsnd_reg reg); 222 enum rsnd_reg reg);
223#define rsnd_is_gen1(s) ((s)->info->flags & RSND_GEN1) 223#define rsnd_is_gen1(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
224#define rsnd_is_gen2(s) ((s)->info->flags & RSND_GEN2) 224#define rsnd_is_gen2(s) (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
225 225
226/* 226/*
227 * R-Car ADG 227 * R-Car ADG
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4d0561312f3b..1a38be0d0ca8 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1380,7 +1380,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
1380 return -ENODEV; 1380 return -ENODEV;
1381 1381
1382 list_add(&cpu_dai->dapm.list, &card->dapm_list); 1382 list_add(&cpu_dai->dapm.list, &card->dapm_list);
1383 snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
1384 } 1383 }
1385 1384
1386 if (cpu_dai->driver->probe) { 1385 if (cpu_dai->driver->probe) {
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index d0323a693ba2..999550bbad40 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
262 } 262 }
263 263
264 area->vm_ops = &usb_stream_hwdep_vm_ops; 264 area->vm_ops = &usb_stream_hwdep_vm_ops;
265 area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 265 area->vm_flags |= VM_DONTDUMP;
266 if (!read)
267 area->vm_flags |= VM_DONTEXPAND;
266 area->vm_private_data = us122l; 268 area->vm_private_data = us122l;
267 atomic_inc(&us122l->mmap_count); 269 atomic_inc(&us122l->mmap_count);
268out: 270out:
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 63fb5219f0f8..6234a51625b1 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
299 usX2Y_clients_stop(usX2Y); 299 usX2Y_clients_stop(usX2Y);
300} 300}
301 301
302static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
303 struct snd_usX2Y_substream *subs, struct urb *urb)
304{
305 snd_printk(KERN_ERR
306"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
307"Most probably some urb of usb-frame %i is still missing.\n"
308"Cause could be too long delays in usb-hcd interrupt handling.\n",
309 usb_get_current_frame_number(usX2Y->dev),
310 subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
311 usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
312 usX2Y_clients_stop(usX2Y);
313}
314
315static void i_usX2Y_urb_complete(struct urb *urb) 302static void i_usX2Y_urb_complete(struct urb *urb)
316{ 303{
317 struct snd_usX2Y_substream *subs = urb->context; 304 struct snd_usX2Y_substream *subs = urb->context;
@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
328 usX2Y_error_urb_status(usX2Y, subs, urb); 315 usX2Y_error_urb_status(usX2Y, subs, urb);
329 return; 316 return;
330 } 317 }
331 if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF))) 318
332 subs->completed_urb = urb; 319 subs->completed_urb = urb;
333 else { 320
334 usX2Y_error_sequence(usX2Y, subs, urb);
335 return;
336 }
337 { 321 {
338 struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE], 322 struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
339 *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]; 323 *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index f2a1acdc4d83..814d0e887c62 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
244 usX2Y_error_urb_status(usX2Y, subs, urb); 244 usX2Y_error_urb_status(usX2Y, subs, urb);
245 return; 245 return;
246 } 246 }
247 if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
248 subs->completed_urb = urb;
249 else {
250 usX2Y_error_sequence(usX2Y, subs, urb);
251 return;
252 }
253 247
248 subs->completed_urb = urb;
254 capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]; 249 capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
255 capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2]; 250 capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
256 playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK]; 251 playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
diff --git a/tools/lib/lk/debugfs.c b/tools/lib/lk/debugfs.c
index 099e7cd022e4..7c4347962353 100644
--- a/tools/lib/lk/debugfs.c
+++ b/tools/lib/lk/debugfs.c
@@ -5,7 +5,6 @@
5#include <stdbool.h> 5#include <stdbool.h>
6#include <sys/vfs.h> 6#include <sys/vfs.h>
7#include <sys/mount.h> 7#include <sys/mount.h>
8#include <linux/magic.h>
9#include <linux/kernel.h> 8#include <linux/kernel.h>
10 9
11#include "debugfs.h" 10#include "debugfs.h"
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 3a0ff7fb71b6..64c043b7a438 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -770,6 +770,7 @@ check: $(OUTPUT)common-cmds.h
770install-bin: all 770install-bin: all
771 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' 771 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
772 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' 772 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
773 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
773 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 774 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
774ifndef NO_LIBPERL 775ifndef NO_LIBPERL
775 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 776 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 9570c2b0f83c..b2519e49424f 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, 32int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
33 struct perf_tsc_conversion *tc) 33 struct perf_tsc_conversion *tc)
34{ 34{
35 bool cap_usr_time_zero; 35 bool cap_user_time_zero;
36 u32 seq; 36 u32 seq;
37 int i = 0; 37 int i = 0;
38 38
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
42 tc->time_mult = pc->time_mult; 42 tc->time_mult = pc->time_mult;
43 tc->time_shift = pc->time_shift; 43 tc->time_shift = pc->time_shift;
44 tc->time_zero = pc->time_zero; 44 tc->time_zero = pc->time_zero;
45 cap_usr_time_zero = pc->cap_usr_time_zero; 45 cap_user_time_zero = pc->cap_user_time_zero;
46 rmb(); 46 rmb();
47 if (pc->lock == seq && !(seq & 1)) 47 if (pc->lock == seq && !(seq & 1))
48 break; 48 break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
52 } 52 }
53 } 53 }
54 54
55 if (!cap_usr_time_zero) 55 if (!cap_user_time_zero)
56 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
57 57
58 return 0; 58 return 0;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 423875c999b2..afe377b2884f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -321,8 +321,6 @@ found:
321 return perf_event__repipe(tool, event_sw, &sample_sw, machine); 321 return perf_event__repipe(tool, event_sw, &sample_sw, machine);
322} 322}
323 323
324extern volatile int session_done;
325
326static void sig_handler(int sig __maybe_unused) 324static void sig_handler(int sig __maybe_unused)
327{ 325{
328 session_done = 1; 326 session_done = 1;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c2dff9cb1f2c..9b5f077fee5b 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
101 101
102 dir1 = opendir(PATH_SYS_NODE); 102 dir1 = opendir(PATH_SYS_NODE);
103 if (!dir1) 103 if (!dir1)
104 return -1; 104 return 0;
105 105
106 while ((dent1 = readdir(dir1)) != NULL) { 106 while ((dent1 = readdir(dir1)) != NULL) {
107 if (dent1->d_type != DT_DIR || 107 if (dent1->d_type != DT_DIR ||
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e50d8d77419..72eae7498c09 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
401 return 0; 401 return 0;
402} 402}
403 403
404extern volatile int session_done;
405
406static void sig_handler(int sig __maybe_unused) 404static void sig_handler(int sig __maybe_unused)
407{ 405{
408 session_done = 1; 406 session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
568 } 566 }
569 } 567 }
570 568
569 if (session_done())
570 return 0;
571
571 if (nr_samples == 0) { 572 if (nr_samples == 0) {
572 ui__error("The %s file has no samples!\n", session->filename); 573 ui__error("The %s file has no samples!\n", session->filename);
573 return 0; 574 return 0;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7f31a3ded1b6..9c333ff3dfeb 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
553 .ordering_requires_timestamps = true, 553 .ordering_requires_timestamps = true,
554}; 554};
555 555
556extern volatile int session_done;
557
558static void sig_handler(int sig __maybe_unused) 556static void sig_handler(int sig __maybe_unused)
559{ 557{
560 session_done = 1; 558 session_done = 1;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f686d5ff594e..5098f144b92d 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -457,6 +457,7 @@ static int __run_perf_stat(int argc, const char **argv)
457 perror("failed to prepare workload"); 457 perror("failed to prepare workload");
458 return -1; 458 return -1;
459 } 459 }
460 child_pid = evsel_list->workload.pid;
460 } 461 }
461 462
462 if (group) 463 if (group)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f5aa6375e3e9..71aa3e35406b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -16,6 +16,23 @@
16#include <sys/mman.h> 16#include <sys/mman.h>
17#include <linux/futex.h> 17#include <linux/futex.h>
18 18
19/* For older distros: */
20#ifndef MAP_STACK
21# define MAP_STACK 0x20000
22#endif
23
24#ifndef MADV_HWPOISON
25# define MADV_HWPOISON 100
26#endif
27
28#ifndef MADV_MERGEABLE
29# define MADV_MERGEABLE 12
30#endif
31
32#ifndef MADV_UNMERGEABLE
33# define MADV_UNMERGEABLE 13
34#endif
35
19static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, 36static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
20 unsigned long arg, 37 unsigned long arg,
21 u8 arg_idx __maybe_unused, 38 u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
1038 1055
1039 trace->tool.sample = trace__process_sample; 1056 trace->tool.sample = trace__process_sample;
1040 trace->tool.mmap = perf_event__process_mmap; 1057 trace->tool.mmap = perf_event__process_mmap;
1058 trace->tool.mmap2 = perf_event__process_mmap2;
1041 trace->tool.comm = perf_event__process_comm; 1059 trace->tool.comm = perf_event__process_comm;
1042 trace->tool.exit = perf_event__process_exit; 1060 trace->tool.exit = perf_event__process_exit;
1043 trace->tool.fork = perf_event__process_fork; 1061 trace->tool.fork = perf_event__process_fork;
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 214e17e97e5c..5f6f9b3271bb 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -87,7 +87,7 @@ CFLAGS += -Wall
87CFLAGS += -Wextra 87CFLAGS += -Wextra
88CFLAGS += -std=gnu99 88CFLAGS += -std=gnu99
89 89
90EXTLIBS = -lelf -lpthread -lrt -lm 90EXTLIBS = -lelf -lpthread -lrt -lm -ldl
91 91
92ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y) 92ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
93 CFLAGS += -fstack-protector-all 93 CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
180ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y) 180ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
181 CFLAGS += -DLIBELF_MMAP 181 CFLAGS += -DLIBELF_MMAP
182endif 182endif
183ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
184 CFLAGS += -DHAVE_ELF_GETPHDRNUM
185endif
183 186
184# include ARCH specific config 187# include ARCH specific config
185-include $(src-perf)/arch/$(ARCH)/Makefile 188-include $(src-perf)/arch/$(ARCH)/Makefile
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 708fb8e9822a..f79305739ecc 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -61,6 +61,15 @@ int main(void)
61} 61}
62endef 62endef
63 63
64define SOURCE_ELF_GETPHDRNUM
65#include <libelf.h>
66int main(void)
67{
68 size_t dst;
69 return elf_getphdrnum(0, &dst);
70}
71endef
72
64ifndef NO_SLANG 73ifndef NO_SLANG
65define SOURCE_SLANG 74define SOURCE_SLANG
66#include <slang.h> 75#include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
210 219
211int main(void) 220int main(void)
212{ 221{
222 printf(\"error message: %s\", audit_errno_to_name(0));
213 return audit_open(); 223 return audit_open();
214} 224}
215endef 225endef
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bfc5a27597d6..7eae5488ecea 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
809 end = map__rip_2objdump(map, sym->end); 809 end = map__rip_2objdump(map, sym->end);
810 810
811 offset = line_ip - start; 811 offset = line_ip - start;
812 if (offset < 0 || (u64)line_ip > end) 812 if ((u64)line_ip < start || (u64)line_ip > end)
813 offset = -1; 813 offset = -1;
814 else 814 else
815 parsed_line = tmp2 + 1; 815 parsed_line = tmp2 + 1;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 3e5f5430a28a..7defd77105d0 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
263} 263}
264 264
265/** 265/**
266 * die_is_func_def - Ensure that this DIE is a subprogram and definition
267 * @dw_die: a DIE
268 *
269 * Ensure that this DIE is a subprogram and NOT a declaration. This
270 * returns true if @dw_die is a function definition.
271 **/
272bool die_is_func_def(Dwarf_Die *dw_die)
273{
274 Dwarf_Attribute attr;
275
276 return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
277 dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
278}
279
280/**
266 * die_get_data_member_location - Get the data-member offset 281 * die_get_data_member_location - Get the data-member offset
267 * @mb_die: a DIE of a member of a data structure 282 * @mb_die: a DIE of a member of a data structure
268 * @offs: The offset of the member in the data structure 283 * @offs: The offset of the member in the data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
392{ 407{
393 struct __addr_die_search_param *ad = data; 408 struct __addr_die_search_param *ad = data;
394 409
410 /*
 411	 * Since a declaration entry doesn't have a given pc, this always returns
 412	 * the function definition entry.
413 */
395 if (dwarf_tag(fn_die) == DW_TAG_subprogram && 414 if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
396 dwarf_haspc(fn_die, ad->addr)) { 415 dwarf_haspc(fn_die, ad->addr)) {
397 memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); 416 memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
@@ -407,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
407 * @die_mem: a buffer for result DIE 426 * @die_mem: a buffer for result DIE
408 * 427 *
409 * Search a non-inlined function DIE which includes @addr. Stores the 428 * Search a non-inlined function DIE which includes @addr. Stores the
410 * DIE to @die_mem and returns it if found. Returns NULl if failed. 429 * DIE to @die_mem and returns it if found. Returns NULL if failed.
411 */ 430 */
412Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, 431Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
413 Dwarf_Die *die_mem) 432 Dwarf_Die *die_mem)
@@ -435,15 +454,32 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
435} 454}
436 455
437/** 456/**
457 * die_find_top_inlinefunc - Search the top inlined function at given address
 458 * @sp_die: a subprogram DIE which includes @addr
459 * @addr: target address
460 * @die_mem: a buffer for result DIE
461 *
462 * Search an inlined function DIE which includes @addr. Stores the
463 * DIE to @die_mem and returns it if found. Returns NULL if failed.
464 * Even if several inlined functions are expanded recursively, this
465 * doesn't trace it down, and returns the topmost one.
466 */
467Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
468 Dwarf_Die *die_mem)
469{
470 return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
471}
472
473/**
438 * die_find_inlinefunc - Search an inlined function at given address 474 * die_find_inlinefunc - Search an inlined function at given address
439 * @cu_die: a CU DIE which including @addr 475 * @sp_die: a subprogram DIE which includes @addr
440 * @addr: target address 476 * @addr: target address
441 * @die_mem: a buffer for result DIE 477 * @die_mem: a buffer for result DIE
442 * 478 *
443 * Search an inlined function DIE which includes @addr. Stores the 479 * Search an inlined function DIE which includes @addr. Stores the
444 * DIE to @die_mem and returns it if found. Returns NULl if failed. 480 * DIE to @die_mem and returns it if found. Returns NULL if failed.
445 * If several inlined functions are expanded recursively, this trace 481 * If several inlined functions are expanded recursively, this trace
446 * it and returns deepest one. 482 * it down and returns deepest one.
447 */ 483 */
448Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 484Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
449 Dwarf_Die *die_mem) 485 Dwarf_Die *die_mem)
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 6ce1717784b7..b4fe90c6cb2d 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
38extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, 38extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
39 int (*callback)(Dwarf_Die *, void *), void *data); 39 int (*callback)(Dwarf_Die *, void *), void *data);
40 40
41/* Ensure that this DIE is a subprogram and definition (not declaration) */
42extern bool die_is_func_def(Dwarf_Die *dw_die);
43
41/* Compare diename and tname */ 44/* Compare diename and tname */
42extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); 45extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
43 46
@@ -76,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
76extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, 79extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
77 Dwarf_Die *die_mem); 80 Dwarf_Die *die_mem);
78 81
79/* Search an inlined function including given address */ 82/* Search the top inlined function including given address */
83extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
84 Dwarf_Die *die_mem);
85
86/* Search the deepest inlined function including given address */
80extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, 87extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
81 Dwarf_Die *die_mem); 88 Dwarf_Die *die_mem);
82 89
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 9b393e7dca6f..63df031fc9c7 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -187,7 +187,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
187 return -1; 187 return -1;
188 } 188 }
189 189
190 event->header.type = PERF_RECORD_MMAP2; 190 event->header.type = PERF_RECORD_MMAP;
191 /* 191 /*
192 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 192 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
193 */ 193 */
@@ -198,7 +198,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
198 char prot[5]; 198 char prot[5];
199 char execname[PATH_MAX]; 199 char execname[PATH_MAX];
200 char anonstr[] = "//anon"; 200 char anonstr[] = "//anon";
201 unsigned int ino;
202 size_t size; 201 size_t size;
203 ssize_t n; 202 ssize_t n;
204 203
@@ -209,13 +208,10 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
209 strcpy(execname, ""); 208 strcpy(execname, "");
210 209
211 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 210 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
212 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", 211 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
213 &event->mmap2.start, &event->mmap2.len, prot, 212 &event->mmap.start, &event->mmap.len, prot,
214 &event->mmap2.pgoff, &event->mmap2.maj, 213 &event->mmap.pgoff,
215 &event->mmap2.min, 214 execname);
216 &ino, execname);
217
218 event->mmap2.ino = (u64)ino;
219 215
220 if (n != 8) 216 if (n != 8)
221 continue; 217 continue;
@@ -227,15 +223,15 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
227 strcpy(execname, anonstr); 223 strcpy(execname, anonstr);
228 224
229 size = strlen(execname) + 1; 225 size = strlen(execname) + 1;
230 memcpy(event->mmap2.filename, execname, size); 226 memcpy(event->mmap.filename, execname, size);
231 size = PERF_ALIGN(size, sizeof(u64)); 227 size = PERF_ALIGN(size, sizeof(u64));
232 event->mmap2.len -= event->mmap.start; 228 event->mmap.len -= event->mmap.start;
233 event->mmap2.header.size = (sizeof(event->mmap2) - 229 event->mmap.header.size = (sizeof(event->mmap) -
234 (sizeof(event->mmap2.filename) - size)); 230 (sizeof(event->mmap.filename) - size));
235 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 231 memset(event->mmap.filename + size, 0, machine->id_hdr_size);
236 event->mmap2.header.size += machine->id_hdr_size; 232 event->mmap.header.size += machine->id_hdr_size;
237 event->mmap2.pid = tgid; 233 event->mmap.pid = tgid;
238 event->mmap2.tid = pid; 234 event->mmap.tid = pid;
239 235
240 if (process(tool, event, &synth_sample, machine) != 0) { 236 if (process(tool, event, &synth_sample, machine) != 0) {
241 rc = -1; 237 rc = -1;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba0..9f1ef9bee2d0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -678,7 +678,6 @@ void perf_evsel__config(struct perf_evsel *evsel,
678 attr->sample_type |= PERF_SAMPLE_WEIGHT; 678 attr->sample_type |= PERF_SAMPLE_WEIGHT;
679 679
680 attr->mmap = track; 680 attr->mmap = track;
681 attr->mmap2 = track && !perf_missing_features.mmap2;
682 attr->comm = track; 681 attr->comm = track;
683 682
684 /* 683 /*
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 26441d0e571b..c3e5a3b817ab 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
199 return write_padded(fd, name, name_len + 1, len); 199 return write_padded(fd, name, name_len + 1, len);
200} 200}
201 201
202static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, 202static int __dsos__write_buildid_table(struct list_head *head,
203 u16 misc, int fd) 203 struct machine *machine,
204 pid_t pid, u16 misc, int fd)
204{ 205{
206 char nm[PATH_MAX];
205 struct dso *pos; 207 struct dso *pos;
206 208
207 dsos__for_each_with_build_id(pos, head) { 209 dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
215 if (is_vdso_map(pos->short_name)) { 217 if (is_vdso_map(pos->short_name)) {
216 name = (char *) VDSO__MAP_NAME; 218 name = (char *) VDSO__MAP_NAME;
217 name_len = sizeof(VDSO__MAP_NAME) + 1; 219 name_len = sizeof(VDSO__MAP_NAME) + 1;
220 } else if (dso__is_kcore(pos)) {
221 machine__mmap_name(machine, nm, sizeof(nm));
222 name = nm;
223 name_len = strlen(nm) + 1;
218 } else { 224 } else {
219 name = pos->long_name; 225 name = pos->long_name;
220 name_len = pos->long_name_len + 1; 226 name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
240 umisc = PERF_RECORD_MISC_GUEST_USER; 246 umisc = PERF_RECORD_MISC_GUEST_USER;
241 } 247 }
242 248
243 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, 249 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
244 kmisc, fd); 250 machine->pid, kmisc, fd);
245 if (err == 0) 251 if (err == 0)
246 err = __dsos__write_buildid_table(&machine->user_dsos, 252 err = __dsos__write_buildid_table(&machine->user_dsos, machine,
247 machine->pid, umisc, fd); 253 machine->pid, umisc, fd);
248 return err; 254 return err;
249} 255}
@@ -375,23 +381,31 @@ out_free:
375 return err; 381 return err;
376} 382}
377 383
378static int dso__cache_build_id(struct dso *dso, const char *debugdir) 384static int dso__cache_build_id(struct dso *dso, struct machine *machine,
385 const char *debugdir)
379{ 386{
380 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; 387 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
381 bool is_vdso = is_vdso_map(dso->short_name); 388 bool is_vdso = is_vdso_map(dso->short_name);
389 char *name = dso->long_name;
390 char nm[PATH_MAX];
382 391
383 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), 392 if (dso__is_kcore(dso)) {
384 dso->long_name, debugdir, 393 is_kallsyms = true;
385 is_kallsyms, is_vdso); 394 machine__mmap_name(machine, nm, sizeof(nm));
395 name = nm;
396 }
397 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
398 debugdir, is_kallsyms, is_vdso);
386} 399}
387 400
388static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) 401static int __dsos__cache_build_ids(struct list_head *head,
402 struct machine *machine, const char *debugdir)
389{ 403{
390 struct dso *pos; 404 struct dso *pos;
391 int err = 0; 405 int err = 0;
392 406
393 dsos__for_each_with_build_id(pos, head) 407 dsos__for_each_with_build_id(pos, head)
394 if (dso__cache_build_id(pos, debugdir)) 408 if (dso__cache_build_id(pos, machine, debugdir))
395 err = -1; 409 err = -1;
396 410
397 return err; 411 return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
399 413
400static int machine__cache_build_ids(struct machine *machine, const char *debugdir) 414static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
401{ 415{
402 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); 416 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
403 ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); 417 debugdir);
418 ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
404 return ret; 419 return ret;
405} 420}
406 421
@@ -2753,6 +2768,18 @@ int perf_session__read_header(struct perf_session *session)
2753 if (perf_file_header__read(&f_header, header, fd) < 0) 2768 if (perf_file_header__read(&f_header, header, fd) < 0)
2754 return -EINVAL; 2769 return -EINVAL;
2755 2770
2771 /*
2772 * Sanity check that perf.data was written cleanly; data size is
2773 * initialized to 0 and updated only if the on_exit function is run.
2774 * If data size is still 0 then the file contains only partial
 2775	 * information. Just warn the user and process as much of it as possible.
2776 */
2777 if (f_header.data.size == 0) {
2778 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2779 "Was the 'perf record' command properly terminated?\n",
2780 session->filename);
2781 }
2782
2756 nr_attrs = f_header.attrs.size / f_header.attr_size; 2783 nr_attrs = f_header.attrs.size / f_header.attr_size;
2757 lseek(fd, f_header.attrs.offset, SEEK_SET); 2784 lseek(fd, f_header.attrs.offset, SEEK_SET);
2758 2785
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 46a0d35a05e1..9ff6cf3e9a99 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
611 next = rb_first(root); 611 next = rb_first(root);
612 612
613 while (next) { 613 while (next) {
614 if (session_done())
615 break;
614 n = rb_entry(next, struct hist_entry, rb_node_in); 616 n = rb_entry(next, struct hist_entry, rb_node_in);
615 next = rb_next(&n->rb_node_in); 617 next = rb_next(&n->rb_node_in);
616 618
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 933d14f287ca..6188d2876a71 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
792 modules = path; 792 modules = path;
793 } 793 }
794 794
795 if (symbol__restricted_filename(path, "/proc/modules")) 795 if (symbol__restricted_filename(modules, "/proc/modules"))
796 return -1; 796 return -1;
797 797
798 file = fopen(modules, "r"); 798 file = fopen(modules, "r");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index be0329394d56..f0692737ebf1 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
118static int debuginfo__init_offline_dwarf(struct debuginfo *self, 118static int debuginfo__init_offline_dwarf(struct debuginfo *self,
119 const char *path) 119 const char *path)
120{ 120{
121 Dwfl_Module *mod;
122 int fd; 121 int fd;
123 122
124 fd = open(path, O_RDONLY); 123 fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
129 if (!self->dwfl) 128 if (!self->dwfl)
130 goto error; 129 goto error;
131 130
132 mod = dwfl_report_offline(self->dwfl, "", "", fd); 131 self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
133 if (!mod) 132 if (!self->mod)
134 goto error; 133 goto error;
135 134
136 self->dbg = dwfl_module_getdwarf(mod, &self->bias); 135 self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
137 if (!self->dbg) 136 if (!self->dbg)
138 goto error; 137 goto error;
139 138
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
676} 675}
677 676
678/* Convert subprogram DIE to trace point */ 677/* Convert subprogram DIE to trace point */
679static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, 678static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
680 bool retprobe, struct probe_trace_point *tp) 679 Dwarf_Addr paddr, bool retprobe,
680 struct probe_trace_point *tp)
681{ 681{
682 Dwarf_Addr eaddr, highaddr; 682 Dwarf_Addr eaddr, highaddr;
683 const char *name; 683 GElf_Sym sym;
684 684 const char *symbol;
685 /* Copy the name of probe point */ 685
686 name = dwarf_diename(sp_die); 686 /* Verify the address is correct */
687 if (name) { 687 if (dwarf_entrypc(sp_die, &eaddr) != 0) {
688 if (dwarf_entrypc(sp_die, &eaddr) != 0) { 688 pr_warning("Failed to get entry address of %s\n",
689 pr_warning("Failed to get entry address of %s\n", 689 dwarf_diename(sp_die));
690 dwarf_diename(sp_die)); 690 return -ENOENT;
691 return -ENOENT; 691 }
692 } 692 if (dwarf_highpc(sp_die, &highaddr) != 0) {
693 if (dwarf_highpc(sp_die, &highaddr) != 0) { 693 pr_warning("Failed to get end address of %s\n",
694 pr_warning("Failed to get end address of %s\n", 694 dwarf_diename(sp_die));
695 dwarf_diename(sp_die)); 695 return -ENOENT;
696 return -ENOENT; 696 }
697 } 697 if (paddr > highaddr) {
698 if (paddr > highaddr) { 698 pr_warning("Offset specified is greater than size of %s\n",
699 pr_warning("Offset specified is greater than size of %s\n", 699 dwarf_diename(sp_die));
700 dwarf_diename(sp_die)); 700 return -EINVAL;
701 return -EINVAL; 701 }
702 } 702
703 tp->symbol = strdup(name); 703 /* Get an appropriate symbol from symtab */
704 if (tp->symbol == NULL) 704 symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
705 return -ENOMEM; 705 if (!symbol) {
706 tp->offset = (unsigned long)(paddr - eaddr); 706 pr_warning("Failed to find symbol at 0x%lx\n",
707 } else 707 (unsigned long)paddr);
708 /* This function has no name. */ 708 return -ENOENT;
709 tp->offset = (unsigned long)paddr; 709 }
710 tp->offset = (unsigned long)(paddr - sym.st_value);
711 tp->symbol = strdup(symbol);
712 if (!tp->symbol)
713 return -ENOMEM;
710 714
711 /* Return probe must be on the head of a subprogram */ 715 /* Return probe must be on the head of a subprogram */
712 if (retprobe) { 716 if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
 	}
 
 	/* If not a real subprogram, find a real one */
-	if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+	if (!die_is_func_def(sc_die)) {
 		if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
 			pr_warning("Failed to find probe point in any "
 				   "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
 	struct dwarf_callback_param *param = data;
 	struct probe_finder *pf = param->data;
 	struct perf_probe_point *pp = &pf->pev->point;
-	Dwarf_Attribute attr;
 
 	/* Check tag and diename */
-	if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-	    !die_compare_name(sp_die, pp->function) ||
-	    dwarf_attr(sp_die, DW_AT_declaration, &attr))
+	if (!die_is_func_def(sp_die) ||
+	    !die_compare_name(sp_die, pp->function))
 		return DWARF_CB_OK;
 
 	/* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 	tev = &tf->tevs[tf->ntevs++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
 				     pf->pev->point.retprobe, &tev->point);
 	if (ret < 0)
 		return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
 {
 	struct trace_event_finder tf = {
 			.pf = {.pev = pev, .callback = add_probe_trace_event},
-			.max_tevs = max_tevs};
+			.mod = self->mod, .max_tevs = max_tevs};
 	int ret;
 
 	/* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
 	vl = &af->vls[af->nvls++];
 
 	/* Trace point should be converted from subprogram DIE */
-	ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+	ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
 				     pf->pev->point.retprobe, &vl->point);
 	if (ret < 0)
 		return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
 {
 	struct available_var_finder af = {
 			.pf = {.pev = pev, .callback = add_available_vars},
+			.mod = self->mod,
 			.max_vls = max_vls, .externs = externs};
 	int ret;
 
@@ -1324,8 +1327,8 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
 				struct perf_probe_point *ppt)
 {
 	Dwarf_Die cudie, spdie, indie;
-	Dwarf_Addr _addr, baseaddr;
-	const char *fname = NULL, *func = NULL, *tmp;
+	Dwarf_Addr _addr = 0, baseaddr = 0;
+	const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
 	int baseline = 0, lineno = 0, ret = 0;
 
 	/* Adjust address with bias */
@@ -1346,27 +1349,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
 	/* Find a corresponding function (name, baseline and baseaddr) */
 	if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
 		/* Get function entry information */
-		tmp = dwarf_diename(&spdie);
-		if (!tmp ||
+		func = basefunc = dwarf_diename(&spdie);
+		if (!func ||
 		    dwarf_entrypc(&spdie, &baseaddr) != 0 ||
-		    dwarf_decl_line(&spdie, &baseline) != 0)
+		    dwarf_decl_line(&spdie, &baseline) != 0) {
+			lineno = 0;
 			goto post;
-		func = tmp;
+		}
 
-		if (addr == (unsigned long)baseaddr)
+		fname = dwarf_decl_file(&spdie);
+		if (addr == (unsigned long)baseaddr) {
 			/* Function entry - Relative line number is 0 */
 			lineno = baseline;
-		else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
-					     &indie)) {
+			goto post;
+		}
+
+		/* Track down the inline functions step by step */
+		while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
+						&indie)) {
+			/* There is an inline function */
 			if (dwarf_entrypc(&indie, &_addr) == 0 &&
-			    _addr == addr)
+			    _addr == addr) {
 				/*
 				 * addr is at an inline function entry.
 				 * In this case, lineno should be the call-site
-				 * line number.
+				 * line number. (overwrite lineinfo)
 				 */
 				lineno = die_get_call_lineno(&indie);
-			else {
+				fname = die_get_call_file(&indie);
+				break;
+			} else {
 				/*
 				 * addr is in an inline function body.
 				 * Since lineno points one of the lines
@@ -1374,19 +1386,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
 				 * be the entry line of the inline function.
 				 */
 				tmp = dwarf_diename(&indie);
-				if (tmp &&
-				    dwarf_decl_line(&spdie, &baseline) == 0)
-					func = tmp;
+				if (!tmp ||
+				    dwarf_decl_line(&indie, &baseline) != 0)
+					break;
+				func = tmp;
+				spdie = indie;
 			}
 		}
+		/* Verify the lineno and baseline are in a same file */
+		tmp = dwarf_decl_file(&spdie);
+		if (!tmp || strcmp(tmp, fname) != 0)
+			lineno = 0;
 	}
 
 post:
 	/* Make a relative line number or an offset */
 	if (lineno)
 		ppt->line = lineno - baseline;
-	else if (func)
+	else if (basefunc) {
 		ppt->offset = addr - (unsigned long)baseaddr;
+		func = basefunc;
+	}
 
 	/* Duplicate strings */
 	if (func) {
@@ -1474,7 +1494,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
 	return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
 	struct dwarf_callback_param *param = data;
@@ -1485,7 +1505,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 	if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
 		return DWARF_CB_OK;
 
-	if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+	if (die_is_func_def(sp_die) &&
 	    die_compare_name(sp_die, lr->function)) {
 		lf->fname = dwarf_decl_file(sp_die);
 		dwarf_decl_line(sp_die, &lr->offset);
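
The probe-finder.c hunks above stop trusting the DWARF DIE name and instead ask libdwfl for the nearest symtab symbol covering the probe address, recording the probe point as symbol+offset relative to sym.st_value. A minimal sketch of that lookup, assuming only the standard elfutils libdwfl API (addr_to_probe_point() is a made-up helper name, not part of the patch):

/*
 * Illustrative sketch: resolve an address to "symbol+offset" the way the
 * new convert_to_trace_point() does.
 */
#include <elfutils/libdwfl.h>
#include <stdio.h>

static int addr_to_probe_point(Dwfl_Module *mod, GElf_Addr paddr)
{
	GElf_Sym sym;
	const char *symbol;

	/* Nearest symtab symbol covering paddr */
	symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
	if (!symbol)
		return -1;

	/* Probe point expressed relative to the symbol start */
	printf("%s+%#lx\n", symbol, (unsigned long)(paddr - sym.st_value));
	return 0;
}
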
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 17e94d0c36f9..3b7d63018960 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
 /* debug information structure */
 struct debuginfo {
 	Dwarf		*dbg;
+	Dwfl_Module	*mod;
 	Dwfl		*dwfl;
 	Dwarf_Addr	bias;
 };
@@ -77,6 +78,7 @@ struct probe_finder {
 
 struct trace_event_finder {
 	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For solving symbols */
 	struct probe_trace_event *tevs;		/* Found trace events */
 	int			ntevs;		/* Number of trace events */
 	int			max_tevs;	/* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
 
 struct available_var_finder {
 	struct probe_finder	pf;
+	Dwfl_Module		*mod;		/* For solving symbols */
 	struct variable_list	*vls;		/* Found variable lists */
 	int			nvls;		/* Number of variable lists */
 	int			max_vls;	/* Max no. of variable lists */
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index a85e4ae5f3ac..c0c9795c4f02 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -282,7 +282,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
 
 	event = find_cache_event(evsel);
 	if (!event)
-		die("ug! no event found for type %" PRIu64, evsel->attr.config);
+		die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
 
 	pid = raw_field_value(event, "common_pid", data);
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 51f5edf2a6d0..568b750c01f6 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -256,6 +256,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
 		tool->sample = process_event_sample_stub;
 	if (tool->mmap == NULL)
 		tool->mmap = process_event_stub;
+	if (tool->mmap2 == NULL)
+		tool->mmap2 = process_event_stub;
 	if (tool->comm == NULL)
 		tool->comm = process_event_stub;
 	if (tool->fork == NULL)
@@ -531,6 +533,9 @@ static int flush_sample_queue(struct perf_session *s,
 		return 0;
 
 	list_for_each_entry_safe(iter, tmp, head, list) {
+		if (session_done())
+			return 0;
+
 		if (iter->timestamp > limit)
 			break;
 
@@ -1160,7 +1165,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
 	}
 }
 
-#define session_done()	(*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1308,7 +1312,7 @@ int __perf_session__process_events(struct perf_session *session,
 	file_offset = page_offset;
 	head = data_offset - page_offset;
 
-	if (data_offset + data_size < file_size)
+	if (data_size && (data_offset + data_size < file_size))
 		file_size = data_offset + data_size;
 
 	progress_next = file_size / 16;
@@ -1372,10 +1376,13 @@ more:
 				   "Processing events...");
 	}
 
+	err = 0;
+	if (session_done())
+		goto out_err;
+
 	if (file_pos < file_size)
 		goto more;
 
-	err = 0;
 	/* do the final flush for ordered samples */
 	session->ordered_samples.next_flush = ULLONG_MAX;
 	err = flush_sample_queue(session, tool);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 3aa75fb2225f..04bf7373a7e5 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
 	__perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done()	(*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
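
The session.c and session.h hunks above expose session_done as a flag that the event-processing and sample-flush loops poll, so processing can stop early instead of grinding through the whole perf.data file. A minimal sketch of the intended caller side, assuming a builtin command that installs a SIGINT handler (the handler name below is illustrative):

#include <signal.h>

extern volatile int session_done;	/* provided by util/session.c */

/* Set on Ctrl-C; the loops above notice it via session_done() and bail out. */
static void sig_handler(int sig)
{
	(void)sig;
	session_done = 1;
}

/* typical setup in the command's run function, before processing events: */
/*	signal(SIGINT, sig_handler);	*/
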
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a7b9ab557380..a9c829be5216 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -8,6 +8,22 @@
 #include "symbol.h"
 #include "debug.h"
 
+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+	GElf_Ehdr gehdr;
+	GElf_Ehdr *ehdr;
+
+	ehdr = gelf_getehdr(elf, &gehdr);
+	if (!ehdr)
+		return -1;
+
+	*dst = ehdr->e_phnum;
+
+	return 0;
+}
+#endif
+
 #ifndef NT_GNU_BUILD_ID
 #define NT_GNU_BUILD_ID 3
 #endif
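
The symbol-elf.c hunk adds a local elf_getphdrnum() fallback for libelf versions that predate that API, reading the program-header count from the ELF header instead. A small sketch of how such a count is typically consumed, assuming only standard gelf calls (count_load_segments() is a made-up name):

#include <elf.h>
#include <gelf.h>

static int count_load_segments(Elf *elf)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int nr_load = 0;

	/* Works with either the real elf_getphdrnum() or the fallback above */
	if (elf_getphdrnum(elf, &phdrnum) != 0)
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) != NULL && phdr.p_type == PT_LOAD)
			nr_load++;
	}

	return nr_load;
}
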
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index fe7a27d67d2b..e9e1c03f927d 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
 	char *next = NULL;
 	char *addr_str;
 	char *mod;
-	char *fmt;
+	char *fmt = NULL;
 
 	line = strtok_r(file, "\n", &next);
 	while (line) {
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 4fa655d68a81..41bd85559d4b 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -151,7 +151,7 @@ static int check_timer_create(int which)
 	fflush(stdout);
 
 	done = 0;
-	timer_create(which, NULL, &id);
+	err = timer_create(which, NULL, &id);
 	if (err < 0) {
 		perror("Can't create timer\n");
 		return -1;
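
The selftest hunk captures timer_create()'s return value so the check that follows tests the current call rather than a stale err. A standalone sketch of that idiom, assuming only the POSIX timer API (create_timer_checked() is a made-up name; link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

static int create_timer_checked(clockid_t which, timer_t *id)
{
	/* timer_create() returns 0 on success, -1 with errno set on failure */
	int err = timer_create(which, NULL, id);

	if (err < 0) {
		perror("timer_create");
		return -1;
	}

	return 0;
}
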
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 979bff485fb0..a9dd682cf5e3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1064,10 +1064,12 @@ EXPORT_SYMBOL_GPL(gfn_to_hva);
 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 {
 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
-	if (writable)
+	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
+
+	if (!kvm_is_error_hva(hva) && writable)
 		*writable = !memslot_is_readonly(slot);
 
-	return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
+	return hva;
 }
 
 static int kvm_read_hva(void *data, void __user *hva, int len)
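
The kvm_main.c hunk computes the host virtual address once and only reports *writable for a valid HVA, so callers can rely on the usual error-HVA check. A hedged sketch of the caller-side pattern, modeled on the existing guest-page readers in this file (read_guest_page_prot() is an invented helper name, not part of the patch):

static int read_guest_page_prot(struct kvm *kvm, gfn_t gfn, void *data,
				int len, bool *writable)
{
	unsigned long hva = gfn_to_hva_prot(kvm, gfn, writable);

	/* Validate the HVA before trusting the address or the writable flag */
	if (kvm_is_error_hva(hva))
		return -EFAULT;

	return copy_from_user(data, (void __user *)hva, len) ? -EFAULT : 0;
}
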