author     Ingo Molnar <mingo@kernel.org>    2014-02-27 06:41:17 -0500
committer  Ingo Molnar <mingo@kernel.org>    2014-02-27 06:41:17 -0500
commit     ff5a7088f0f04dd246e514f898cab0c863c3598d (patch)
tree       68a8f588e60fba05ee54b74b32a3269a72d130b9
parent     7e74efcf76c16f851df5c838c143c4a1865ea9fa (diff)
parent     e3703f8cdfcf39c25c4338c3ad8e68891cca3731 (diff)
Merge branch 'perf/urgent' into perf/core
Merge the latest fixes before queueing up new changes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--.gitignore3
-rw-r--r--Documentation/00-INDEX24
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt119
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/arm/00-INDEX14
-rw-r--r--Documentation/blackfin/00-INDEX6
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/devicetree/00-INDEX2
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/atmel-hsmci.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt5
-rw-r--r--Documentation/devicetree/bindings/net/sti-dwmac.txt58
-rw-r--r--Documentation/devicetree/bindings/power/bq2415x.txt47
-rw-r--r--Documentation/devicetree/bindings/spi/spi_atmel.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt6
-rw-r--r--Documentation/fb/00-INDEX6
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/nfs/00-INDEX4
-rw-r--r--Documentation/i2c/instantiating-devices41
-rw-r--r--Documentation/ide/00-INDEX2
-rw-r--r--Documentation/laptops/00-INDEX6
-rw-r--r--Documentation/leds/00-INDEX8
-rw-r--r--Documentation/m68k/00-INDEX2
-rw-r--r--Documentation/networking/00-INDEX30
-rw-r--r--Documentation/networking/3c505.txt45
-rw-r--r--Documentation/phy.txt26
-rw-r--r--Documentation/power/00-INDEX6
-rw-r--r--Documentation/ptp/testptp.c11
-rw-r--r--Documentation/s390/00-INDEX8
-rw-r--r--Documentation/scheduler/00-INDEX2
-rw-r--r--Documentation/scsi/00-INDEX16
-rw-r--r--Documentation/serial/00-INDEX6
-rw-r--r--Documentation/spi/00-INDEX22
-rw-r--r--Documentation/spi/spi-summary17
-rw-r--r--Documentation/timers/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/vm/00-INDEX4
-rw-r--r--Documentation/w1/masters/00-INDEX4
-rw-r--r--Documentation/w1/slaves/00-INDEX2
-rw-r--r--Documentation/x86/00-INDEX18
-rw-r--r--Documentation/zh_CN/arm64/booting.txt65
-rw-r--r--Documentation/zh_CN/arm64/memory.txt46
-rw-r--r--Documentation/zh_CN/arm64/tagged-pointers.txt52
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile10
-rw-r--r--arch/arm/boot/dts/Makefile4
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts11
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi3
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts229
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/dove.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts10
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi10
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dts6
-rw-r--r--arch/arm/boot/dts/omap3-n9.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n950.dts2
-rw-r--r--arch/arm/boot/dts/omap3-overo-storm-tobi.dts22
-rw-r--r--arch/arm/boot/dts/omap3-overo-tobi-common.dtsi (renamed from arch/arm/boot/dts/omap3-tobi.dts)3
-rw-r--r--arch/arm/boot/dts/omap3-overo-tobi.dts22
-rw-r--r--arch/arm/boot/dts/omap3-overo.dtsi3
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi1
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi4
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi4
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi4
-rw-r--r--arch/arm/boot/dts/testcases/tests.dtsi2
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts4
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h15
-rw-r--r--arch/arm/include/asm/spinlock.h15
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/mach-hisi/Kconfig2
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c3
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c3
-rw-r--r--arch/arm/mach-imx/common.h4
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c2
-rw-r--r--arch/arm/mach-moxart/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap2/Kconfig10
-rw-r--r--arch/arm/mach-omap2/gpmc.c4
-rw-r--r--arch/arm/mach-omap2/io.c9
-rw-r--r--arch/arm/mach-pxa/am300epd.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/balloon3.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/corgi.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/csb726.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/gumstix.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/idp.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/palmld.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmt5.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtc.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtx.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm027.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm990_baseboard.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/poodle.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/spitz.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/tosa.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/trizeps4.h2
-rw-r--r--arch/arm/mach-pxa/mioa701.c9
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-tegra/pm.c1
-rw-r--r--arch/arm/mach-tegra/tegra.c10
-rw-r--r--arch/arm/mach-zynq/common.c14
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/mm.h1
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/proc-v6.S3
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/mimc200/fram.c1
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/io.h2
-rw-r--r--arch/m68k/include/asm/Kbuild6
-rw-r--r--arch/m68k/include/asm/barrier.h8
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h2
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/microblaze/include/asm/delay.h2
-rw-r--r--arch/microblaze/include/asm/io.h6
-rw-r--r--arch/microblaze/kernel/head.S2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h21
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h26
-rw-r--r--arch/powerpc/include/asm/pgtable.h22
-rw-r--r--arch/powerpc/include/asm/sections.h12
-rw-r--r--arch/powerpc/include/asm/vdso.h6
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/eeh.c32
-rw-r--r--arch/powerpc/kernel/eeh_driver.c8
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c6
-rw-r--r--arch/powerpc/kernel/misc_32.S5
-rw-r--r--arch/powerpc/kernel/reloc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_32.c5
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32_wrapper.S2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64_wrapper.S2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c14
-rw-r--r--arch/powerpc/mm/pgtable_64.c12
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c144
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c32
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c84
-rw-r--r--arch/powerpc/platforms/powernv/pci.c10
-rw-r--r--arch/powerpc/platforms/powernv/pci.h6
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h8
-rw-r--r--arch/powerpc/platforms/powernv/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/setup.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/xmon/xmon.c24
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/kernel/compat_wrapper.S2
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/mm/page-states.c10
-rw-r--r--arch/s390/pci/pci_dma.c8
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/mm/srmmu.c2
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/pgtable.h14
-rw-r--r--arch/x86/include/asm/tsc.h2
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c10
-rw-r--r--arch/x86/kernel/ftrace.c83
-rw-r--r--arch/x86/kernel/pci-dma.c4
-rw-r--r--arch/x86/kernel/tsc.c11
-rw-r--r--arch/x86/kernel/tsc_msr.c30
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c2
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/platform/efi/efi_32.c6
-rw-r--r--arch/x86/platform/efi/efi_64.c9
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/xtensa/Kconfig3
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi12
-rw-r--r--arch/xtensa/include/asm/io.h2
-rw-r--r--arch/xtensa/include/asm/traps.h44
-rw-r--r--arch/xtensa/include/asm/vectors.h2
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h7
-rw-r--r--arch/xtensa/kernel/entry.S449
-rw-r--r--arch/xtensa/kernel/setup.c2
-rw-r--r--arch/xtensa/kernel/time.c1
-rw-r--r--arch/xtensa/kernel/vectors.S2
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c2
-rw-r--r--arch/xtensa/mm/init.c13
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c7
-rw-r--r--arch/xtensa/variants/fsf/include/variant/tie.h9
-rw-r--r--block/blk-core.c20
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c101
-rw-r--r--block/blk-lib.c8
-rw-r--r--block/blk-merge.c91
-rw-r--r--block/blk-mq-tag.c2
-rw-r--r--block/blk-mq.c143
-rw-r--r--block/blk-mq.h4
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-timeout.c2
-rw-r--r--block/blk.h2
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/blacklist.c58
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/dock.c13
-rw-r--r--drivers/acpi/fan.c3
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/sbs.c4
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c147
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/ahci.c18
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/pata_imx.c8
-rw-r--r--drivers/ata/sata_mv.c16
-rw-r--r--drivers/ata/sata_sil.c1
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/base/dma-buf.c25
-rw-r--r--drivers/block/null_blk.c97
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c66
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/clocksource/bcm_kona_timer.c54
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c26
-rw-r--r--drivers/cpufreq/powernow-k8.c10
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/ioat/dma.c52
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c11
-rw-r--r--drivers/dma/ioat/dma_v3.c3
-rw-r--r--drivers/dma/mv_xor.c24
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c10
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/fmc/fmc-write-eeprom.c2
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-intel-mid.c4
-rw-r--r--drivers/gpio/gpio-xtensa.c16
-rw-r--r--drivers/gpu/drm/drm_ioctl.c12
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c19
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c14
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c31
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c179
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c15
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c19
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h122
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c16
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-hyperv.c11
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-microsoft.c4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hwmon/max1668.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c33
-rw-r--r--drivers/iio/accel/bma180.c16
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c10
-rw-r--r--drivers/iio/light/tsl2563.c16
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/mag3110.c8
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c185
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c22
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/mlx5/user.h7
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/iommu/arm-smmu.c105
-rw-r--r--drivers/iommu/omap-iommu-debug.c4
-rw-r--r--drivers/irqchip/irq-orion.c22
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c7
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid5.c90
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/mfd/da9055-i2c.c12
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max8997.c6
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/sec-core.c2
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/misc/genwqe/card_dev.c1
-rw-r--r--drivers/misc/mei/client.c15
-rw-r--r--drivers/misc/mic/host/mic_virtio.c3
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/nand/omap2.c61
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_3ad.c6
-rw-r--r--drivers/net/bonding/bond_3ad.h1
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/dev.c15
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/janz-ican3.c20
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c13
-rw-r--r--drivers/net/hyperv/netvsc_drv.c53
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/phy/dp83640.c32
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/phy_device.c38
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/Kconfig15
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c4
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/mcs7830.c5
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/r8152.c17
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/sr9800.c874
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/usb/usbnet.c25
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c73
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c7
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c23
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h10
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c18
-rw-r--r--drivers/net/xen-netback/common.h6
-rw-r--r--drivers/net/xen-netback/interface.c1
-rw-r--r--drivers/net/xen-netback/netback.c16
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/base.c128
-rw-r--r--drivers/of/of_mdio.c22
-rw-r--r--drivers/of/selftest.c67
-rw-r--r--drivers/of/testcase-data/testcases.dtsi3
-rw-r--r--drivers/of/testcase-data/tests-interrupts.dtsi (renamed from arch/arm/boot/dts/testcases/tests-interrupts.dtsi)0
-rw-r--r--drivers/of/testcase-data/tests-match.dtsi19
-rw-r--r--drivers/of/testcase-data/tests-phandle.dtsi (renamed from arch/arm/boot/dts/testcases/tests-phandle.dtsi)0
-rw-r--r--drivers/pci/host/pci-mvebu.c11
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c15
-rw-r--r--drivers/pci/msi.c10
-rw-r--r--drivers/pci/pci.c10
-rw-r--r--drivers/phy/Kconfig3
-rw-r--r--drivers/phy/phy-core.c76
-rw-r--r--drivers/phy/phy-exynos-dp-video.c8
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c10
-rw-r--r--drivers/phy/phy-mvebu-sata.c10
-rw-r--r--drivers/phy/phy-omap-usb2.c10
-rw-r--r--drivers/phy/phy-twl4030-usb.c10
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9055-regulator.c4
-rw-r--r--drivers/regulator/da9063-regulator.c4
-rw-r--r--drivers/regulator/max14577.c10
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/s390/cio/chsc.c1
-rw-r--r--drivers/s390/cio/cio.c40
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c24
-rw-r--r--drivers/sbus/char/jsflash.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-nuc900.c2
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/binder.c3
-rw-r--r--drivers/staging/android/ion/compat_ion.c26
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h1
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c6
-rw-r--r--drivers/staging/android/sw_sync.h17
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/bcm/Bcmnet.c2
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c17
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c6
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c3
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c13
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c6
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c55
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c22
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/netlogic/xlr_net.c7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c273
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c2
-rw-r--r--drivers/staging/rtl8821ae/Kconfig2
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_spc.c4
-rw-r--r--drivers/target/target_core_transport.c5
-rw-r--r--drivers/tty/hvc/hvc_opal.c8
-rw-r--r--drivers/tty/hvc/hvc_rtas.c12
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_xen.c17
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/n_tty.c14
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/driver.c24
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/hcd.c11
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c58
-rw-r--r--drivers/usb/gadget/f_fs.c7
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/ehci-hub.c26
-rw-r--r--drivers/usb/host/xhci-dbg.c6
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci.c38
-rw-r--r--drivers/usb/host/xhci.h41
-rw-r--r--drivers/usb/musb/musb_core.c15
-rw-r--r--drivers/usb/musb/musb_host.c3
-rw-r--r--drivers/usb/musb/musb_virthub.c26
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/phy/phy-msm-usb.c57
-rw-r--r--drivers/usb/phy/phy.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vhost/net.c47
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/omap2/dss/dispc.c16
-rw-r--r--drivers/video/omap2/dss/dpi.c2
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/watchdog/w83697hf_wdt.c2
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/xencomm.c219
-rw-r--r--fs/bio-integrity.c13
-rw-r--r--fs/bio.c15
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/btrfs/ioctl.c22
-rw-r--r--fs/btrfs/send.c18
-rw-r--r--fs/btrfs/super.c11
-rw-r--r--fs/btrfs/sysfs.c10
-rw-r--r--fs/ceph/acl.c11
-rw-r--r--fs/ceph/dir.c23
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/super.c32
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/ceph/xattr.c54
-rw-r--r--fs/cifs/cifsacl.c61
-rw-r--r--fs/cifs/cifsglob.h11
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c15
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c74
-rw-r--r--fs/cifs/inode.c15
-rw-r--r--fs/cifs/smb1ops.c9
-rw-r--r--fs/cifs/smb2glob.h3
-rw-r--r--fs/cifs/smb2ops.c14
-rw-r--r--fs/cifs/smb2pdu.c9
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/xattr.c15
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/resize.c34
-rw-r--r--fs/ext4/super.c20
-rw-r--r--fs/file.c2
-rw-r--r--fs/fscache/object-list.c5
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/jfs/acl.c2
-rw-r--r--fs/lockd/svclock.c8
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/inode.c14
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4client.c7
-rw-r--r--fs/nfs/nfs4namespace.c12
-rw-r--r--fs/nfs/nfs4state.c5
-rw-r--r--fs/nfsd/nfs4acl.c9
-rw-r--r--fs/ntfs/file.c2
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/file.c52
-rw-r--r--fs/ocfs2/namei.c17
-rw-r--r--fs/proc/vmcore.c26
-rw-r--r--fs/reiserfs/do_balan.c895
-rw-r--r--fs/sync.c17
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_iops.c16
-rw-r--r--fs/xfs/xfs_log_cil.c19
-rw-r--r--fs/xfs/xfs_mount.c24
-rw-r--r--fs/xfs/xfs_sb.c10
-rw-r--r--include/asm-generic/pgtable.h39
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h3
-rw-r--r--include/drm/ttm/ttm_page_alloc.h2
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/can/skb.h38
-rw-r--r--include/linux/ceph/ceph_fs.h5
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/compiler-gcc4.h6
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/fs.h8
-rw-r--r--include/linux/gpio/consumer.h4
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/mfd/max8997-private.h2
-rw-r--r--include/linux/mfd/max8998-private.h2
-rw-r--r--include/linux/mfd/tps65217.h4
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/netdevice.h36
-rw-r--r--include/linux/of.h153
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/pci.h20
-rw-r--r--include/linux/phy/phy.h14
-rw-r--r--include/linux/skbuff.h17
-rw-r--r--include/linux/smp.h3
-rw-r--r--include/linux/spi/spi.h7
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/ethoc.h1
-rw-r--r--include/net/ipx.h11
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h9
-rw-r--r--include/net/netfilter/nft_reject.h25
-rw-r--r--include/net/sctp/structs.h14
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/power.h7
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/in6.h23
-rw-r--r--include/uapi/linux/mic_ioctl.h2
-rw-r--r--include/uapi/xen/Kbuild2
-rw-r--r--include/uapi/xen/gntalloc.h (renamed from include/xen/gntalloc.h)0
-rw-r--r--include/uapi/xen/gntdev.h (renamed from include/xen/gntdev.h)0
-rw-r--r--include/xen/interface/io/blkif.h34
-rw-r--r--include/xen/interface/xencomm.h41
-rw-r--r--include/xen/xencomm.h77
-rw-r--r--ipc/mq_sysctl.c18
-rw-r--r--ipc/mqueue.c6
-rw-r--r--kernel/cgroup.c60
-rw-r--r--kernel/events/core.c12
-rw-r--r--kernel/irq/devres.c45
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/sched/core.c28
-rw-r--r--kernel/sched/cpudeadline.c6
-rw-r--r--kernel/sched/deadline.c10
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/time/jiffies.c6
-rw-r--r--kernel/time/sched_clock.c46
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--kernel/user_namespace.c2
-rw-r--r--kernel/workqueue.c7
-rw-r--r--lib/percpu_ida.c7
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/huge_memory.c18
-rw-r--r--mm/memcontrol.c6
-rw-r--r--mm/memory-failure.c6
-rw-r--r--mm/memory.c15
-rw-r--r--mm/mprotect.c25
-rw-r--r--mm/slub.c38
-rw-r--r--mm/vmpressure.c1
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c5
-rw-r--r--net/batman-adv/bat_iv_ogm.c30
-rw-r--r--net/batman-adv/hard-interface.c22
-rw-r--r--net/batman-adv/originator.c36
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/routing.c4
-rw-r--r--net/batman-adv/send.c9
-rw-r--r--net/batman-adv/translation-table.c23
-rw-r--r--net/bluetooth/hidp/core.c16
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bridge/br_device.c54
-rw-r--r--net/bridge/br_fdb.c137
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_private.h13
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c27
-rw-r--r--net/caif/caif_dev.c1
-rw-r--r--net/caif/cfsrvl.c1
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/raw.c1
-rw-r--r--net/core/dev.c28
-rw-r--r--net/core/fib_rules.c7
-rw-r--r--net/core/flow_dissector.c20
-rw-r--r--net/core/netpoll.c4
-rw-r--r--net/core/rtnetlink.c21
-rw-r--r--net/core/sock.c6
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.h1
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/ieee802154/6lowpan.c23
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/ip_forward.c71
-rw-r--r--net/ipv4/ip_tunnel.c29
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/netfilter/Kconfig5
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c5
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c75
-rw-r--r--net/ipv4/route.c13
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c18
-rw-r--r--net/ipv4/tcp_output.c15
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_output.c17
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c76
-rw-r--r--net/ipx/af_ipx.c22
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/mac80211/cfg.c44
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/iface.c33
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c55
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c82
-rw-r--r--net/netfilter/nf_tables_core.c6
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_log.c5
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_queue.c4
-rw-r--r--net/netfilter/nft_rbtree.c16
-rw-r--r--net/netfilter/nft_reject.c89
-rw-r--r--net/netfilter/nft_reject_inet.c63
-rw-r--r--net/netfilter/xt_CT.c7
-rw-r--r--net/openvswitch/datapath.c23
-rw-r--r--net/openvswitch/flow_table.c88
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/packet/af_packet.c26
-rw-r--r--net/sched/sch_pie.c21
-rw-r--r--net/sctp/associola.c82
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/sm_statefuns.c2
-rw-r--r--net/sctp/socket.c47
-rw-r--r--net/sctp/sysctl.c18
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c19
-rw-r--r--net/sunrpc/backchannel_rqst.c6
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--net/tipc/core.h1
-rw-r--r--net/tipc/link.c7
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/core.h4
-rw-r--r--net/wireless/nl80211.c32
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/scan.c40
-rw-r--r--net/wireless/sme.c2
-rw-r--r--scripts/Makefile.lib1
-rwxr-xr-xscripts/checkpatch.pl4
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--scripts/mod/file2alias.c4
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/ss/policydb.c8
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--sound/pci/hda/hda_codec.c34
-rw-r--r--sound/pci/hda/hda_generic.c8
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c68
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--sound/pci/hda/patch_sigmatel.c40
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--sound/soc/blackfin/Kconfig11
-rw-r--r--sound/soc/codecs/da9055.c11
-rw-r--r--sound/soc/codecs/max98090.c21
-rw-r--r--sound/soc/codecs/rt5640.c1
-rw-r--r--sound/soc/codecs/wm8993.c1
-rw-r--r--sound/soc/davinci/davinci-evm.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c83
-rw-r--r--sound/soc/fsl/fsl_esai.c4
-rw-r--r--sound/soc/fsl/fsl_esai.h2
-rw-r--r--sound/soc/fsl/imx-mc13783.c1
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c10
-rw-r--r--sound/soc/fsl/imx-wm8962.c11
-rw-r--r--sound/soc/samsung/Kconfig6
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c8
-rw-r--r--sound/usb/mixer_maps.c9
-rw-r--r--virt/kvm/arm/vgic.c1
-rw-r--r--virt/kvm/coalesced_mmio.c8
918 files changed, 10034 insertions, 5585 deletions
diff --git a/.gitignore b/.gitignore
index 7e9932e55475..42fa0d5626a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,3 +92,6 @@ extra_certificates
 signing_key.priv
 signing_key.x509
 x509.genkey
+
+# Kconfig presets
+all.config
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 38f8444bdd0e..07de7e19b4ce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,6 +29,8 @@ DMA-ISA-LPC.txt
   - How to do DMA with ISA (and LPC) devices.
 DMA-attributes.txt
   - listing of the various possible attributes a DMA region can have
+dmatest.txt
+  - how to compile, configure and use the dmatest system.
 DocBook/
   - directory with DocBook templates etc. for kernel documentation.
 EDID/
@@ -77,6 +79,8 @@ arm/
   - directory with info about Linux on the ARM architecture.
 arm64/
   - directory with info about Linux on the 64 bit ARM architecture.
+assoc_array.txt
+  - generic associative array intro.
 atomic_ops.txt
   - semantics and behavior of atomic and bitmask operations.
 auxdisplay/
@@ -87,6 +91,8 @@ bad_memory.txt
   - how to use kernel parameters to exclude bad RAM regions.
 basic_profiling.txt
   - basic instructions for those who wants to profile Linux kernel.
+bcache.txt
+  - Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
 binfmt_misc.txt
   - info on the kernel support for extra binary formats.
 blackfin/
@@ -171,6 +177,8 @@ early-userspace/
   - info about initramfs, klibc, and userspace early during boot.
 edac.txt
   - information on EDAC - Error Detection And Correction
+efi-stub.txt
+  - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
 eisa.txt
   - info on EISA bus support.
 email-clients.txt
@@ -195,8 +203,8 @@ futex-requeue-pi.txt
   - info on requeueing of tasks from a non-PI futex to a PI futex
 gcov.txt
   - use of GCC's coverage testing tool "gcov" with the Linux kernel
-gpio.txt
-  - overview of GPIO (General Purpose Input/Output) access conventions.
+gpio/
+  - gpio related documentation
 hid/
   - directory with information on human interface devices
 highuid.txt
@@ -255,6 +263,8 @@ kernel-docs.txt
   - listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
   - summary listing of command line / boot prompt args for the kernel.
+kernel-per-CPU-kthreads.txt
+  - List of all per-CPU kthreads and how they introduce jitter.
 kmemcheck.txt
   - info on dynamic checker that detects uses of uninitialized memory.
 kmemleak.txt
@@ -299,8 +309,6 @@ memory-devices/
   - directory with info on parts like the Texas Instruments EMIF driver
 memory-hotplug.txt
   - Hotpluggable memory support, how to use and current status.
-memory.txt
-  - info on typical Linux memory problems.
 metag/
   - directory with info about Linux on Meta architecture.
 mips/
@@ -311,6 +319,8 @@ mmc/
   - directory with info about the MMC subsystem
 mn10300/
   - directory with info about the mn10300 architecture port
+module-signing.txt
+  - Kernel module signing for increased security when loading modules.
 mtd/
   - directory with info about memory technology devices (flash)
 mono.txt
@@ -343,6 +353,8 @@ pcmcia/
   - info on the Linux PCMCIA driver.
 percpu-rw-semaphore.txt
   - RCU based read-write semaphore optimized for locking for reading
+phy.txt
+  - Description of the generic PHY framework.
 pi-futex.txt
   - documentation on lightweight priority inheritance futexes.
 pinctrl.txt
@@ -431,6 +443,8 @@ sysrq.txt
   - info on the magic SysRq key.
 target/
   - directory with info on generating TCM v4 fabric .ko modules
+this_cpu_ops.txt
+  - List rationale behind and the way to use this_cpu operations.
 thermal/
   - directory with information on managing thermal issues (CPU/temp)
 trace/
@@ -469,6 +483,8 @@ wimax/
   - directory with info about Intel Wireless Wimax Connections
 workqueue.txt
   - information on the Concurrency Managed Workqueue implementation
+ww-mutex-design.txt
+  - Intro to Mutex wait/would deadlock handling.s
 x86/x86_64/
   - directory with info on Linux support for AMD x86-64 (Hammer) machines.
 xtensa/
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index a8d01005f480..10a93696e55a 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -82,7 +82,19 @@ Most of the hard work is done for the driver in the PCI layer. It simply
 has to request that the PCI layer set up the MSI capability for this
 device.
 
-4.2.1 pci_enable_msi_range
+4.2.1 pci_enable_msi
+
+int pci_enable_msi(struct pci_dev *dev)
+
+A successful call allocates ONE interrupt to the device, regardless
+of how many MSIs the device supports. The device is switched from
+pin-based interrupt mode to MSI mode. The dev->irq number is changed
+to a new number which represents the message signaled interrupt;
+consequently, this function should be called before the driver calls
+request_irq(), because an MSI is delivered via a vector that is
+different from the vector of a pin-based interrupt.
+
+4.2.2 pci_enable_msi_range
 
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 
@@ -147,6 +159,11 @@ static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
         return pci_enable_msi_range(pdev, nvec, nvec);
 }
 
+Note, unlike pci_enable_msi_exact() function, which could be also used to
+enable a particular number of MSI-X interrupts, pci_enable_msi_range()
+returns either a negative errno or 'nvec' (not negative errno or 0 - as
+pci_enable_msi_exact() does).
+
 4.2.1.3 Single MSI mode
 
 The most notorious example of the request type described above is
@@ -158,7 +175,27 @@ static int foo_driver_enable_single_msi(struct pci_dev *pdev)
         return pci_enable_msi_range(pdev, 1, 1);
 }
 
-4.2.2 pci_disable_msi
+Note, unlike pci_enable_msi() function, which could be also used to
+enable the single MSI mode, pci_enable_msi_range() returns either a
+negative errno or 1 (not negative errno or 0 - as pci_enable_msi()
+does).
+
+4.2.3 pci_enable_msi_exact
+
+int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+
+This variation on pci_enable_msi_range() call allows a device driver to
+request exactly 'nvec' MSIs.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to request any more MSI interrupts for
+this device.
+
+By contrast with pci_enable_msi_range() function, pci_enable_msi_exact()
+returns zero in case of success, which indicates MSI interrupts have been
+successfully allocated.
+
+4.2.4 pci_disable_msi
 
 void pci_disable_msi(struct pci_dev *dev)
 
@@ -172,7 +209,7 @@ on any interrupt for which it previously called request_irq().
 Failure to do so results in a BUG_ON(), leaving the device with
 MSI enabled and thus leaking its vector.
 
-4.2.3 pci_msi_vec_count
+4.2.4 pci_msi_vec_count
 
 int pci_msi_vec_count(struct pci_dev *dev)
 
@@ -257,8 +294,8 @@ possible, likely up to the limit returned by pci_msix_vec_count() function:
 
 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
 {
-        return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
-                                    1, nvec);
+        return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                     1, nvec);
 }
 
 Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
@@ -269,8 +306,8 @@ In this case the function could look like this:
 
 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
 {
-        return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
-                                    FOO_DRIVER_MINIMUM_NVEC, nvec);
+        return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                     FOO_DRIVER_MINIMUM_NVEC, nvec);
 }
 
 4.3.1.2 Exact number of MSI-X interrupts
@@ -282,10 +319,15 @@ parameters:
 
 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
 {
-        return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
-                                    nvec, nvec);
+        return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                     nvec, nvec);
 }
 
+Note, unlike pci_enable_msix_exact() function, which could be also used to
+enable a particular number of MSI-X interrupts, pci_enable_msix_range()
+returns either a negative errno or 'nvec' (not negative errno or 0 - as
+pci_enable_msix_exact() does).
+
 4.3.1.3 Specific requirements to the number of MSI-X interrupts
 
 As noted above, there could be devices that can not operate with just any
@@ -332,7 +374,64 @@ Note how pci_enable_msix_range() return value is analized for a fallback -
 any error code other than -ENOSPC indicates a fatal error and should not
 be retried.
 
-4.3.2 pci_disable_msix
+4.3.2 pci_enable_msix_exact
+
+int pci_enable_msix_exact(struct pci_dev *dev,
+                          struct msix_entry *entries, int nvec)
+
+This variation on pci_enable_msix_range() call allows a device driver to
+request exactly 'nvec' MSI-Xs.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to allocate any more MSI-X interrupts for
+this device.
+
+By contrast with pci_enable_msix_range() function, pci_enable_msix_exact()
+returns zero in case of success, which indicates MSI-X interrupts have been
+successfully allocated.
+
+Another version of a routine that enables MSI-X mode for a device with
+specific requirements described in chapter 4.3.1.3 might look like this:
+
+/*
+ * Assume 'minvec' and 'maxvec' are non-zero
+ */
+static int foo_driver_enable_msix(struct foo_adapter *adapter,
+                                  int minvec, int maxvec)
+{
+        int rc;
+
+        minvec = roundup_pow_of_two(minvec);
+        maxvec = rounddown_pow_of_two(maxvec);
+
+        if (minvec > maxvec)
+                return -ERANGE;
+
+retry:
+        rc = pci_enable_msix_exact(adapter->pdev,
+                                   adapter->msix_entries, maxvec);
+
+        /*
+         * -ENOSPC is the only error code allowed to be analyzed
+         */
+        if (rc == -ENOSPC) {
+                if (maxvec == 1)
+                        return -ENOSPC;
+
+                maxvec /= 2;
+
+                if (minvec > maxvec)
+                        return -ENOSPC;
+
+                goto retry;
+        } else if (rc < 0) {
+                return rc;
+        }
+
+        return maxvec;
+}
+
+4.3.3 pci_disable_msix
 
 void pci_disable_msix(struct pci_dev *dev)
 
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1d7a885761f5..fa57139f50bf 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -8,6 +8,8 @@ listRCU.txt
   - Using RCU to Protect Read-Mostly Linked Lists
 lockdep.txt
   - RCU and lockdep checking
+lockdep-splat.txt
+  - RCU Lockdep splats explained.
 NMI-RCU.txt
   - Using RCU to Protect Dynamic NMI Handlers
 rcubarrier.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 36420e116c90..a94090cc785d 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -4,6 +4,8 @@ Booting
   - requirements for booting
 Interrupts
   - ARM Interrupt subsystem documentation
+IXP4xx
+  - Intel IXP4xx Network processor.
 msm
   - MSM specific documentation
 Netwinder
@@ -24,8 +26,16 @@ SPEAr
   - ST SPEAr platform Linux Overview
 VFP/
   - Release notes for Linux Kernel Vector Floating Point support code
+cluster-pm-race-avoidance.txt
+  - Algorithm for CPU and Cluster setup/teardown
 empeg/
   - Ltd's Empeg MP3 Car Audio Player
+firmware.txt
+  - Secure firmware registration and calling.
+kernel_mode_neon.txt
+  - How to use NEON instructions in kernel mode
+kernel_user_helpers.txt
+  - Helper functions in kernel space made available for userspace.
 mem_alignment
   - alignment abort handler documentation
 memory.txt
@@ -34,3 +44,7 @@ nwfpe/
   - NWFPE floating point emulator documentation
 swp_emulation
   - SWP/SWPB emulation handler/logging description
+tcm.txt
+  - ARM Tightly Coupled Memory
+vlocks.txt
+  - Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 2df0365f2dff..c54fcdd4ae9f 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,8 +1,10 @@
 00-INDEX
   - This file
-
+Makefile
+  - Makefile for gptimers example file.
 bfin-gpio-notes.txt
   - Notes in developing/using bfin-gpio driver.
-
 bfin-spi-notes.txt
   - Notes for using bfin spi bus driver.
+gptimers-example.c
+  - gptimers example
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 929d9904f74b..e840b47613f7 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -14,6 +14,8 @@ deadline-iosched.txt
   - Deadline IO scheduler tunables
 ioprio.txt
   - Block io priorities (in CFQ scheduler)
+null_blk.txt
+  - Null block for block-layer benchmarking.
 queue-sysfs.txt
   - Queue's sysfs entries
 request.txt
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
index b78f691fd847..8c4102c6a5e7 100644
--- a/Documentation/devicetree/00-INDEX
+++ b/Documentation/devicetree/00-INDEX
@@ -8,3 +8,5 @@ https://lists.ozlabs.org/listinfo/devicetree-discuss
   - this file
 booting-without-of.txt
   - Booting Linux without Open Firmware, describes history and format of device trees.
+usage-model.txt
+  - How Linux uses DT and what DT aims to solve.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 34dc40cffdfd..af9b4a0d902b 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -91,7 +91,7 @@ Boards:
91 compatible = "ti,omap3-beagle", "ti,omap3" 91 compatible = "ti,omap3-beagle", "ti,omap3"
92 92
93- OMAP3 Tobi with Overo : Commercial expansion board with daughter board 93- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
94 compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3" 94 compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
95 95
96- OMAP4 SDP : Software Development Board 96- OMAP4 SDP : Software Development Board
97 compatible = "ti,omap4-sdp", "ti,omap4430" 97 compatible = "ti,omap4-sdp", "ti,omap4430"
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
index 0a85c70cd30a..07ad02075a93 100644
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
@@ -13,6 +13,9 @@ Required properties:
13- #address-cells: should be one. The cell is the slot id. 13- #address-cells: should be one. The cell is the slot id.
14- #size-cells: should be zero. 14- #size-cells: should be zero.
15- at least one slot node 15- at least one slot node
16- clock-names: tuple listing input clock names.
17 Required elements: "mci_clk"
18- clocks: phandles to input clocks.
16 19
17The node contains child nodes for each slot that the platform uses 20The node contains child nodes for each slot that the platform uses
18 21
@@ -24,6 +27,8 @@ mmc0: mmc@f0008000 {
24 interrupts = <12 4>; 27 interrupts = <12 4>;
25 #address-cells = <1>; 28 #address-cells = <1>;
26 #size-cells = <0>; 29 #size-cells = <0>;
30 clock-names = "mci_clk";
31 clocks = <&mci0_clk>;
27 32
28 [ child node definitions...] 33 [ child node definitions...]
29}; 34};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index b90bfcd138ff..863d5b8155c7 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -1,7 +1,8 @@
1* Allwinner EMAC ethernet controller 1* Allwinner EMAC ethernet controller
2 2
3Required properties: 3Required properties:
4- compatible: should be "allwinner,sun4i-emac". 4- compatible: should be "allwinner,sun4i-a10-emac" (Deprecated:
5 "allwinner,sun4i-emac")
5- reg: address and length of the register set for the device. 6- reg: address and length of the register set for the device.
6- interrupts: interrupt for the device 7- interrupts: interrupt for the device
7- phy: A phandle to a phy node defining the PHY address (as the reg 8- phy: A phandle to a phy node defining the PHY address (as the reg
@@ -14,7 +15,7 @@ Optional properties:
14Example: 15Example:
15 16
16emac: ethernet@01c0b000 { 17emac: ethernet@01c0b000 {
17 compatible = "allwinner,sun4i-emac"; 18 compatible = "allwinner,sun4i-a10-emac";
18 reg = <0x01c0b000 0x1000>; 19 reg = <0x01c0b000 0x1000>;
19 interrupts = <55>; 20 interrupts = <55>;
20 clocks = <&ahb_gates 17>; 21 clocks = <&ahb_gates 17>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 00b9f9a3ec1d..4ec56413779d 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -1,7 +1,8 @@
1* Allwinner A10 MDIO Ethernet Controller interface 1* Allwinner A10 MDIO Ethernet Controller interface
2 2
3Required properties: 3Required properties:
4- compatible: should be "allwinner,sun4i-mdio". 4- compatible: should be "allwinner,sun4i-a10-mdio"
5 (Deprecated: "allwinner,sun4i-mdio").
5- reg: address and length of the register set for the device. 6- reg: address and length of the register set for the device.
6 7
7Optional properties: 8Optional properties:
@@ -9,7 +10,7 @@ Optional properties:
9 10
10Example at the SoC level: 11Example at the SoC level:
11mdio@01c0b080 { 12mdio@01c0b080 {
12 compatible = "allwinner,sun4i-mdio"; 13 compatible = "allwinner,sun4i-a10-mdio";
13 reg = <0x01c0b080 0x14>; 14 reg = <0x01c0b080 0x14>;
14 #address-cells = <1>; 15 #address-cells = <1>;
15 #size-cells = <0>; 16 #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 000000000000..3dd3d0bf112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
1STMicroelectronics SoC DWMAC glue layer controller
2
3The device node has the following properties.
4
5Required properties:
6 - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
7 "st,stid127-dwmac".
8 - reg : Offset and size of the glue configuration registers within the
9 system configuration regmap pointed to by the st,syscon property.
10
11 - reg-names : Should be "sti-ethconf".
12
13 - st,syscon : Should be a phandle to the system configuration node which
14 encompasses these glue registers.
15
16 - st,tx-retime-src: On STi parts, for gigabit speeds, the 125MHz clock can be
17 wired up from different sources: either via the TXCLK pin or via the CLK_125
18 pin. This wiring is entirely board dependent, so the retiming glue
19 logic must be configured accordingly. Possible values for this property:
20
21 "txclk" - if the 125MHz clock is wired up via the txclk line.
22 "clk_125" - if the 125MHz clock is wired up via the clk_125 line.
23
24 This property is only valid for gigabit setups (GMII, RGMII) and is
25 unused for non-gigabit (MII and RMII) setups. Also note that the internal
26 clockgen cannot generate a stable 125MHz clock.
27
28 - st,ext-phyclk: This boolean property indicates whether the MAC or the PHY
29 generates the tx and rx clock. It is only valid in the RMII case, where the
30 clock can be generated by either the MAC or the PHY.
31
32 - clock-names: should be "sti-ethclk".
33 - clocks: Should point to ethernet clockgen which can generate phyclk.
34
35
36Example:
37
38ethernet0: dwmac@fe810000 {
39 device_type = "network";
40 compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
41 reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
42 reg-names = "stmmaceth", "sti-ethconf";
43 interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
44 interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
45 phy-mode = "mii";
46
47 st,syscon = <&syscfg_rear>;
48
49 snps,pbl = <32>;
50 snps,mixed-burst;
51
52 resets = <&softreset STIH416_ETH0_SOFTRESET>;
53 reset-names = "stmmaceth";
54 pinctrl-0 = <&pinctrl_mii0>;
55 pinctrl-names = "default";
56 clocks = <&CLK_S_GMAC0_PHY>;
57 clock-names = "stmmaceth";
58};
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 000000000000..d0327f0b59ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,47 @@
1Binding for TI bq2415x Li-Ion Charger
2
3Required properties:
4- compatible: Should contain one of the following:
5 * "ti,bq24150"
6 * "ti,bq24150"
7 * "ti,bq24150a"
8 * "ti,bq24151"
9 * "ti,bq24151a"
10 * "ti,bq24152"
11 * "ti,bq24153"
12 * "ti,bq24153a"
13 * "ti,bq24155"
14 * "ti,bq24156"
15 * "ti,bq24156a"
16 * "ti,bq24158"
17- reg: integer, i2c address of the device.
18- ti,current-limit: integer, initial maximum current charger can pull
19 from power supply in mA.
20- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV.
21 The chip will use slow precharge if battery voltage
22 is below this value.
23- ti,battery-regulation-voltage: integer, maximum charging voltage in mV.
24- ti,charge-current: integer, maximum charging current in mA.
25- ti,termination-current: integer, charge will be terminated when current in
26 constant-voltage phase drops below this value (in mA).
27- ti,resistor-sense: integer, value of sensing resistor in milliohm.
28
29Optional properties:
30- ti,usb-charger-detection: phandle to usb charger detection device.
31 (required for auto mode)
32
33Example from Nokia N900:
34
35bq24150a {
36 compatible = "ti,bq24150a";
37 reg = <0x6b>;
38
39 ti,current-limit = <100>;
40 ti,weak-battery-voltage = <3400>;
41 ti,battery-regulation-voltage = <4200>;
42 ti,charge-current = <650>;
43 ti,termination-current = <100>;
44 ti,resistor-sense = <68>;
45
46 ti,usb-charger-detection = <&isp1704>;
47};
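All of the ti,* properties in this binding are plain integers, so a charger driver can pick them up with the generic OF helpers. The following sketch is illustrative only and is not part of the binding; the bq2415x_parse_dt name and the value struct are invented for the example.

#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical container for a few of the parsed values. */
struct bq2415x_dt_values {
	u32 current_limit;		/* mA */
	u32 weak_battery_voltage;	/* mV */
	u32 charge_current;		/* mA */
};

/* Sketch: read some of the binding's properties from the device node. */
static int bq2415x_parse_dt(struct device_node *np,
			    struct bq2415x_dt_values *val)
{
	int ret;

	ret = of_property_read_u32(np, "ti,current-limit",
				   &val->current_limit);
	if (ret)
		return ret;

	ret = of_property_read_u32(np, "ti,weak-battery-voltage",
				   &val->weak_battery_voltage);
	if (ret)
		return ret;

	return of_property_read_u32(np, "ti,charge-current",
				    &val->charge_current);
}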
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
index 07e04cdc0c9e..4f8184d069cb 100644
--- a/Documentation/devicetree/bindings/spi/spi_atmel.txt
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -5,6 +5,9 @@ Required properties:
5- reg: Address and length of the register set for the device 5- reg: Address and length of the register set for the device
6- interrupts: Should contain spi interrupt 6- interrupts: Should contain spi interrupt
7- cs-gpios: chipselects 7- cs-gpios: chipselects
8- clock-names: tuple listing input clock names.
9 Required elements: "spi_clk"
10- clocks: phandles to input clocks.
8 11
9Example: 12Example:
10 13
@@ -14,6 +17,8 @@ spi1: spi@fffcc000 {
14 interrupts = <13 4 5>; 17 interrupts = <13 4 5>;
15 #address-cells = <1>; 18 #address-cells = <1>;
16 #size-cells = <0>; 19 #size-cells = <0>;
20 clocks = <&spi1_clk>;
21 clock-names = "spi_clk";
17 cs-gpios = <&pioB 3 0>; 22 cs-gpios = <&pioB 3 0>;
18 status = "okay"; 23 status = "okay";
19 24
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd51bf0..40ce2df0e0e9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ ad Avionic Design GmbH
8adi Analog Devices, Inc. 8adi Analog Devices, Inc.
9aeroflexgaisler Aeroflex Gaisler AB 9aeroflexgaisler Aeroflex Gaisler AB
10ak Asahi Kasei Corp. 10ak Asahi Kasei Corp.
11allwinner Allwinner Technology Co., Ltd.
11altr Altera Corp. 12altr Altera Corp.
12amcc Applied Micro Circuits Corporation (APM, formally AMCC) 13amcc Applied Micro Circuits Corporation (APM, formally AMCC)
13amstaos AMS-Taos Inc. 14amstaos AMS-Taos Inc.
@@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc.
40gumstix Gumstix, Inc. 41gumstix Gumstix, Inc.
41haoyu Haoyu Microelectronic Co. Ltd. 42haoyu Haoyu Microelectronic Co. Ltd.
42hisilicon Hisilicon Limited. 43hisilicon Hisilicon Limited.
44honeywell Honeywell
43hp Hewlett Packard 45hp Hewlett Packard
44ibm International Business Machines (IBM) 46ibm International Business Machines (IBM)
45idt Integrated Device Technologies, Inc. 47idt Integrated Device Technologies, Inc.
@@ -55,6 +57,7 @@ maxim Maxim Integrated Products
55microchip Microchip Technology Inc. 57microchip Microchip Technology Inc.
56mosaixtech Mosaix Technologies, Inc. 58mosaixtech Mosaix Technologies, Inc.
57national National Semiconductor 59national National Semiconductor
60neonode Neonode Inc.
58nintendo Nintendo 61nintendo Nintendo
59nvidia NVIDIA 62nvidia NVIDIA
60nxp NXP Semiconductors 63nxp NXP Semiconductors
@@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH
64picochip Picochip Ltd 67picochip Picochip Ltd
65powervr PowerVR (deprecated, use img) 68powervr PowerVR (deprecated, use img)
66qca Qualcomm Atheros, Inc. 69qca Qualcomm Atheros, Inc.
67qcom Qualcomm, Inc. 70qcom Qualcomm Technologies, Inc
68ralink Mediatek/Ralink Technology Corp. 71ralink Mediatek/Ralink Technology Corp.
69ramtron Ramtron International 72ramtron Ramtron International
70realtek Realtek Semiconductor Corp. 73realtek Realtek Semiconductor Corp.
@@ -78,6 +81,7 @@ silabs Silicon Laboratories
78simtek 81simtek
79sirf SiRF Technology, Inc. 82sirf SiRF Technology, Inc.
80snps Synopsys, Inc. 83snps Synopsys, Inc.
84spansion Spansion Inc.
81st STMicroelectronics 85st STMicroelectronics
82ste ST-Ericsson 86ste ST-Ericsson
83stericsson ST-Ericsson 87stericsson ST-Ericsson
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a70542e823..fe85e7c5907a 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -5,6 +5,8 @@ please mail me.
5 5
600-INDEX 600-INDEX
7 - this file. 7 - this file.
8api.txt
9 - The frame buffer API between applications and buffer devices.
8arkfb.txt 10arkfb.txt
9 - info on the fbdev driver for ARK Logic chips. 11 - info on the fbdev driver for ARK Logic chips.
10aty128fb.txt 12aty128fb.txt
@@ -51,12 +53,16 @@ sh7760fb.txt
51 - info on the SH7760/SH7763 integrated LCDC Framebuffer driver. 53 - info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
52sisfb.txt 54sisfb.txt
53 - info on the framebuffer device driver for various SiS chips. 55 - info on the framebuffer device driver for various SiS chips.
56sm501.txt
57 - info on the framebuffer device driver for sm501 videoframebuffer.
54sstfb.txt 58sstfb.txt
55 - info on the frame buffer driver for 3dfx' Voodoo Graphics boards. 59 - info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
56tgafb.txt 60tgafb.txt
57 - info on the TGA (DECChip 21030) frame buffer driver. 61 - info on the TGA (DECChip 21030) frame buffer driver.
58tridentfb.txt 62tridentfb.txt
59 info on the framebuffer driver for some Trident chip based cards. 63 info on the framebuffer driver for some Trident chip based cards.
64udlfb.txt
65 - Driver for DisplayLink USB 2.0 chips.
60uvesafb.txt 66uvesafb.txt
61 - info on the userspace VESA (VBE2+ compliant) frame buffer device. 67 - info on the userspace VESA (VBE2+ compliant) frame buffer device.
62vesafb.txt 68vesafb.txt
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 632211cbdd56..ac28149aede4 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,6 +2,8 @@
2 - this file (info on some of the filesystems supported by linux). 2 - this file (info on some of the filesystems supported by linux).
3Locking 3Locking
4 - info on locking rules as they pertain to Linux VFS. 4 - info on locking rules as they pertain to Linux VFS.
5Makefile
6 - Makefile for building the filesystems part of DocBook.
59p.txt 79p.txt
6 - 9p (v9fs) is an implementation of the Plan 9 remote fs protocol. 8 - 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
7adfs.txt 9adfs.txt
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 66eb6c8c5334..53f3b596ac0d 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@ nfs41-server.txt
12 - info on the Linux server implementation of NFSv4 minor version 1. 12 - info on the Linux server implementation of NFSv4 minor version 1.
13nfs-rdma.txt 13nfs-rdma.txt
14 - how to install and setup the Linux NFS/RDMA client and server software 14 - how to install and setup the Linux NFS/RDMA client and server software
15nfsd-admin-interfaces.txt
16 - Administrative interfaces for nfsd.
15nfsroot.txt 17nfsroot.txt
16 - short guide on setting up a diskless box with NFS root filesystem. 18 - short guide on setting up a diskless box with NFS root filesystem.
17pnfs.txt 19pnfs.txt
@@ -20,5 +22,5 @@ rpc-cache.txt
20 - introduction to the caching mechanisms in the sunrpc layer. 22 - introduction to the caching mechanisms in the sunrpc layer.
21idmapper.txt 23idmapper.txt
22 - information for configuring request-keys to be used by idmapper 24 - information for configuring request-keys to be used by idmapper
23knfsd-rpcgss.txt 25rpc-server-gss.txt
24 - Information on GSS authentication support in the NFS Server 26 - Information on GSS authentication support in the NFS Server
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c70e7a7638d1..0d85ac1935b7 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -8,8 +8,8 @@ reason, the kernel code must instantiate I2C devices explicitly. There are
8several ways to achieve this, depending on the context and requirements. 8several ways to achieve this, depending on the context and requirements.
9 9
10 10
11Method 1: Declare the I2C devices by bus number 11Method 1a: Declare the I2C devices by bus number
12----------------------------------------------- 12------------------------------------------------
13 13
14This method is appropriate when the I2C bus is a system bus as is the case 14This method is appropriate when the I2C bus is a system bus as is the case
15for many embedded systems. On such systems, each I2C bus has a number 15for many embedded systems. On such systems, each I2C bus has a number
@@ -51,6 +51,43 @@ The devices will be automatically unbound and destroyed when the I2C bus
51they sit on goes away (if ever.) 51they sit on goes away (if ever.)
52 52
53 53
54Method 1b: Declare the I2C devices via devicetree
55-------------------------------------------------
56
57This method has the same implications as method 1a. The declaration of I2C
58devices is done here via the devicetree, as subnodes of the master controller.
59
60Example:
61
62 i2c1: i2c@400a0000 {
63 /* ... master properties skipped ... */
64 clock-frequency = <100000>;
65
66 flash@50 {
67 compatible = "atmel,24c256";
68 reg = <0x50>;
69 };
70
71 pca9532: gpio@60 {
72 compatible = "nxp,pca9532";
73 gpio-controller;
74 #gpio-cells = <2>;
75 reg = <0x60>;
76 };
77 };
78
79Here, two devices are attached to the bus, which runs at 100 kHz. For
80additional properties which might be needed to set up the device, please refer
81to its devicetree documentation in Documentation/devicetree/bindings/.
82
83
84Method 1c: Declare the I2C devices via ACPI
85-------------------------------------------
86
87ACPI can also describe I2C devices. There is special documentation for this
88which is currently located at Documentation/acpi/enumeration.txt.
89
90
54Method 2: Instantiate the devices explicitly 91Method 2: Instantiate the devices explicitly
55-------------------------------------------- 92--------------------------------------------
56 93
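For comparison with methods 1b and 1c above, a minimal sketch of method 1a (declaring the same two devices by bus number from board code) might look like the following; the bus number, device names and addresses are purely illustrative.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical board file: devices sitting on I2C bus number 1. */
static struct i2c_board_info my_board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("24c256", 0x50),		/* EEPROM */
	},
	{
		I2C_BOARD_INFO("pca9532", 0x60),	/* LED/GPIO expander */
	},
};

static int __init my_board_i2c_init(void)
{
	/* Must be called before the corresponding bus is registered. */
	return i2c_register_board_info(1, my_board_i2c_devs,
				       ARRAY_SIZE(my_board_i2c_devs));
}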
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
index d6b778842b75..22f98ca79539 100644
--- a/Documentation/ide/00-INDEX
+++ b/Documentation/ide/00-INDEX
@@ -10,3 +10,5 @@ ide-tape.txt
10 - info on the IDE ATAPI streaming tape driver 10 - info on the IDE ATAPI streaming tape driver
11ide.txt 11ide.txt
12 - important info for users of ATA devices (IDE/EIDE disks and CD-ROMS). 12 - important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
13warm-plug-howto.txt
14 - using sysfs to remove and add IDE devices. \ No newline at end of file
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index fa688538e757..d13b9a9a9e00 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,15 @@
100-INDEX 100-INDEX
2 - This file 2 - This file
3acer-wmi.txt 3Makefile
4 - information on the Acer Laptop WMI Extras driver. 4 - Makefile for building dslm example program.
5asus-laptop.txt 5asus-laptop.txt
6 - information on the Asus Laptop Extras driver. 6 - information on the Asus Laptop Extras driver.
7disk-shock-protection.txt 7disk-shock-protection.txt
8 - information on hard disk shock protection. 8 - information on hard disk shock protection.
9dslm.c 9dslm.c
10 - Simple Disk Sleep Monitor program 10 - Simple Disk Sleep Monitor program
11hpfall.c
12 - (HP) laptop accelerometer program for disk protection.
11laptop-mode.txt 13laptop-mode.txt
12 - how to conserve battery power using laptop-mode. 14 - how to conserve battery power using laptop-mode.
13sony-laptop.txt 15sony-laptop.txt
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 1ecd1596633e..b4ef1f34e25f 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -1,3 +1,7 @@
100-INDEX
2 - This file
3leds-blinkm.txt
4 - Driver for BlinkM LED-devices.
1leds-class.txt 5leds-class.txt
2 - documents LED handling under Linux. 6 - documents LED handling under Linux.
3leds-lp3944.txt 7leds-lp3944.txt
@@ -12,3 +16,7 @@ leds-lp55xx.txt
12 - description about lp55xx common driver. 16 - description about lp55xx common driver.
13leds-lm3556.txt 17leds-lm3556.txt
14 - notes on how to use the leds-lm3556 driver. 18 - notes on how to use the leds-lm3556 driver.
19ledtrig-oneshot.txt
20 - One-shot LED trigger for both sporadic and dense events.
21ledtrig-transient.txt
22 - LED Transient Trigger, one shot timer activation.
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
index a014e9f00765..2be8c6b00e74 100644
--- a/Documentation/m68k/00-INDEX
+++ b/Documentation/m68k/00-INDEX
@@ -1,5 +1,7 @@
100-INDEX 100-INDEX
2 - this file 2 - this file
3README.buddha
4 - Amiga Buddha and Catweasel IDE Driver
3kernel-options.txt 5kernel-options.txt
4 - command line options for Linux/m68k 6 - command line options for Linux/m68k
5 7
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f11580f8719a..557b6ef70c26 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -6,8 +6,14 @@
6 - information on the 3Com Etherlink III Series Ethernet cards. 6 - information on the 3Com Etherlink III Series Ethernet cards.
76pack.txt 76pack.txt
8 - info on the 6pack protocol, an alternative to KISS for AX.25 8 - info on the 6pack protocol, an alternative to KISS for AX.25
9DLINK.txt 9LICENSE.qla3xxx
10 - info on the D-Link DE-600/DE-620 parallel port pocket adapters 10 - GPLv2 for QLogic Linux Networking HBA Driver
11LICENSE.qlge
12 - GPLv2 for QLogic Linux qlge NIC Driver
13LICENSE.qlcnic
14 - GPLv2 for QLogic Linux qlcnic NIC Driver
15Makefile
16 - Makefile for docsrc.
11PLIP.txt 17PLIP.txt
12 - PLIP: The Parallel Line Internet Protocol device driver 18 - PLIP: The Parallel Line Internet Protocol device driver
13README.ipw2100 19README.ipw2100
@@ -17,7 +23,7 @@ README.ipw2200
17README.sb1000 23README.sb1000
18 - info on General Instrument/NextLevel SURFboard1000 cable modem. 24 - info on General Instrument/NextLevel SURFboard1000 cable modem.
19alias.txt 25alias.txt
20 - info on using alias network devices 26 - info on using alias network devices.
21arcnet-hardware.txt 27arcnet-hardware.txt
22 - tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc. 28 - tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
23arcnet.txt 29arcnet.txt
@@ -80,7 +86,7 @@ framerelay.txt
80 - info on using Frame Relay/Data Link Connection Identifier (DLCI). 86 - info on using Frame Relay/Data Link Connection Identifier (DLCI).
81gen_stats.txt 87gen_stats.txt
82 - Generic networking statistics for netlink users. 88 - Generic networking statistics for netlink users.
83generic_hdlc.txt 89generic-hdlc.txt
84 - The generic High Level Data Link Control (HDLC) layer. 90 - The generic High Level Data Link Control (HDLC) layer.
85generic_netlink.txt 91generic_netlink.txt
86 - info on Generic Netlink 92 - info on Generic Netlink
@@ -88,6 +94,8 @@ gianfar.txt
88 - Gianfar Ethernet Driver. 94 - Gianfar Ethernet Driver.
89i40e.txt 95i40e.txt
90 - README for the Intel Ethernet Controller XL710 Driver (i40e). 96 - README for the Intel Ethernet Controller XL710 Driver (i40e).
97i40evf.txt
98 - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
91ieee802154.txt 99ieee802154.txt
92 - Linux IEEE 802.15.4 implementation, API and drivers 100 - Linux IEEE 802.15.4 implementation, API and drivers
93igb.txt 101igb.txt
@@ -102,6 +110,8 @@ ipddp.txt
102 - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation 110 - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
103iphase.txt 111iphase.txt
104 - Interphase PCI ATM (i)Chip IA Linux driver info. 112 - Interphase PCI ATM (i)Chip IA Linux driver info.
113ipsec.txt
114 - Note on not compressing IPSec payload and resulting failed policy check.
105ipv6.txt 115ipv6.txt
106 - Options to the ipv6 kernel module. 116 - Options to the ipv6 kernel module.
107ipvs-sysctl.txt 117ipvs-sysctl.txt
@@ -120,6 +130,8 @@ lapb-module.txt
120 - programming information of the LAPB module. 130 - programming information of the LAPB module.
121ltpc.txt 131ltpc.txt
122 - the Apple or Farallon LocalTalk PC card driver 132 - the Apple or Farallon LocalTalk PC card driver
133mac80211-auth-assoc-deauth.txt
134 - authentication and association / deauth-disassoc with mac80211
123mac80211-injection.txt 135mac80211-injection.txt
124 - HOWTO use packet injection with mac80211 136 - HOWTO use packet injection with mac80211
125multiqueue.txt 137multiqueue.txt
@@ -134,6 +146,10 @@ netdevices.txt
134 - info on network device driver functions exported to the kernel. 146 - info on network device driver functions exported to the kernel.
135netif-msg.txt 147netif-msg.txt
136 - Design of the network interface message level setting (NETIF_MSG_*). 148 - Design of the network interface message level setting (NETIF_MSG_*).
149netlink_mmap.txt
150 - memory mapped I/O with netlink
151nf_conntrack-sysctl.txt
152 - list of netfilter-sysctl knobs.
137nfc.txt 153nfc.txt
138 - The Linux Near Field Communication (NFS) subsystem. 154 - The Linux Near Field Communication (NFS) subsystem.
139openvswitch.txt 155openvswitch.txt
@@ -176,7 +192,7 @@ skfp.txt
176 - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. 192 - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
177smc9.txt 193smc9.txt
178 - the driver for SMC's 9000 series of Ethernet cards 194 - the driver for SMC's 9000 series of Ethernet cards
179spider-net.txt 195spider_net.txt
180 - README for the Spidernet Driver (as found in PS3 / Cell BE). 196 - README for the Spidernet Driver (as found in PS3 / Cell BE).
181stmmac.txt 197stmmac.txt
182 - README for the STMicro Synopsys Ethernet driver. 198 - README for the STMicro Synopsys Ethernet driver.
@@ -188,6 +204,8 @@ tcp.txt
188 - short blurb on how TCP output takes place. 204 - short blurb on how TCP output takes place.
189tcp-thin.txt 205tcp-thin.txt
190 - kernel tuning options for low rate 'thin' TCP streams. 206 - kernel tuning options for low rate 'thin' TCP streams.
207team.txt
208 - pointer to information for ethernet teaming devices.
191tlan.txt 209tlan.txt
192 - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. 210 - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
193tproxy.txt 211tproxy.txt
@@ -200,6 +218,8 @@ vortex.txt
200 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. 218 - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
201vxge.txt 219vxge.txt
202 - README for the Neterion X3100 PCIe Server Adapter. 220 - README for the Neterion X3100 PCIe Server Adapter.
221vxlan.txt
222 - Virtual extensible LAN overview
203x25.txt 223x25.txt
204 - general info on X.25 development. 224 - general info on X.25 development.
205x25-iface.txt 225x25-iface.txt
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b13101d..000000000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
1The 3Com Etherlink Plus (3c505) driver.
2
3This driver now uses DMA. There is currently no support for PIO operation.
4The default DMA channel is 6; this is _not_ autoprobed, so you must
5make sure you configure it correctly. If loading the driver as a
6module, you can do this with "modprobe 3c505 dma=n". If the driver is
7linked statically into the kernel, you must either use an "ether="
8statement on the command line, or change the definition of ELP_DMA in 3c505.h.
9
10The driver will warn you if it has to fall back on the compiled in
11default DMA channel.
12
13If no base address is given at boot time, the driver will autoprobe
14ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
15will try to probe for it.
16
17The driver can be used as a loadable module.
18
19Theoretically, one instance of the driver can now run multiple cards,
20in the standard way (when loading a module, say "modprobe 3c505
21io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
22this, though.
23
24The driver may now support revision 2 hardware; the dependency on
25being able to read the host control register has been removed. This
26is also untested, since I don't have a suitable card.
27
28Known problems:
29 I still see "DMA upload timed out" messages from time to time. These
30seem to be fairly non-fatal though.
31 The card is old and slow.
32
33To do:
34 Improve probe/setup code
35 Test multicast and promiscuous operation
36
37Authors:
38 The driver is mainly written by Craig Southeren, email
39 <craigs@ineluki.apana.org.au>.
40 Parts of the driver (adapting the driver to 1.1.4+ kernels,
41 IRQ/address detection, some changes) and this README by
42 Juha Laiho <jlaiho@ichaos.nullnet.fi>.
43 DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
44 Multicard support, Software configurable DMA, etc., by
45 Christopher Collins <ccollins@pcug.org.au>
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 0103e4b15b0e..ebff6ee52441 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -75,14 +75,26 @@ Before the controller can make use of the PHY, it has to get a reference to
75it. This framework provides the following APIs to get a reference to the PHY. 75it. This framework provides the following APIs to get a reference to the PHY.
76 76
77struct phy *phy_get(struct device *dev, const char *string); 77struct phy *phy_get(struct device *dev, const char *string);
78struct phy *phy_optional_get(struct device *dev, const char *string);
78struct phy *devm_phy_get(struct device *dev, const char *string); 79struct phy *devm_phy_get(struct device *dev, const char *string);
79 80struct phy *devm_phy_optional_get(struct device *dev, const char *string);
80phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot, 81
81the string arguments should contain the phy name as given in the dt data and 82phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can
82in the case of non-dt boot, it should contain the label of the PHY. 83be used to get the PHY. In the case of dt boot, the string arguments
83The only difference between the two APIs is that devm_phy_get associates the 84should contain the phy name as given in the dt data and in the case of
84device with the PHY using devres on successful PHY get. On driver detach, 85non-dt boot, it should contain the label of the PHY. The two
85release function is invoked on the the devres data and devres data is freed. 86devm_phy_get associates the device with the PHY using devres on
87successful PHY get. On driver detach, release function is invoked on
88the the devres data and devres data is freed. phy_optional_get and
89devm_phy_optional_get should be used when the phy is optional. These
90two functions will never return -ENODEV, but instead return NULL when
91the phy cannot be found.
92
93It should be noted that NULL is a valid phy reference. All phy
94consumer calls on the NULL phy become NOPs. That is, the release calls,
95the phy_init() and phy_exit() calls, and the phy_power_on() and
96phy_power_off() calls are all NOPs when applied to a NULL phy. The NULL
97phy is useful in devices for handling optional phy devices.
86 98
875. Releasing a reference to the PHY 995. Releasing a reference to the PHY
88 100
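As a rough usage sketch of the optional variants documented in the hunk above (the function and phandle names here are invented, and error handling is trimmed):

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* NULL (not an error) is returned when the phy simply is not there. */
	phy = devm_phy_optional_get(&pdev->dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real error, e.g. -EPROBE_DEFER */

	/* On a NULL phy the calls below are NOPs, so no NULL checks needed. */
	ret = phy_init(phy);
	if (ret)
		return ret;

	return phy_power_on(phy);
}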
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a4d682f54231..ad04cc8097ed 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -4,6 +4,8 @@ apm-acpi.txt
4 - basic info about the APM and ACPI support. 4 - basic info about the APM and ACPI support.
5basic-pm-debugging.txt 5basic-pm-debugging.txt
6 - Debugging suspend and resume 6 - Debugging suspend and resume
7charger-manager.txt
8 - Battery charger management.
7devices.txt 9devices.txt
8 - How drivers interact with system-wide power management 10 - How drivers interact with system-wide power management
9drivers-testing.txt 11drivers-testing.txt
@@ -22,6 +24,8 @@ pm_qos_interface.txt
22 - info on Linux PM Quality of Service interface 24 - info on Linux PM Quality of Service interface
23power_supply_class.txt 25power_supply_class.txt
24 - Tells userspace about battery, UPS, AC or DC power supply properties 26 - Tells userspace about battery, UPS, AC or DC power supply properties
27runtime_pm.txt
28 - Power management framework for I/O devices.
25s2ram.txt 29s2ram.txt
26 - How to get suspend to ram working (and debug it when it isn't) 30 - How to get suspend to ram working (and debug it when it isn't)
27states.txt 31states.txt
@@ -38,7 +42,5 @@ tricks.txt
38 - How to trick software suspend (to disk) into working when it isn't 42 - How to trick software suspend (to disk) into working when it isn't
39userland-swsusp.txt 43userland-swsusp.txt
40 - Experimental implementation of software suspend in userspace 44 - Experimental implementation of software suspend in userspace
41video_extension.txt
42 - ACPI video extensions
43video.txt 45video.txt
44 - Video issues during resume from suspend 46 - Video issues during resume from suspend
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index a74d0a84d329..4aba0436da65 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -117,6 +117,7 @@ static void usage(char *progname)
117 " -f val adjust the ptp clock frequency by 'val' ppb\n" 117 " -f val adjust the ptp clock frequency by 'val' ppb\n"
118 " -g get the ptp clock time\n" 118 " -g get the ptp clock time\n"
119 " -h prints this message\n" 119 " -h prints this message\n"
120 " -i val index for event/trigger\n"
120 " -k val measure the time offset between system and phc clock\n" 121 " -k val measure the time offset between system and phc clock\n"
121 " for 'val' times (Maximum 25)\n" 122 " for 'val' times (Maximum 25)\n"
122 " -p val enable output with a period of 'val' nanoseconds\n" 123 " -p val enable output with a period of 'val' nanoseconds\n"
@@ -154,6 +155,7 @@ int main(int argc, char *argv[])
154 int capabilities = 0; 155 int capabilities = 0;
155 int extts = 0; 156 int extts = 0;
156 int gettime = 0; 157 int gettime = 0;
158 int index = 0;
157 int oneshot = 0; 159 int oneshot = 0;
158 int pct_offset = 0; 160 int pct_offset = 0;
159 int n_samples = 0; 161 int n_samples = 0;
@@ -167,7 +169,7 @@ int main(int argc, char *argv[])
167 169
168 progname = strrchr(argv[0], '/'); 170 progname = strrchr(argv[0], '/');
169 progname = progname ? 1+progname : argv[0]; 171 progname = progname ? 1+progname : argv[0];
170 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) { 172 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
171 switch (c) { 173 switch (c) {
172 case 'a': 174 case 'a':
173 oneshot = atoi(optarg); 175 oneshot = atoi(optarg);
@@ -190,6 +192,9 @@ int main(int argc, char *argv[])
190 case 'g': 192 case 'g':
191 gettime = 1; 193 gettime = 1;
192 break; 194 break;
195 case 'i':
196 index = atoi(optarg);
197 break;
193 case 'k': 198 case 'k':
194 pct_offset = 1; 199 pct_offset = 1;
195 n_samples = atoi(optarg); 200 n_samples = atoi(optarg);
@@ -301,7 +306,7 @@ int main(int argc, char *argv[])
301 306
302 if (extts) { 307 if (extts) {
303 memset(&extts_request, 0, sizeof(extts_request)); 308 memset(&extts_request, 0, sizeof(extts_request));
304 extts_request.index = 0; 309 extts_request.index = index;
305 extts_request.flags = PTP_ENABLE_FEATURE; 310 extts_request.flags = PTP_ENABLE_FEATURE;
306 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) { 311 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
307 perror("PTP_EXTTS_REQUEST"); 312 perror("PTP_EXTTS_REQUEST");
@@ -375,7 +380,7 @@ int main(int argc, char *argv[])
375 return -1; 380 return -1;
376 } 381 }
377 memset(&perout_request, 0, sizeof(perout_request)); 382 memset(&perout_request, 0, sizeof(perout_request));
378 perout_request.index = 0; 383 perout_request.index = index;
379 perout_request.start.sec = ts.tv_sec + 2; 384 perout_request.start.sec = ts.tv_sec + 2;
380 perout_request.start.nsec = 0; 385 perout_request.start.nsec = 0;
381 perout_request.period.sec = 0; 386 perout_request.period.sec = 0;
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 3a2b96302ecc..10c874ebdfe5 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,11 +16,13 @@ Debugging390.txt
16 - hints for debugging on s390 systems. 16 - hints for debugging on s390 systems.
17driver-model.txt 17driver-model.txt
18 - information on s390 devices and the driver model. 18 - information on s390 devices and the driver model.
19kvm.txt
20 - ioctl calls to /dev/kvm on s390.
19monreader.txt 21monreader.txt
20 - information on accessing the z/VM monitor stream from Linux. 22 - information on accessing the z/VM monitor stream from Linux.
23qeth.txt
24 - HiperSockets Bridge Port Support.
21s390dbf.txt 25s390dbf.txt
22 - information on using the s390 debug feature. 26 - information on using the s390 debug feature.
23TAPE 27zfcpdump.txt
24 - information on the driver for channel-attached tapes.
25zfcpdump
26 - information on the s390 SCSI dump tool. 28 - information on the s390 SCSI dump tool.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 46702e4f89c9..eccf7ad2e7f9 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,6 +2,8 @@
2 - this file. 2 - this file.
3sched-arch.txt 3sched-arch.txt
4 - CPU Scheduler implementation hints for architecture specific code. 4 - CPU Scheduler implementation hints for architecture specific code.
5sched-bwc.txt
6 - CFS bandwidth control overview.
5sched-design-CFS.txt 7sched-design-CFS.txt
6 - goals, design and implementation of the Completely Fair Scheduler. 8 - goals, design and implementation of the Completely Fair Scheduler.
7sched-domains.txt 9sched-domains.txt
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index 2044be565d93..c4b978a72f78 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -36,6 +36,8 @@ NinjaSCSI.txt
36 - info on WorkBiT NinjaSCSI-32/32Bi driver 36 - info on WorkBiT NinjaSCSI-32/32Bi driver
37aacraid.txt 37aacraid.txt
38 - Driver supporting Adaptec RAID controllers 38 - Driver supporting Adaptec RAID controllers
39advansys.txt
40 - List of Advansys Host Adapters
39aha152x.txt 41aha152x.txt
40 - info on driver for Adaptec AHA152x based adapters 42 - info on driver for Adaptec AHA152x based adapters
41aic79xx.txt 43aic79xx.txt
@@ -44,6 +46,12 @@ aic7xxx.txt
44 - info on driver for Adaptec controllers 46 - info on driver for Adaptec controllers
45arcmsr_spec.txt 47arcmsr_spec.txt
46 - ARECA FIRMWARE SPEC (for IOP331 adapter) 48 - ARECA FIRMWARE SPEC (for IOP331 adapter)
49bfa.txt
50 - Brocade FC/FCOE adapter driver.
51bnx2fc.txt
52 - FCoE hardware offload for Broadcom network interfaces.
53cxgb3i.txt
54 - Chelsio iSCSI Linux Driver
47dc395x.txt 55dc395x.txt
48 - README file for the dc395x SCSI driver 56 - README file for the dc395x SCSI driver
49dpti.txt 57dpti.txt
@@ -52,18 +60,24 @@ dtc3x80.txt
52 - info on driver for DTC 2x80 based adapters 60 - info on driver for DTC 2x80 based adapters
53g_NCR5380.txt 61g_NCR5380.txt
54 - info on driver for NCR5380 and NCR53c400 based adapters 62 - info on driver for NCR5380 and NCR53c400 based adapters
63hpsa.txt
64 - HP Smart Array Controller SCSI driver.
55hptiop.txt 65hptiop.txt
56 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER 66 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
57in2000.txt 67in2000.txt
58 - info on in2000 driver 68 - info on in2000 driver
59libsas.txt 69libsas.txt
60 - Serial Attached SCSI management layer. 70 - Serial Attached SCSI management layer.
71link_power_management_policy.txt
72 - Link power management options.
61lpfc.txt 73lpfc.txt
62 - LPFC driver release notes 74 - LPFC driver release notes
63megaraid.txt 75megaraid.txt
64 - Common Management Module, shared code handling ioctls for LSI drivers 76 - Common Management Module, shared code handling ioctls for LSI drivers
65ncr53c8xx.txt 77ncr53c8xx.txt
66 - info on driver for NCR53c8xx based adapters 78 - info on driver for NCR53c8xx based adapters
79osd.txt
80 Object-Based Storage Device, command set introduction.
67osst.txt 81osst.txt
68 - info on driver for OnStream SC-x0 SCSI tape 82 - info on driver for OnStream SC-x0 SCSI tape
69ppa.txt 83ppa.txt
@@ -74,6 +88,8 @@ scsi-changer.txt
74 - README for the SCSI media changer driver 88 - README for the SCSI media changer driver
75scsi-generic.txt 89scsi-generic.txt
76 - info on the sg driver for generic (non-disk/CD/tape) SCSI devices. 90 - info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
91scsi-parameters.txt
92 - List of SCSI-parameters to pass to the kernel at module load-time.
77scsi.txt 93scsi.txt
78 - short blurb on using SCSI support as a module. 94 - short blurb on using SCSI support as a module.
79scsi_mid_low_api.txt 95scsi_mid_low_api.txt
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 1f1b22fbd739..f9c6b5ed03e7 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -4,10 +4,12 @@ README.cycladesZ
4 - info on Cyclades-Z firmware loading. 4 - info on Cyclades-Z firmware loading.
5digiepca.txt 5digiepca.txt
6 - info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards. 6 - info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
7hayes-esp.txt 7driver
8 - info on using the Hayes ESP serial driver. 8 - intro to the low level serial driver.
9moxa-smartio 9moxa-smartio
10 - file with info on installing/using Moxa multiport serial driver. 10 - file with info on installing/using Moxa multiport serial driver.
11n_gsm.txt
12 - GSM 0710 tty multiplexer howto.
11riscom8.txt 13riscom8.txt
12 - notes on using the RISCom/8 multi-port serial driver. 14 - notes on using the RISCom/8 multi-port serial driver.
13rocket.txt 15rocket.txt
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
new file mode 100644
index 000000000000..a128fa835512
--- /dev/null
+++ b/Documentation/spi/00-INDEX
@@ -0,0 +1,22 @@
100-INDEX
2 - this file.
3Makefile
4 - Makefile for the example sourcefiles.
5butterfly
6 - AVR Butterfly SPI driver overview and pin configuration.
7ep93xx_spi
8 - Basic EP93xx SPI driver configuration.
9pxa2xx
10 - PXA2xx SPI master controller driver, built around a spi_message FIFO and workqueue
11spidev
12 - Intro to the userspace API for spi devices
13spidev_fdx.c
14 - spidev example file
15spi-lm70llp
16 - Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
17spi-sc18is602
18 - NXP SC18IS602/603 I2C-bus to SPI bridge
19spi-summary
20 - (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
21spidev_test.c
22 - SPI testing utility.
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1e0da8..7982bcc4d151 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@ SPI MASTER METHODS
543 queuing transfers that arrive in the meantime. When the driver is 543 queuing transfers that arrive in the meantime. When the driver is
544 finished with this message, it must call 544 finished with this message, it must call
545 spi_finalize_current_message() so the subsystem can issue the next 545 spi_finalize_current_message() so the subsystem can issue the next
546 transfer. This may sleep. 546 message. This may sleep.
547
548 master->transfer_one(struct spi_master *master, struct spi_device *spi,
549 struct spi_transfer *transfer)
550 The subsystem calls the driver to perform a single transfer while
551 queuing transfers that arrive in the meantime. When the driver is
552 finished with this transfer, it must call
553 spi_finalize_current_transfer() so the subsystem can issue the next
554 transfer. This may sleep. Note: transfer_one and transfer_one_message
555 are mutually exclusive; when both are set, the generic subsystem does
556 not call your transfer_one callback.
557
558 Return values:
559 negative errno: error
560 0: transfer is finished
561 1: transfer is still in progress
547 562
548 DEPRECATED METHODS 563 DEPRECATED METHODS
549 564
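To make the transfer_one return-value convention documented in the hunk above concrete, here is a rough, hypothetical implementation; the my_* names and the 16-byte threshold are invented for illustration.

#include <linux/spi/spi.h>

/*
 * Sketch of a transfer_one callback.  Returning 0 tells the core the
 * transfer already finished; returning 1 tells it the transfer is still
 * in progress and spi_finalize_current_transfer() will be called later,
 * e.g. from an interrupt or DMA completion handler.
 */
static int my_transfer_one(struct spi_master *master, struct spi_device *spi,
			   struct spi_transfer *xfer)
{
	/* Hardware programming for the transfer would go here. */

	if (xfer->len < 16)
		return 0;	/* short transfer handled synchronously */

	return 1;		/* long transfer completes asynchronously */
}

/* Completion path (e.g. a DMA callback): hand control back to the core. */
static void my_transfer_complete(struct spi_master *master)
{
	spi_finalize_current_transfer(master);
}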
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ef2ccbf77fa2..6d042dc1cce0 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,6 +8,8 @@ hpet_example.c
8 - sample hpet timer test program 8 - sample hpet timer test program
9hrtimers.txt 9hrtimers.txt
10 - subsystem for high-resolution kernel timers 10 - subsystem for high-resolution kernel timers
11Makefile
12 - Build and link hpet_example
11NO_HZ.txt 13NO_HZ.txt
12 - Summary of the different methods for the scheduler clock-interrupts management. 14 - Summary of the different methods for the scheduler clock-interrupts management.
13timers-howto.txt 15timers-howto.txt
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 641ec9220179..fee9f2bf9c64 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -20,5 +20,7 @@ ppc-pv.txt
20 - the paravirtualization interface on PowerPC. 20 - the paravirtualization interface on PowerPC.
21review-checklist.txt 21review-checklist.txt
22 - review checklist for KVM patches. 22 - review checklist for KVM patches.
23s390-diag.txt
24 - Diagnose hypercall description (for IBM S/390)
23timekeeping.txt 25timekeeping.txt
24 - timekeeping virtualization for x86-based architectures. 26 - timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index a39d06680e1c..081c49777abb 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -16,8 +16,6 @@ hwpoison.txt
16 - explains what hwpoison is 16 - explains what hwpoison is
17ksm.txt 17ksm.txt
18 - how to use the Kernel Samepage Merging feature. 18 - how to use the Kernel Samepage Merging feature.
19locking
20 - info on how locking and synchronization is done in the Linux vm code.
21numa 19numa
22 - information about NUMA specific code in the Linux vm. 20 - information about NUMA specific code in the Linux vm.
23numa_memory_policy.txt 21numa_memory_policy.txt
@@ -32,6 +30,8 @@ slub.txt
32 - a short users guide for SLUB. 30 - a short users guide for SLUB.
33soft-dirty.txt 31soft-dirty.txt
34 - short explanation for soft-dirty PTEs 32 - short explanation for soft-dirty PTEs
33split_page_table_lock
34 - Separate per-table lock to improve scalability of the old page_table_lock.
35transhuge.txt 35transhuge.txt
36 - Transparent Hugepage Support, alternative way of using hugepages. 36 - Transparent Hugepage Support, alternative way of using hugepages.
37unevictable-lru.txt 37unevictable-lru.txt
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index d63fa024ac05..8330cf9325f0 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,7 +4,9 @@ ds2482
4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses. 4 - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
5ds2490 5ds2490
6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges. 6 - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
7mxc_w1 7mxc-w1
8 - W1 master controller driver found on Freescale MX2/MX3 SoCs 8 - W1 master controller driver found on Freescale MX2/MX3 SoCs
9omap-hdq
10 - HDQ/1-wire module of TI OMAP 2430/3430.
9w1-gpio 11w1-gpio
10 - GPIO 1-wire bus master driver. 12 - GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 75613c9ac4db..6e18c70c3474 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -4,3 +4,5 @@ w1_therm
4 - The Maxim/Dallas Semiconductor ds18*20 temperature sensor. 4 - The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
5w1_ds2423 5w1_ds2423
6 - The Maxim/Dallas Semiconductor ds2423 counter device. 6 - The Maxim/Dallas Semiconductor ds2423 counter device.
7w1_ds28e04
8 - The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index f37b46d34861..692264456f0f 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -1,6 +1,20 @@
100-INDEX 100-INDEX
2 - this file 2 - this file
3mtrr.txt 3boot.txt
4 - how to use x86 Memory Type Range Registers to increase performance 4 - List of boot protocol versions
5early-microcode.txt
6 - How to load microcode from an initrd-CPIO archive early to fix CPU issues.
7earlyprintk.txt
8 - Using earlyprintk with a USB2 debug port key.
9entry_64.txt
10 - Describe (some of the) kernel entry points for x86.
5exception-tables.txt 11exception-tables.txt
6 - why and how Linux kernel uses exception tables on x86 12 - why and how Linux kernel uses exception tables on x86
13mtrr.txt
14 - how to use x86 Memory Type Range Registers to increase performance
15pat.txt
16 - Page Attribute Table intro and API
17usb-legacy-support.txt
18 - how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
19zero-page.txt
20 - layout of the first page of memory.
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325b7461..6f6d956ac1c9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
7or if there is a problem with the translation. 7or if there is a problem with the translation.
8 8
9Maintainer: Will Deacon <will.deacon@arm.com> 9Maintainer: Will Deacon <will.deacon@arm.com>
10Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> 10Chinese maintainer: Fu Wei <wefu@redhat.com>
11--------------------------------------------------------------------- 11---------------------------------------------------------------------
12Documentation/arm64/booting.txt 的中文翻译 12Documentation/arm64/booting.txt 的中文翻译
13 13
@@ -16,9 +16,9 @@ Documentation/arm64/booting.txt 的中文翻译
16译存在问题,请è”系中文版维护者。 16译存在问题,请è”系中文版维护者。
17 17
18英文版维护者: Will Deacon <will.deacon@arm.com> 18英文版维护者: Will Deacon <will.deacon@arm.com>
19中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 19中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
20中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 20中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
21中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 21中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
22 22
23以下为正文 23以下为正文
24--------------------------------------------------------------------- 24---------------------------------------------------------------------
@@ -64,8 +64,8 @@ RAM,或å¯èƒ½ä½¿ç”¨å¯¹è¿™ä¸ªè®¾å¤‡å·²çŸ¥çš„ RAM ä¿¡æ¯ï¼Œè¿˜å¯èƒ½ä½¿ç”¨ä»»ä½•
64 64
65å¿…è¦æ€§: 强制 65å¿…è¦æ€§: 强制
66 66
67设备树数æ®å—(dtb)大å°å¿…é¡»ä¸å¤§äºŽ 2 MB,且ä½äºŽä»Žå†…核映åƒèµ·å§‹ç®—起第一个 67设备树数æ®å—(dtb)必须 8 字节对é½ï¼Œå¹¶ä½äºŽä»Žå†…核映åƒèµ·å§‹ç®—起第一个 512MB
68512MB 内的 2MB 边界上。这使得内核å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸­çš„å•ä¸ªèŠ‚æè¿°ç¬¦æ¥ 68内,且ä¸å¾—跨越 2MB 对é½è¾¹ç•Œã€‚这使得内核å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸­çš„å•ä¸ªèŠ‚æ述符æ¥
69映射此数æ®å—。 69映射此数æ®å—。
70 70
71 71
@@ -84,13 +84,23 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
84 84
85å¿…è¦æ€§: 强制 85å¿…è¦æ€§: 强制
86 86
87已解压的内核映åƒåŒ…å«ä¸€ä¸ª 32 字节的头,内容如下: 87已解压的内核映åƒåŒ…å«ä¸€ä¸ª 64 字节的头,内容如下:
88 88
89 u32 magic = 0x14000008; /* 跳转到 stext, å°ç«¯ */ 89 u32 code0; /* å¯æ‰§è¡Œä»£ç  */
90 u32 res0 = 0; /* ä¿ç•™ */ 90 u32 code1; /* å¯æ‰§è¡Œä»£ç  */
91 u64 text_offset; /* 映åƒè£…è½½å移 */ 91 u64 text_offset; /* 映åƒè£…è½½å移 */
92 u64 res0 = 0; /* ä¿ç•™ */
92 u64 res1 = 0; /* ä¿ç•™ */ 93 u64 res1 = 0; /* ä¿ç•™ */
93 u64 res2 = 0; /* ä¿ç•™ */ 94 u64 res2 = 0; /* ä¿ç•™ */
95 u64 res3 = 0; /* ä¿ç•™ */
96 u64 res4 = 0; /* ä¿ç•™ */
97 u32 magic = 0x644d5241; /* 魔数, å°ç«¯, "ARM\x64" */
98 u32 res5 = 0; /* ä¿ç•™ */
99
100
101映åƒå¤´æ³¨é‡Šï¼š
102
103- code0/code1 负责跳转到 stext.
94 104
95映åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM 105映åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM
96的起始地å€å¿…须是以 2MB 对é½çš„。 106的起始地å€å¿…须是以 2MB 对é½çš„。
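The 64-byte image header that this hunk describes can be summarized with the following C layout; the struct name is illustrative, while the fields and the magic value are taken from the list above.

#include <linux/types.h>

/* Illustrative view of the 64-byte AArch64 Image header described above. */
struct arm64_image_header {
	__le32 code0;		/* executable code, used to branch to stext */
	__le32 code1;		/* executable code */
	__le64 text_offset;	/* image load offset */
	__le64 res0;		/* reserved */
	__le64 res1;		/* reserved */
	__le64 res2;		/* reserved */
	__le64 res3;		/* reserved */
	__le64 res4;		/* reserved */
	__le32 magic;		/* 0x644d5241, little endian, "ARM\x64" */
	__le32 res5;		/* reserved */
};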
@@ -118,9 +128,9 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
118 外部高速缓存(如果存在)必须é…置并ç¦ç”¨ã€‚ 128 外部高速缓存(如果存在)必须é…置并ç¦ç”¨ã€‚
119 129
120- 架构计时器 130- 架构计时器
121 CNTFRQ 必须设定为计时器的频率ã 131 CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对æ‰æœ‰ CPU
122 如果在 EL1 模å¼ä¸‹è¿›å…¥å†…核,则 CNTHCTL_EL2 中的 EL1PCTEN (bit 0) 132 都一致的值。如果在 EL1 模å¼ä¸‹è¿›å…¥å†…核,则 CNTHCTL_EL2 中的
123 必须置ä½ã€‚ 133 EL1PCTEN (bit 0) 必须置ä½ã€‚
124 134
125- 一致性 135- 一致性
126 通过内核å¯åŠ¨çš„所有 CPU 在内核入å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域中。 136 通过内核å¯åŠ¨çš„所有 CPU 在内核入å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域中。
@@ -131,23 +141,40 @@ AArch64 内核当å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ­¤å¦‚果使用了压缩内
131 在进入内核映åƒçš„异常级中,所有构架中å¯å†™çš„系统寄存器必须通过软件 141 在进入内核映åƒçš„异常级中,所有构架中å¯å†™çš„系统寄存器必须通过软件
132 在一个更高的异常级别下åˆå§‹åŒ–,以防止在 未知 状æ€ä¸‹è¿è¡Œã€‚ 142 在一个更高的异常级别下åˆå§‹åŒ–,以防止在 未知 状æ€ä¸‹è¿è¡Œã€‚
133 143
144以上对于 CPU 模å¼ã€é«˜é€Ÿç¼“å­˜ã€MMUã€æž¶æž„计时器ã€ä¸€è‡´æ€§ã€ç³»ç»Ÿå¯„存器的
145å¿…è¦æ¡ä»¶æ述适用于所有 CPU。所有 CPU 必须在åŒä¸€å¼‚常级别跳入内核。
146
134引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…核入å£ï¼š 147引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…核入å£ï¼š
135 148
136- 主 CPU 必须直接跳入内核映åƒçš„第一æ¡æŒ‡ä»¤ã€‚通过此 CPU 传递的设备树 149- 主 CPU 必须直接跳入内核映åƒçš„第一æ¡æŒ‡ä»¤ã€‚通过此 CPU 传递的设备树
137 æ•°æ®å—必须在æ¯ä¸ª CPU 节点中包å«ä»¥ä¸‹å†…容: 150 æ•°æ®å—必须在æ¯ä¸ª CPU 节点中包å«ä¸€ä¸ª ‘enable-method’ 属性,所
138 151 支æŒçš„ enable-method 请è§ä¸‹æ–‡ã€‚
139 1ã€â€˜enable-method’属性。目å‰ï¼Œæ­¤å­—段支æŒçš„值仅为字符串“spin-tableâ€ã€‚
140
141 2ã€â€˜cpu-release-addr’标识一个 64-bitã€åˆå§‹åŒ–为零的内存ä½ç½®ã€‚
142 152
143 引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘属性,并在跳入内核入å£ä¹‹å‰å°†å…¶æ’å…¥ 153 引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘属性,并在跳入内核入å£ä¹‹å‰å°†å…¶æ’å…¥
144 æ•°æ®å—。 154 æ•°æ®å—。
145 155
146- 任何辅助 CPU 必须在内存ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘中的 /memreserve/ 域传递 156- enable-method 为 “spin-table†的 CPU 必须在它们的 CPU
157 节点中包å«ä¸€ä¸ª ‘cpu-release-addr’ 属性。这个属性标识了一个
158 64 ä½è‡ªç„¶å¯¹é½ä¸”åˆå§‹åŒ–为零的内存ä½ç½®ã€‚
159
160 这些 CPU 必须在内存ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘中的 /memreserve/ 域传递
147 给内核)中自旋于内核之外,轮询它们的 cpu-release-addr ä½ç½®ï¼ˆå¿…é¡» 161 给内核)中自旋于内核之外,轮询它们的 cpu-release-addr ä½ç½®ï¼ˆå¿…é¡»
148 包å«åœ¨ä¿ç•™åŒºä¸­ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°† 162 包å«åœ¨ä¿ç•™åŒºä¸­ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°†
149 å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼ 163 å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼
150 时,CPU 必须直接跳入此值所指å‘的地å€ã€‚ 164 时,CPU 必须跳入此值所指å‘的地å€ã€‚此值为一个å•ç‹¬çš„ 64 ä½å°ç«¯å€¼ï¼Œ
165 å› æ­¤ CPU 须在跳转å‰å°†æ‰€è¯»å–的值转æ¢ä¸ºå…¶æœ¬èº«çš„端模å¼ã€‚
166
167- enable-method 为 “psci†的 CPU ä¿æŒåœ¨å†…核外(比如,在
168 memory 节点中æ述为内核空间的内存区外,或在通过设备树 /memreserve/
169 域中æ述为内核ä¿ç•™åŒºçš„空间中)。内核将会å‘起在 ARM 文档(编å·
170 ARM DEN 0022A:用于 ARM 上的电æºçŠ¶æ€å调接å£ç³»ç»Ÿè½¯ä»¶ï¼‰ä¸­æè¿°çš„
171 CPU_ON 调用æ¥å°† CPU 带入内核。
172
173 *译者注:到文档翻译时,此文档已更新为 ARM DEN 0022B。
174
175 设备树必须包å«ä¸€ä¸ª ‘psci’ 节点,请å‚考以下文档:
176 Documentation/devicetree/bindings/arm/psci.txt
177
151 178
152- 辅助 CPU 通用寄存器设置 179- 辅助 CPU 通用寄存器设置
153 x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨) 180 x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨)
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283829f9..a782704c1cb5 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
7or if there is a problem with the translation. 7or if there is a problem with the translation.
8 8
9Maintainer: Catalin Marinas <catalin.marinas@arm.com> 9Maintainer: Catalin Marinas <catalin.marinas@arm.com>
10Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> 10Chinese maintainer: Fu Wei <wefu@redhat.com>
11--------------------------------------------------------------------- 11---------------------------------------------------------------------
12Documentation/arm64/memory.txt 的中文翻译 12Documentation/arm64/memory.txt 的中文翻译
13 13
@@ -16,9 +16,9 @@ Documentation/arm64/memory.txt 的中文翻译
16译存在问题,请è”系中文版维护者。 16译存在问题,请è”系中文版维护者。
17 17
18英文版维护者: Catalin Marinas <catalin.marinas@arm.com> 18英文版维护者: Catalin Marinas <catalin.marinas@arm.com>
19中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 19中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
20中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 20中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
21中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com> 21中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
22 22
23以下为正文 23以下为正文
24--------------------------------------------------------------------- 24---------------------------------------------------------------------
@@ -41,7 +41,7 @@ AArch64 Linux 使用页大å°ä¸º 4KB çš„ 3 级转æ¢è¡¨é…置,对于用户和å
41TTBR1 中,且从ä¸å†™å…¥ TTBR0。 41TTBR1 中,且从ä¸å†™å…¥ TTBR0。
42 42
43 43
44AArch64 Linux 内存布局: 44AArch64 Linux 在页大å°ä¸º 4KB 时的内存布局:
45 45
46èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” 46èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
47----------------------------------------------------------------------- 47-----------------------------------------------------------------------
@@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
55 55
56ffffffbe00000000	ffffffbffbbfffff	~8GB		[guard region, future vmmemap] 56ffffffbe00000000	ffffffbffbbfffff	~8GB		[guard region, future vmmemap]
57 57
58ffffffbffbc00000	ffffffbffbdfffff	2MB		earlyprintk device
59
58ffffffbffbe00000	ffffffbffbe0ffff	64KB		PCI I/O space 60ffffffbffbe00000	ffffffbffbe0ffff	64KB		PCI I/O space
59 61
60ffffffbbffff0000	ffffffbcffffffff	~2MB		[guard region] 62ffffffbffbe10000	ffffffbcffffffff	~2MB		[guard region]
61 63
62ffffffbffc000000	ffffffbfffffffff	64MB		modules 64ffffffbffc000000	ffffffbfffffffff	64MB		modules
63 65
64ffffffc000000000	ffffffffffffffff	256GB		kernel logical memory map 66ffffffc000000000	ffffffffffffffff	256GB		kernel logical memory map
65 67
66 68
69AArch64 Linux memory layout with 64KB pages:
70
71Start address		End address		Size		Use
72-----------------------------------------------------------------------
730000000000000000	000003ffffffffff	4TB		user space
74
75fffffc0000000000	fffffdfbfffeffff	~2TB		vmalloc
76
77fffffdfbffff0000	fffffdfbffffffff	64KB		[guard page]
78
79fffffdfc00000000	fffffdfdffffffff	8GB		vmemmap
80
81fffffdfe00000000	fffffdfffbbfffff	~8GB		[guard region, future vmmemap]
82
83fffffdfffbc00000	fffffdfffbdfffff	2MB		earlyprintk device
84
85fffffdfffbe00000	fffffdfffbe0ffff	64KB		PCI I/O space
86
87fffffdfffbe10000	fffffdfffbffffff	~2MB		[guard region]
88
89fffffdfffc000000	fffffdffffffffff	64MB		modules
90
91fffffe0000000000	ffffffffffffffff	2TB		kernel logical memory map
92
93
67Translation table lookup with 4KB pages: 94Translation table lookup with 4KB pages:
68 95
69+--------+--------+--------+--------+--------+--------+--------+--------+ 96+--------+--------+--------+--------+--------+--------+--------+--------+
@@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB kernel logical memory map
91 |                 |    +--------------------------> [41:29] L2 index (only 38:29 used) 118 |                 |    +--------------------------> [41:29] L2 index (only 38:29 used)
92 |                 +-------------------------------> [47:42] L1 index (not used) 119 |                 +-------------------------------> [47:42] L1 index (not used)
93 +-------------------------------------------------> [63] TTBR0/1 120 +-------------------------------------------------> [63] TTBR0/1
121
122当使用 KVM æ—¶, 管ç†ç¨‹åºï¼ˆhypervisor)在 EL2 中通过相对内核虚拟地å€çš„
123一个固定å移æ¥æ˜ å°„内核页(内核虚拟地å€çš„高 24 ä½è®¾ä¸ºé›¶ï¼‰:
124
125èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
126-----------------------------------------------------------------------
1270000004000000000 0000007fffffffff 256GB 在 HYP 中映射的内核对象
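As a rough companion to the 4KB translation table lookup diagrammed in this file, the sketch below (not part of the patch; names are illustrative) splits a virtual address into its per-level indices, assuming the standard 4KB granule with a 12-bit page offset and 9 index bits per level:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed 4KB/3-level layout: page offset [11:0], L3 index [20:12],
	 * L2 index [29:21], L1 index [38:30]. */
	static void decode_va_4k(uint64_t va)
	{
		unsigned int l1  = (va >> 30) & 0x1ff;
		unsigned int l2  = (va >> 21) & 0x1ff;
		unsigned int l3  = (va >> 12) & 0x1ff;
		unsigned int off = va & 0xfff;

		printf("VA %#llx -> L1 %u, L2 %u, L3 %u, page offset %#x\n",
		       (unsigned long long)va, l1, l2, l3, off);
	}

	int main(void)
	{
		decode_va_4k(0xffffffc000080000ULL); /* an address in the kernel logical map */
		return 0;
	}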
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..2664d1bd5a1c
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
1Chinese translated version of Documentation/arm64/tagged-pointers.txt
2
3If you have any comment or update to the content, please contact the
4original document maintainer directly. However, if you have a problem
5communicating in English you can also ask the Chinese maintainer for
6help. Contact the Chinese maintainer if this translation is outdated
7or if there is a problem with the translation.
8
9Maintainer: Will Deacon <will.deacon@arm.com>
10Chinese maintainer: Fu Wei <wefu@redhat.com>
11---------------------------------------------------------------------
12Chinese translation of Documentation/arm64/tagged-pointers.txt
13
14如果想评论或更新本文的内容,请直接è”系原文档的维护者。如果你使用英文
15交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘中文版维护者求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻
16译存在问题,请è”系中文版维护者。
17
18英文版维护者: Will Deacon <will.deacon@arm.com>
19中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
20中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
21中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
22
23以下为正文
24---------------------------------------------------------------------
25		Tagged virtual addresses in AArch64 Linux
26		=========================================
27
28作者: Will Deacon <will.deacon@arm.com>
29日期: 2013 年 06 月 12 日
30
31本文档简述了在 AArch64 地å€è½¬æ¢ç³»ç»Ÿä¸­æ供的带标记的虚拟地å€åŠå…¶åœ¨
32AArch64 Linux 中的潜在用途。
33
34内核æ供的地å€è½¬æ¢è¡¨é…置使通过 TTBR0 完æˆçš„虚拟地å€è½¬æ¢ï¼ˆå³ç”¨æˆ·ç©ºé—´
35映射),其虚拟地å€çš„最高 8 ä½ï¼ˆ63:56)会被转æ¢ç¡¬ä»¶æ‰€å¿½ç•¥ã€‚è¿™ç§æœºåˆ¶
36让这些ä½å¯ä¾›åº”用程åºè‡ªç”±ä½¿ç”¨ï¼Œå…¶æ³¨æ„事项如下:
37
38	(1) The kernel requires that all user-space addresses passed to EL1 carry a 0x00 tag.
39	    This means that any syscall parameters carrying user-space virtual addresses
40	    *must* have their top byte cleared before trapping into the kernel.
41
42	(2) Non-zero tags are not preserved when delivering signals. This means that signal handlers
43	    in applications that make use of tags cannot rely on the tags carried by the user-space
44	    virtual addresses in the fields of siginfo_t. One exception to this rule is
45	    when the signal is raised by the exception handler for a debug watchpoint, in which case
46	    the tag information will be preserved.
47
48	(3) Special care should be taken when using tagged pointers, since when two virtual addresses
49	    differ only in their top byte, a C compiler may well be unable to determine that they are different.
50
51此构架会阻止对带标记的 PC 指针的利用,因此在异常返回时,其高字节
52将被设置æˆä¸€ä¸ªä¸º “55†的扩展符。
diff --git a/MAINTAINERS b/MAINTAINERS
index b2cf5cfb4d29..df8869d49c3f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -538,7 +538,7 @@ F: arch/alpha/
538ALTERA UART/JTAG UART SERIAL DRIVERS 538ALTERA UART/JTAG UART SERIAL DRIVERS
539M: Tobias Klauser <tklauser@distanz.ch> 539M: Tobias Klauser <tklauser@distanz.ch>
540L: linux-serial@vger.kernel.org 540L: linux-serial@vger.kernel.org
541L: nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers) 541L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
542S: Maintained 542S: Maintained
543F: drivers/tty/serial/altera_uart.c 543F: drivers/tty/serial/altera_uart.c
544F: drivers/tty/serial/altera_jtaguart.c 544F: drivers/tty/serial/altera_jtaguart.c
@@ -1860,6 +1860,7 @@ F: drivers/net/ethernet/broadcom/bnx2x/
1860 1860
1861BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1861BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
1862M: Christian Daudt <bcm@fixthebug.org> 1862M: Christian Daudt <bcm@fixthebug.org>
1863M: Matt Porter <mporter@linaro.org>
1863L: bcm-kernel-feedback-list@broadcom.com 1864L: bcm-kernel-feedback-list@broadcom.com
1864T: git git://git.github.com/broadcom/bcm11351 1865T: git git://git.github.com/broadcom/bcm11351
1865S: Maintained 1866S: Maintained
@@ -2367,7 +2368,7 @@ F: include/linux/cpufreq.h
2367 2368
2368CPU FREQUENCY DRIVERS - ARM BIG LITTLE 2369CPU FREQUENCY DRIVERS - ARM BIG LITTLE
2369M: Viresh Kumar <viresh.kumar@linaro.org> 2370M: Viresh Kumar <viresh.kumar@linaro.org>
2370M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> 2371M: Sudeep Holla <sudeep.holla@arm.com>
2371L: cpufreq@vger.kernel.org 2372L: cpufreq@vger.kernel.org
2372L: linux-pm@vger.kernel.org 2373L: linux-pm@vger.kernel.org
2373W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php 2374W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -2408,8 +2409,10 @@ F: tools/power/cpupower/
2408 2409
2409CPUSETS 2410CPUSETS
2410M: Li Zefan <lizefan@huawei.com> 2411M: Li Zefan <lizefan@huawei.com>
2412L: cgroups@vger.kernel.org
2411W: http://www.bullopensource.org/cpuset/ 2413W: http://www.bullopensource.org/cpuset/
2412W: http://oss.sgi.com/projects/cpusets/ 2414W: http://oss.sgi.com/projects/cpusets/
2415T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
2413S: Maintained 2416S: Maintained
2414F: Documentation/cgroups/cpusets.txt 2417F: Documentation/cgroups/cpusets.txt
2415F: include/linux/cpuset.h 2418F: include/linux/cpuset.h
@@ -2608,9 +2611,9 @@ DC395x SCSI driver
2608M: Oliver Neukum <oliver@neukum.org> 2611M: Oliver Neukum <oliver@neukum.org>
2609M: Ali Akcaagac <aliakc@web.de> 2612M: Ali Akcaagac <aliakc@web.de>
2610M: Jamie Lenehan <lenehan@twibble.org> 2613M: Jamie Lenehan <lenehan@twibble.org>
2611W: http://twibble.org/dist/dc395x/
2612L: dc395x@twibble.org 2614L: dc395x@twibble.org
2613L: http://lists.twibble.org/mailman/listinfo/dc395x/ 2615W: http://twibble.org/dist/dc395x/
2616W: http://lists.twibble.org/mailman/listinfo/dc395x/
2614S: Maintained 2617S: Maintained
2615F: Documentation/scsi/dc395x.txt 2618F: Documentation/scsi/dc395x.txt
2616F: drivers/scsi/dc395x.* 2619F: drivers/scsi/dc395x.*
@@ -2857,7 +2860,7 @@ M: Jani Nikula <jani.nikula@linux.intel.com>
2857L: intel-gfx@lists.freedesktop.org 2860L: intel-gfx@lists.freedesktop.org
2858L: dri-devel@lists.freedesktop.org 2861L: dri-devel@lists.freedesktop.org
2859Q: http://patchwork.freedesktop.org/project/intel-gfx/ 2862Q: http://patchwork.freedesktop.org/project/intel-gfx/
2860T: git git://people.freedesktop.org/~danvet/drm-intel 2863T: git git://anongit.freedesktop.org/drm-intel
2861S: Supported 2864S: Supported
2862F: drivers/gpu/drm/i915/ 2865F: drivers/gpu/drm/i915/
2863F: include/drm/i915* 2866F: include/drm/i915*
@@ -3324,6 +3327,17 @@ S: Maintained
3324F: include/linux/netfilter_bridge/ 3327F: include/linux/netfilter_bridge/
3325F: net/bridge/ 3328F: net/bridge/
3326 3329
3330ETHERNET PHY LIBRARY
3331M: Florian Fainelli <f.fainelli@gmail.com>
3332L: netdev@vger.kernel.org
3333S: Maintained
3334F: include/linux/phy.h
3335F: include/linux/phy_fixed.h
3336F: drivers/net/phy/
3337F: Documentation/networking/phy.txt
3338F: drivers/of/of_mdio.c
3339F: drivers/of/of_net.c
3340
3327EXT2 FILE SYSTEM 3341EXT2 FILE SYSTEM
3328M: Jan Kara <jack@suse.cz> 3342M: Jan Kara <jack@suse.cz>
3329L: linux-ext4@vger.kernel.org 3343L: linux-ext4@vger.kernel.org
@@ -7196,7 +7210,7 @@ S: Maintained
7196F: drivers/net/ethernet/rdc/r6040.c 7210F: drivers/net/ethernet/rdc/r6040.c
7197 7211
7198RDS - RELIABLE DATAGRAM SOCKETS 7212RDS - RELIABLE DATAGRAM SOCKETS
7199M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com> 7213M: Chien Yen <chien.yen@oracle.com>
7200L: rds-devel@oss.oracle.com (moderated for non-subscribers) 7214L: rds-devel@oss.oracle.com (moderated for non-subscribers)
7201S: Supported 7215S: Supported
7202F: net/rds/ 7216F: net/rds/
@@ -8429,8 +8443,8 @@ TARGET SUBSYSTEM
8429M: Nicholas A. Bellinger <nab@linux-iscsi.org> 8443M: Nicholas A. Bellinger <nab@linux-iscsi.org>
8430L: linux-scsi@vger.kernel.org 8444L: linux-scsi@vger.kernel.org
8431L: target-devel@vger.kernel.org 8445L: target-devel@vger.kernel.org
8432L: http://groups.google.com/group/linux-iscsi-target-dev
8433W: http://www.linux-iscsi.org 8446W: http://www.linux-iscsi.org
8447W: http://groups.google.com/group/linux-iscsi-target-dev
8434T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 8448T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
8435S: Supported 8449S: Supported
8436F: drivers/target/ 8450F: drivers/target/
@@ -9715,7 +9729,6 @@ F: drivers/xen/*swiotlb*
9715XFS FILESYSTEM 9729XFS FILESYSTEM
9716P: Silicon Graphics Inc 9730P: Silicon Graphics Inc
9717M: Dave Chinner <david@fromorbit.com> 9731M: Dave Chinner <david@fromorbit.com>
9718M: Ben Myers <bpm@sgi.com>
9719M: xfs@oss.sgi.com 9732M: xfs@oss.sgi.com
9720L: xfs@oss.sgi.com 9733L: xfs@oss.sgi.com
9721W: http://oss.sgi.com/projects/xfs 9734W: http://oss.sgi.com/projects/xfs
diff --git a/Makefile b/Makefile
index 606ef7c4a544..81f1bf390310 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc4
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -605,10 +605,11 @@ endif
605ifdef CONFIG_CC_STACKPROTECTOR_REGULAR 605ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
606 stackp-flag := -fstack-protector 606 stackp-flag := -fstack-protector
607 ifeq ($(call cc-option, $(stackp-flag)),) 607 ifeq ($(call cc-option, $(stackp-flag)),)
608 $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \ 608 $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
609 -fstack-protector not supported by compiler)) 609 -fstack-protector not supported by compiler)
610 endif 610 endif
611else ifdef CONFIG_CC_STACKPROTECTOR_STRONG 611else
612ifdef CONFIG_CC_STACKPROTECTOR_STRONG
612 stackp-flag := -fstack-protector-strong 613 stackp-flag := -fstack-protector-strong
613 ifeq ($(call cc-option, $(stackp-flag)),) 614 ifeq ($(call cc-option, $(stackp-flag)),)
614 $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \ 615 $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
@@ -618,6 +619,7 @@ else
618 # Force off for distro compilers that enable stack protector by default. 619 # Force off for distro compilers that enable stack protector by default.
619 stackp-flag := $(call cc-option, -fno-stack-protector) 620 stackp-flag := $(call cc-option, -fno-stack-protector)
620endif 621endif
622endif
621KBUILD_CFLAGS += $(stackp-flag) 623KBUILD_CFLAGS += $(stackp-flag)
622 624
623# This warning generated too much noise in a regular build. 625# This warning generated too much noise in a regular build.
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9d6a8b485e0..032030361bef 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb
38dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb 38dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb
39dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb 39dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
40# sama5d3 40# sama5d3
41dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb
41dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb 42dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb
42dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb 43dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
43dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb 44dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
@@ -208,7 +209,8 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
208 omap3-n900.dtb \ 209 omap3-n900.dtb \
209 omap3-n9.dtb \ 210 omap3-n9.dtb \
210 omap3-n950.dtb \ 211 omap3-n950.dtb \
211 omap3-tobi.dtb \ 212 omap3-overo-tobi.dtb \
213 omap3-overo-storm-tobi.dtb \
212 omap3-gta04.dtb \ 214 omap3-gta04.dtb \
213 omap3-igep0020.dtb \ 215 omap3-igep0020.dtb \
214 omap3-igep0030.dtb \ 216 omap3-igep0030.dtb \
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 4718ec4a4dbf..486880b74831 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -121,7 +121,7 @@
121 ti,model = "AM335x-EVMSK"; 121 ti,model = "AM335x-EVMSK";
122 ti,audio-codec = <&tlv320aic3106>; 122 ti,audio-codec = <&tlv320aic3106>;
123 ti,mcasp-controller = <&mcasp1>; 123 ti,mcasp-controller = <&mcasp1>;
124 ti,codec-clock-rate = <24576000>; 124 ti,codec-clock-rate = <24000000>;
125 ti,audio-routing = 125 ti,audio-routing =
126 "Headphone Jack", "HPLOUT", 126 "Headphone Jack", "HPLOUT",
127 "Headphone Jack", "HPROUT"; 127 "Headphone Jack", "HPROUT";
@@ -256,6 +256,12 @@
256 >; 256 >;
257 }; 257 };
258 258
259 mmc1_pins: pinmux_mmc1_pins {
260 pinctrl-single,pins = <
261 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
262 >;
263 };
264
259 mcasp1_pins: mcasp1_pins { 265 mcasp1_pins: mcasp1_pins {
260 pinctrl-single,pins = < 266 pinctrl-single,pins = <
261 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */ 267 0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
@@ -456,6 +462,9 @@
456 status = "okay"; 462 status = "okay";
457 vmmc-supply = <&vmmc_reg>; 463 vmmc-supply = <&vmmc_reg>;
458 bus-width = <4>; 464 bus-width = <4>;
465 pinctrl-names = "default";
466 pinctrl-0 = <&mmc1_pins>;
467 cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
459}; 468};
460 469
461&sham { 470&sham {
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 66609684d41b..9480cf891f8c 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -23,6 +23,7 @@
23 gpio0 = &gpio0; 23 gpio0 = &gpio0;
24 gpio1 = &gpio1; 24 gpio1 = &gpio1;
25 gpio2 = &gpio2; 25 gpio2 = &gpio2;
26 eth3 = &eth3;
26 }; 27 };
27 28
28 cpus { 29 cpus {
@@ -291,7 +292,7 @@
291 interrupts = <91>; 292 interrupts = <91>;
292 }; 293 };
293 294
294 ethernet@34000 { 295 eth3: ethernet@34000 {
295 compatible = "marvell,armada-370-neta"; 296 compatible = "marvell,armada-370-neta";
296 reg = <0x34000 0x4000>; 297 reg = <0x34000 0x4000>;
297 interrupts = <14>; 298 interrupts = <14>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 000000000000..ce1375595e5f
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
1/*
2 * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board
3 *
4 * Copyright (C) 2014 Atmel,
5 * 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
6 *
7 * Licensed under GPLv2 or later.
8 */
9/dts-v1/;
10#include "sama5d36.dtsi"
11
12/ {
13 model = "SAMA5D3 Xplained";
14 compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5";
15
16 chosen {
17 bootargs = "console=ttyS0,115200";
18 };
19
20 memory {
21 reg = <0x20000000 0x10000000>;
22 };
23
24 ahb {
25 apb {
26 mmc0: mmc@f0000000 {
27 pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
28 status = "okay";
29 slot@0 {
30 reg = <0>;
31 bus-width = <8>;
32 cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
33 };
34 };
35
36 spi0: spi@f0004000 {
37 cs-gpios = <&pioD 13 0>;
38 status = "okay";
39 };
40
41 can0: can@f000c000 {
42 status = "okay";
43 };
44
45 i2c0: i2c@f0014000 {
46 status = "okay";
47 };
48
49 i2c1: i2c@f0018000 {
50 status = "okay";
51 };
52
53 macb0: ethernet@f0028000 {
54 phy-mode = "rgmii";
55 status = "okay";
56 };
57
58 usart0: serial@f001c000 {
59 status = "okay";
60 };
61
62 usart1: serial@f0020000 {
63 pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>;
64 status = "okay";
65 };
66
67 uart0: serial@f0024000 {
68 status = "okay";
69 };
70
71 mmc1: mmc@f8000000 {
72 pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
73 status = "okay";
74 slot@0 {
75 reg = <0>;
76 bus-width = <4>;
77 cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>;
78 };
79 };
80
81 spi1: spi@f8008000 {
82 cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
83 status = "okay";
84 };
85
86 adc0: adc@f8018000 {
87 pinctrl-0 = <
88 &pinctrl_adc0_adtrg
89 &pinctrl_adc0_ad0
90 &pinctrl_adc0_ad1
91 &pinctrl_adc0_ad2
92 &pinctrl_adc0_ad3
93 &pinctrl_adc0_ad4
94 &pinctrl_adc0_ad5
95 &pinctrl_adc0_ad6
96 &pinctrl_adc0_ad7
97 &pinctrl_adc0_ad8
98 &pinctrl_adc0_ad9
99 >;
100 status = "okay";
101 };
102
103 i2c2: i2c@f801c000 {
104 dmas = <0>, <0>; /* Do not use DMA for i2c2 */
105 status = "okay";
106 };
107
108 macb1: ethernet@f802c000 {
109 phy-mode = "rmii";
110 status = "okay";
111 };
112
113 dbgu: serial@ffffee00 {
114 status = "okay";
115 };
116
117 pinctrl@fffff200 {
118 board {
119 pinctrl_mmc0_cd: mmc0_cd {
120 atmel,pins =
121 <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
122 };
123
124 pinctrl_mmc1_cd: mmc1_cd {
125 atmel,pins =
126 <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
127 };
128
129 pinctrl_usba_vbus: usba_vbus {
130 atmel,pins =
131 <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
132 };
133 };
134 };
135
136 pmc: pmc@fffffc00 {
137 main: mainck {
138 clock-frequency = <12000000>;
139 };
140 };
141 };
142
143 nand0: nand@60000000 {
144 nand-bus-width = <8>;
145 nand-ecc-mode = "hw";
146 atmel,has-pmecc;
147 atmel,pmecc-cap = <4>;
148 atmel,pmecc-sector-size = <512>;
149 nand-on-flash-bbt;
150 status = "okay";
151
152 at91bootstrap@0 {
153 label = "at91bootstrap";
154 reg = <0x0 0x40000>;
155 };
156
157 bootloader@40000 {
158 label = "bootloader";
159 reg = <0x40000 0x80000>;
160 };
161
162 bootloaderenv@c0000 {
163 label = "bootloader env";
164 reg = <0xc0000 0xc0000>;
165 };
166
167 dtb@180000 {
168 label = "device tree";
169 reg = <0x180000 0x80000>;
170 };
171
172 kernel@200000 {
173 label = "kernel";
174 reg = <0x200000 0x600000>;
175 };
176
177 rootfs@800000 {
178 label = "rootfs";
179 reg = <0x800000 0x0f800000>;
180 };
181 };
182
183 usb0: gadget@00500000 {
184 atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */
185 pinctrl-names = "default";
186 pinctrl-0 = <&pinctrl_usba_vbus>;
187 status = "okay";
188 };
189
190 usb1: ohci@00600000 {
191 num-ports = <3>;
192 atmel,vbus-gpio = <0
193 &pioE 3 GPIO_ACTIVE_LOW
194 &pioE 4 GPIO_ACTIVE_LOW
195 >;
196 status = "okay";
197 };
198
199 usb2: ehci@00700000 {
200 status = "okay";
201 };
202 };
203
204 gpio_keys {
205 compatible = "gpio-keys";
206
207 bp3 {
208 label = "PB_USER";
209 gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
210 linux,code = <0x104>;
211 gpio-key,wakeup;
212 };
213 };
214
215 leds {
216 compatible = "gpio-leds";
217
218 d2 {
219 label = "d2";
220 gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */
221 linux,default-trigger = "heartbeat";
222 };
223
224 d3 {
225 label = "d3";
226 gpios = <&pioE 24 GPIO_ACTIVE_HIGH>;
227 };
228 };
229};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0042f73068b0..fece8665fb63 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
523 }; 523 };
524 524
525 i2c0: i2c@fff88000 { 525 i2c0: i2c@fff88000 {
526 compatible = "atmel,at91sam9263-i2c"; 526 compatible = "atmel,at91sam9260-i2c";
527 reg = <0xfff88000 0x100>; 527 reg = <0xfff88000 0x100>;
528 interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>; 528 interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>;
529 #address-cells = <1>; 529 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6f0166..924a6a6ffd0f 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
124 nand-on-flash-bbt; 124 nand-on-flash-bbt;
125 status = "okay"; 125 status = "okay";
126 }; 126 };
127
128 usb0: ohci@00500000 {
129 status = "okay";
130 };
127 }; 131 };
128 132
129 leds { 133 leds {
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 2b76524f4aa7..187fd46b7b5e 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -379,15 +379,6 @@
379 #clock-cells = <1>; 379 #clock-cells = <1>;
380 }; 380 };
381 381
382 pmu_intc: pmu-interrupt-ctrl@d0050 {
383 compatible = "marvell,dove-pmu-intc";
384 interrupt-controller;
385 #interrupt-cells = <1>;
386 reg = <0xd0050 0x8>;
387 interrupts = <33>;
388 marvell,#interrupts = <7>;
389 };
390
391 pinctrl: pin-ctrl@d0200 { 382 pinctrl: pin-ctrl@d0200 {
392 compatible = "marvell,dove-pinctrl"; 383 compatible = "marvell,dove-pinctrl";
393 reg = <0xd0200 0x10>; 384 reg = <0xd0200 0x10>;
@@ -610,8 +601,6 @@
610 rtc: real-time-clock@d8500 { 601 rtc: real-time-clock@d8500 {
611 compatible = "marvell,orion-rtc"; 602 compatible = "marvell,orion-rtc";
612 reg = <0xd8500 0x20>; 603 reg = <0xd8500 0x20>;
613 interrupt-parent = <&pmu_intc>;
614 interrupts = <5>;
615 }; 604 };
616 605
617 gpio2: gpio-ctrl@e8400 { 606 gpio2: gpio-ctrl@e8400 {
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7cd53f3..5bfae54fb780 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
52 }; 52 };
53 }; 53 };
54 54
55 codec: spdif-transmitter {
56 compatible = "linux,spdif-dit";
57 pinctrl-names = "default";
58 pinctrl-0 = <&pinctrl_hummingboard_spdif>;
59 };
60
61 sound-spdif { 55 sound-spdif {
62 compatible = "fsl,imx-audio-spdif"; 56 compatible = "fsl,imx-audio-spdif";
63 model = "imx-spdif"; 57 model = "imx-spdif";
@@ -111,7 +105,7 @@
111 }; 105 };
112 106
113 pinctrl_hummingboard_spdif: hummingboard-spdif { 107 pinctrl_hummingboard_spdif: hummingboard-spdif {
114 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 108 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
115 }; 109 };
116 110
117 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { 111 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@@ -142,6 +136,8 @@
142}; 136};
143 137
144&spdif { 138&spdif {
139 pinctrl-names = "default";
140 pinctrl-0 = <&pinctrl_hummingboard_spdif>;
145 status = "okay"; 141 status = "okay";
146}; 142};
147 143
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b311f6..c2a24888a276 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
46 }; 46 };
47 }; 47 };
48 48
49 codec: spdif-transmitter {
50 compatible = "linux,spdif-dit";
51 pinctrl-names = "default";
52 pinctrl-0 = <&pinctrl_cubox_i_spdif>;
53 };
54
55 sound-spdif { 49 sound-spdif {
56 compatible = "fsl,imx-audio-spdif"; 50 compatible = "fsl,imx-audio-spdif";
57 model = "imx-spdif"; 51 model = "imx-spdif";
@@ -89,7 +83,7 @@
89 }; 83 };
90 84
91 pinctrl_cubox_i_spdif: cubox-i-spdif { 85 pinctrl_cubox_i_spdif: cubox-i-spdif {
92 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 86 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
93 }; 87 };
94 88
95 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { 89 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@@ -121,6 +115,8 @@
121}; 115};
122 116
123&spdif { 117&spdif {
118 pinctrl-names = "default";
119 pinctrl-0 = <&pinctrl_cubox_i_spdif>;
124 status = "okay"; 120 status = "okay";
125}; 121};
126 122
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index b9b55c95a566..c551e4af4d83 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -32,7 +32,7 @@
32 aux-button { 32 aux-button {
33 label = "aux"; 33 label = "aux";
34 linux,code = <169>; 34 linux,code = <169>;
35 gpios = <&gpio1 7 GPIO_ACTIVE_LOW>; 35 gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
36 gpio-key,wakeup; 36 gpio-key,wakeup;
37 }; 37 };
38 }; 38 };
@@ -92,6 +92,8 @@
92 bmp085@77 { 92 bmp085@77 {
93 compatible = "bosch,bmp085"; 93 compatible = "bosch,bmp085";
94 reg = <0x77>; 94 reg = <0x77>;
95 interrupt-parent = <&gpio4>;
96 interrupts = <17 IRQ_TYPE_EDGE_RISING>;
95 }; 97 };
96 98
97 /* leds */ 99 /* leds */
@@ -141,8 +143,8 @@
141 pinctrl-names = "default"; 143 pinctrl-names = "default";
142 pinctrl-0 = <&mmc1_pins>; 144 pinctrl-0 = <&mmc1_pins>;
143 vmmc-supply = <&vmmc1>; 145 vmmc-supply = <&vmmc1>;
144 vmmc_aux-supply = <&vsim>;
145 bus-width = <4>; 146 bus-width = <4>;
147 ti,non-removable;
146}; 148};
147 149
148&mmc2 { 150&mmc2 {
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 39828ce464ee..9938b5dc1909 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -14,5 +14,5 @@
14 14
15/ { 15/ {
16 model = "Nokia N9"; 16 model = "Nokia N9";
17 compatible = "nokia,omap3-n9", "ti,omap3"; 17 compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
18}; 18};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 6fc85f963530..0bf40c90faba 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz> 2 * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz>
3 * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi> 3 * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi>
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 (or later) as 6 * it under the terms of the GNU General Public License version 2 (or later) as
@@ -13,7 +13,7 @@
13 13
14/ { 14/ {
15 model = "Nokia N900"; 15 model = "Nokia N900";
16 compatible = "nokia,omap3-n900", "ti,omap3"; 16 compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
17 17
18 cpus { 18 cpus {
19 cpu@0 { 19 cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index b076a526b999..261c5589bfa3 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -14,5 +14,5 @@
14 14
15/ { 15/ {
16 model = "Nokia N950"; 16 model = "Nokia N950";
17 compatible = "nokia,omap3-n950", "ti,omap3"; 17 compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
18}; 18};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
new file mode 100644
index 000000000000..966b5c9cd96a
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Tobi expansion board is manufactured by Gumstix Inc.
11 */
12
13/dts-v1/;
14
15#include "omap36xx.dtsi"
16#include "omap3-overo-tobi-common.dtsi"
17
18/ {
19 model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
20 compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
21};
22
diff --git a/arch/arm/boot/dts/omap3-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
index 7e4ad2aec37a..4edc013a91c1 100644
--- a/arch/arm/boot/dts/omap3-tobi.dts
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -13,9 +13,6 @@
13#include "omap3-overo.dtsi" 13#include "omap3-overo.dtsi"
14 14
15/ { 15/ {
16 model = "TI OMAP3 Gumstix Overo on Tobi";
17 compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3";
18
19 leds { 16 leds {
20 compatible = "gpio-leds"; 17 compatible = "gpio-leds";
21 heartbeat { 18 heartbeat {
diff --git a/arch/arm/boot/dts/omap3-overo-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi.dts
new file mode 100644
index 000000000000..de5653e1b5ca
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-tobi.dts
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Tobi expansion board is manufactured by Gumstix Inc.
11 */
12
13/dts-v1/;
14
15#include "omap34xx.dtsi"
16#include "omap3-overo-tobi-common.dtsi"
17
18/ {
19 model = "OMAP35xx Gumstix Overo on Tobi";
20 compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
21};
22
diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi
index a461d2fd1fb0..597099907f8e 100644
--- a/arch/arm/boot/dts/omap3-overo.dtsi
+++ b/arch/arm/boot/dts/omap3-overo.dtsi
@@ -9,9 +9,6 @@
9/* 9/*
10 * The Gumstix Overo must be combined with an expansion board. 10 * The Gumstix Overo must be combined with an expansion board.
11 */ 11 */
12/dts-v1/;
13
14#include "omap34xx.dtsi"
15 12
16/ { 13/ {
17 pwmleds { 14 pwmleds {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 52447c17537a..3d5faf85f51b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1228,7 +1228,7 @@
1228 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 1228 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
1229 reg = <0x00600000 0x100000>; 1229 reg = <0x00600000 0x100000>;
1230 interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; 1230 interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
1231 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 1231 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
1232 <&uhpck>; 1232 <&uhpck>;
1233 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 1233 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
1234 status = "disabled"; 1234 status = "disabled";
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d871ed1..6cb9b68e2188 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
188 msp2: msp@80117000 { 188 msp2: msp@80117000 {
189 pinctrl-names = "default"; 189 pinctrl-names = "default";
190 pinctrl-0 = <&msp2_default_mode>; 190 pinctrl-0 = <&msp2_default_mode>;
191 status = "okay";
192 }; 191 };
193 192
194 msp3: msp@80125000 { 193 msp3: msp@80125000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0eba152..10666ca8aee1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
315 ranges; 315 ranges;
316 316
317 emac: ethernet@01c0b000 { 317 emac: ethernet@01c0b000 {
318 compatible = "allwinner,sun4i-emac"; 318 compatible = "allwinner,sun4i-a10-emac";
319 reg = <0x01c0b000 0x1000>; 319 reg = <0x01c0b000 0x1000>;
320 interrupts = <55>; 320 interrupts = <55>;
321 clocks = <&ahb_gates 17>; 321 clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
323 }; 323 };
324 324
325 mdio@01c0b080 { 325 mdio@01c0b080 {
326 compatible = "allwinner,sun4i-mdio"; 326 compatible = "allwinner,sun4i-a10-mdio";
327 reg = <0x01c0b080 0x14>; 327 reg = <0x01c0b080 0x14>;
328 status = "disabled"; 328 status = "disabled";
329 #address-cells = <1>; 329 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054857a4..64961595e8d6 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
278 ranges; 278 ranges;
279 279
280 emac: ethernet@01c0b000 { 280 emac: ethernet@01c0b000 {
281 compatible = "allwinner,sun4i-emac"; 281 compatible = "allwinner,sun4i-a10-emac";
282 reg = <0x01c0b000 0x1000>; 282 reg = <0x01c0b000 0x1000>;
283 interrupts = <55>; 283 interrupts = <55>;
284 clocks = <&ahb_gates 17>; 284 clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
286 }; 286 };
287 287
288 mdio@01c0b080 { 288 mdio@01c0b080 {
289 compatible = "allwinner,sun4i-mdio"; 289 compatible = "allwinner,sun4i-a10-mdio";
290 reg = <0x01c0b080 0x14>; 290 reg = <0x01c0b080 0x14>;
291 status = "disabled"; 291 status = "disabled";
292 #address-cells = <1>; 292 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066f0d98..9ff09484847b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
340 ranges; 340 ranges;
341 341
342 emac: ethernet@01c0b000 { 342 emac: ethernet@01c0b000 {
343 compatible = "allwinner,sun4i-emac"; 343 compatible = "allwinner,sun4i-a10-emac";
344 reg = <0x01c0b000 0x1000>; 344 reg = <0x01c0b000 0x1000>;
345 interrupts = <0 55 4>; 345 interrupts = <0 55 4>;
346 clocks = <&ahb_gates 17>; 346 clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
348 }; 348 };
349 349
350 mdio@01c0b080 { 350 mdio@01c0b080 {
351 compatible = "allwinner,sun4i-mdio"; 351 compatible = "allwinner,sun4i-a10-mdio";
352 reg = <0x01c0b080 0x14>; 352 reg = <0x01c0b080 0x14>;
353 status = "disabled"; 353 status = "disabled";
354 #address-cells = <1>; 354 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 389e987ec281..44ec401ec366 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -57,6 +57,8 @@
57 resets = <&tegra_car 27>; 57 resets = <&tegra_car 27>;
58 reset-names = "dc"; 58 reset-names = "dc";
59 59
60 nvidia,head = <0>;
61
60 rgb { 62 rgb {
61 status = "disabled"; 63 status = "disabled";
62 }; 64 };
@@ -72,6 +74,8 @@
72 resets = <&tegra_car 26>; 74 resets = <&tegra_car 26>;
73 reset-names = "dc"; 75 reset-names = "dc";
74 76
77 nvidia,head = <1>;
78
75 rgb { 79 rgb {
76 status = "disabled"; 80 status = "disabled";
77 }; 81 };
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 480ecda3416b..48d2a7f4d0c0 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -94,6 +94,8 @@
94 resets = <&tegra_car 27>; 94 resets = <&tegra_car 27>;
95 reset-names = "dc"; 95 reset-names = "dc";
96 96
97 nvidia,head = <0>;
98
97 rgb { 99 rgb {
98 status = "disabled"; 100 status = "disabled";
99 }; 101 };
@@ -109,6 +111,8 @@
109 resets = <&tegra_car 26>; 111 resets = <&tegra_car 26>;
110 reset-names = "dc"; 112 reset-names = "dc";
111 113
114 nvidia,head = <1>;
115
112 rgb { 116 rgb {
113 status = "disabled"; 117 status = "disabled";
114 }; 118 };
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 9104224124ee..1e156d9d0506 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -28,7 +28,7 @@
28 compatible = "nvidia,cardhu", "nvidia,tegra30"; 28 compatible = "nvidia,cardhu", "nvidia,tegra30";
29 29
30 aliases { 30 aliases {
31 rtc0 = "/i2c@7000d000/tps6586x@34"; 31 rtc0 = "/i2c@7000d000/tps65911@2d";
32 rtc1 = "/rtc@7000e000"; 32 rtc1 = "/rtc@7000e000";
33 }; 33 };
34 34
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index ed8e7700b46d..19a84e933f4e 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -170,6 +170,8 @@
170 resets = <&tegra_car 27>; 170 resets = <&tegra_car 27>;
171 reset-names = "dc"; 171 reset-names = "dc";
172 172
173 nvidia,head = <0>;
174
173 rgb { 175 rgb {
174 status = "disabled"; 176 status = "disabled";
175 }; 177 };
@@ -185,6 +187,8 @@
185 resets = <&tegra_car 26>; 187 resets = <&tegra_car 26>;
186 reset-names = "dc"; 188 reset-names = "dc";
187 189
190 nvidia,head = <1>;
191
188 rgb { 192 rgb {
189 status = "disabled"; 193 status = "disabled";
190 }; 194 };
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
deleted file mode 100644
index 3f123ecc9dd7..000000000000
--- a/arch/arm/boot/dts/testcases/tests.dtsi
+++ /dev/null
@@ -1,2 +0,0 @@
1/include/ "tests-phandle.dtsi"
2/include/ "tests-interrupts.dtsi"
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index f43907c40c93..65f657711323 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
1/include/ "versatile-ab.dts" 1#include <versatile-ab.dts>
2 2
3/ { 3/ {
4 model = "ARM Versatile PB"; 4 model = "ARM Versatile PB";
@@ -47,4 +47,4 @@
47 }; 47 };
48}; 48};
49 49
50/include/ "testcases/tests.dtsi" 50#include <testcases.dtsi>
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 845bc745706b..ee6982976d66 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y
29CONFIG_ARCH_OMAP4=y 29CONFIG_ARCH_OMAP4=y
30CONFIG_SOC_OMAP5=y 30CONFIG_SOC_OMAP5=y
31CONFIG_SOC_AM33XX=y 31CONFIG_SOC_AM33XX=y
32CONFIG_SOC_DRA7XX=y
32CONFIG_SOC_AM43XX=y 33CONFIG_SOC_AM43XX=y
33CONFIG_ARCH_ROCKCHIP=y 34CONFIG_ARCH_ROCKCHIP=y
34CONFIG_ARCH_SOCFPGA=y 35CONFIG_ARCH_SOCFPGA=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
212static inline void __flush_icache_all(void) 212static inline void __flush_icache_all(void)
213{ 213{
214 __flush_icache_preferred(); 214 __flush_icache_preferred();
215 dsb();
215} 216}
216 217
217/* 218/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
120/* 120/*
121 * 2nd stage PTE definitions for LPAE. 121 * 2nd stage PTE definitions for LPAE.
122 */ 122 */
123#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ 123#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
124#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ 124#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
125#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ 125#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
126#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 126#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
127#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 127#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
128 128
129#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 129#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
130#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
131
132#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
130 133
131/* 134/*
132 * Hyp-mode PL2 PTE definitions for LPAE. 135 * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
37 37
38static inline void dsb_sev(void) 38static inline void dsb_sev(void)
39{ 39{
40#if __LINUX_ARM_ARCH__ >= 7 40
41 __asm__ __volatile__ ( 41 dsb(ishst);
42 "dsb ishst\n" 42 __asm__(SEV);
43 SEV
44 );
45#else
46 __asm__ __volatile__ (
47 "mcr p15, 0, %0, c7, c10, 4\n"
48 SEV
49 : : "r" (0)
50 );
51#endif
52} 43}
53 44
54/* 45/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df9761de6d..1e8b030dbefd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
731 kernel_data.end = virt_to_phys(_end - 1); 731 kernel_data.end = virt_to_phys(_end - 1);
732 732
733 for_each_memblock(memory, region) { 733 for_each_memblock(memory, region) {
734 res = memblock_virt_alloc_low(sizeof(*res), 0); 734 res = memblock_virt_alloc(sizeof(*res), 0);
735 res->name = "System RAM"; 735 res->name = "System RAM";
736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); 736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; 737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 8f4649b301b2..1abae5f6a418 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,7 +8,7 @@ config ARCH_HI3xxx
8 select CLKSRC_OF 8 select CLKSRC_OF
9 select GENERIC_CLOCKEVENTS 9 select GENERIC_CLOCKEVENTS
10 select HAVE_ARM_SCU 10 select HAVE_ARM_SCU
11 select HAVE_ARM_TWD 11 select HAVE_ARM_TWD if SMP
12 select HAVE_SMP 12 select HAVE_SMP
13 select PINCTRL 13 select PINCTRL
14 select PINCTRL_SINGLE 14 select PINCTRL_SINGLE
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index befcaf5d0574..ec419649320f 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -101,11 +101,9 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
101obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o 101obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
102obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o 102obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
103 103
104ifeq ($(CONFIG_PM),y)
105obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o 104obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
106# i.MX6SL reuses i.MX6Q code 105# i.MX6SL reuses i.MX6Q code
107obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o 106obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
108endif
109 107
110# i.MX5 based machines 108# i.MX5 based machines
111obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o 109obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index af2e582d2b74..4d677f442539 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
482 if (IS_ENABLED(CONFIG_PCI_IMX6)) 482 if (IS_ENABLED(CONFIG_PCI_IMX6))
483 clk_set_parent(clk[lvds1_sel], clk[sata_ref]); 483 clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
484 484
485 /* Set initial power mode */
486 imx6q_set_lpm(WAIT_CLOCKED);
487
485 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 488 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
486 base = of_iomap(np, 0); 489 base = of_iomap(np, 0);
487 WARN_ON(!base); 490 WARN_ON(!base);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 3781a1853998..4c86f3035205 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
266 /* Audio-related clocks configuration */ 266 /* Audio-related clocks configuration */
267 clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); 267 clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
268 268
269 /* Set initial power mode */
270 imx6q_set_lpm(WAIT_CLOCKED);
271
269 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); 272 np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
270 base = of_iomap(np, 0); 273 base = of_iomap(np, 0);
271 WARN_ON(!base); 274 WARN_ON(!base);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 59c3b9b26bb4..baf439dc22d8 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -144,13 +144,11 @@ void imx6q_set_chicken_bit(void);
144void imx_cpu_die(unsigned int cpu); 144void imx_cpu_die(unsigned int cpu);
145int imx_cpu_kill(unsigned int cpu); 145int imx_cpu_kill(unsigned int cpu);
146 146
147#ifdef CONFIG_PM
148void imx6q_pm_init(void); 147void imx6q_pm_init(void);
149void imx6q_pm_set_ccm_base(void __iomem *base); 148void imx6q_pm_set_ccm_base(void __iomem *base);
149#ifdef CONFIG_PM
150void imx5_pm_init(void); 150void imx5_pm_init(void);
151#else 151#else
152static inline void imx6q_pm_init(void) {}
153static inline void imx6q_pm_set_ccm_base(void __iomem *base) {}
154static inline void imx5_pm_init(void) {} 152static inline void imx5_pm_init(void) {}
155#endif 153#endif
156 154
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 9d47adc078aa..7a9b98589db7 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -236,8 +236,6 @@ void __init imx6q_pm_init(void)
236 regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, 236 regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
237 IMX6Q_GPR1_GINT); 237 IMX6Q_GPR1_GINT);
238 238
239 /* Set initial power mode */
240 imx6q_set_lpm(WAIT_CLOCKED);
241 239
242 suspend_set_ops(&imx6q_pm_ops); 240 suspend_set_ops(&imx6q_pm_ops);
243} 241}
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ba470d64493b..3795ae28a613 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -2,7 +2,6 @@ config ARCH_MOXART
2 bool "MOXA ART SoC" if ARCH_MULTI_V4T 2 bool "MOXA ART SoC" if ARCH_MULTI_V4T
3 select CPU_FA526 3 select CPU_FA526
4 select ARM_DMA_MEM_BUFFERABLE 4 select ARM_DMA_MEM_BUFFERABLE
5 select DMA_OF
6 select USE_OF 5 select USE_OF
7 select CLKSRC_OF 6 select CLKSRC_OF
8 select CLKSRC_MMIO 7 select CLKSRC_MMIO
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 91449c5cb70f..85089d821982 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -156,6 +156,7 @@ static struct omap_usb_config nokia770_usb_config __initdata = {
156 .register_dev = 1, 156 .register_dev = 1,
157 .hmc_mode = 16, 157 .hmc_mode = 16,
158 .pins[0] = 6, 158 .pins[0] = 6,
159 .extcon = "tahvo-usb",
159}; 160};
160 161
161#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) 162#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 653b489479e0..0af7ca02314d 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -50,11 +50,12 @@ config SOC_OMAP5
50 bool "TI OMAP5" 50 bool "TI OMAP5"
51 depends on ARCH_MULTI_V7 51 depends on ARCH_MULTI_V7
52 select ARCH_OMAP2PLUS 52 select ARCH_OMAP2PLUS
53 select ARCH_HAS_OPP
53 select ARM_CPU_SUSPEND if PM 54 select ARM_CPU_SUSPEND if PM
54 select ARM_GIC 55 select ARM_GIC
55 select CPU_V7 56 select CPU_V7
56 select HAVE_ARM_SCU if SMP 57 select HAVE_ARM_SCU if SMP
57 select HAVE_ARM_TWD if LOCAL_TIMERS 58 select HAVE_ARM_TWD if SMP
58 select HAVE_SMP 59 select HAVE_SMP
59 select HAVE_ARM_ARCH_TIMER 60 select HAVE_ARM_ARCH_TIMER
60 select ARM_ERRATA_798181 if SMP 61 select ARM_ERRATA_798181 if SMP
@@ -63,6 +64,7 @@ config SOC_AM33XX
63 bool "TI AM33XX" 64 bool "TI AM33XX"
64 depends on ARCH_MULTI_V7 65 depends on ARCH_MULTI_V7
65 select ARCH_OMAP2PLUS 66 select ARCH_OMAP2PLUS
67 select ARCH_HAS_OPP
66 select ARM_CPU_SUSPEND if PM 68 select ARM_CPU_SUSPEND if PM
67 select CPU_V7 69 select CPU_V7
68 select MULTI_IRQ_HANDLER 70 select MULTI_IRQ_HANDLER
@@ -72,6 +74,7 @@ config SOC_AM43XX
72 depends on ARCH_MULTI_V7 74 depends on ARCH_MULTI_V7
73 select CPU_V7 75 select CPU_V7
74 select ARCH_OMAP2PLUS 76 select ARCH_OMAP2PLUS
77 select ARCH_HAS_OPP
75 select MULTI_IRQ_HANDLER 78 select MULTI_IRQ_HANDLER
76 select ARM_GIC 79 select ARM_GIC
77 select MACH_OMAP_GENERIC 80 select MACH_OMAP_GENERIC
@@ -80,6 +83,7 @@ config SOC_DRA7XX
80 bool "TI DRA7XX" 83 bool "TI DRA7XX"
81 depends on ARCH_MULTI_V7 84 depends on ARCH_MULTI_V7
82 select ARCH_OMAP2PLUS 85 select ARCH_OMAP2PLUS
86 select ARCH_HAS_OPP
83 select ARM_CPU_SUSPEND if PM 87 select ARM_CPU_SUSPEND if PM
84 select ARM_GIC 88 select ARM_GIC
85 select CPU_V7 89 select CPU_V7
@@ -268,9 +272,6 @@ config MACH_OMAP_3430SDP
268 default y 272 default y
269 select OMAP_PACKAGE_CBB 273 select OMAP_PACKAGE_CBB
270 274
271config MACH_NOKIA_N800
272 bool
273
274config MACH_NOKIA_N810 275config MACH_NOKIA_N810
275 bool 276 bool
276 277
@@ -281,7 +282,6 @@ config MACH_NOKIA_N8X0
281 bool "Nokia N800/N810" 282 bool "Nokia N800/N810"
282 depends on SOC_OMAP2420 283 depends on SOC_OMAP2420
283 default y 284 default y
284 select MACH_NOKIA_N800
285 select MACH_NOKIA_N810 285 select MACH_NOKIA_N810
286 select MACH_NOKIA_N810_WIMAX 286 select MACH_NOKIA_N810_WIMAX
287 select OMAP_PACKAGE_ZAC 287 select OMAP_PACKAGE_ZAC
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index d24926e6340f..ab43755364f5 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1339 of_property_read_bool(np, "gpmc,time-para-granularity"); 1339 of_property_read_bool(np, "gpmc,time-para-granularity");
1340} 1340}
1341 1341
1342#ifdef CONFIG_MTD_NAND 1342#if IS_ENABLED(CONFIG_MTD_NAND)
1343 1343
1344static const char * const nand_xfer_types[] = { 1344static const char * const nand_xfer_types[] = {
1345 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", 1345 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
@@ -1429,7 +1429,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
1429} 1429}
1430#endif 1430#endif
1431 1431
1432#ifdef CONFIG_MTD_ONENAND 1432#if IS_ENABLED(CONFIG_MTD_ONENAND)
1433static int gpmc_probe_onenand_child(struct platform_device *pdev, 1433static int gpmc_probe_onenand_child(struct platform_device *pdev,
1434 struct device_node *child) 1434 struct device_node *child)
1435{ 1435{
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index d408b15b4fbf..af432b191255 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -179,15 +179,6 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
179 .length = L4_EMU_34XX_SIZE, 179 .length = L4_EMU_34XX_SIZE,
180 .type = MT_DEVICE 180 .type = MT_DEVICE
181 }, 181 },
182#if defined(CONFIG_DEBUG_LL) && \
183 (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
184 {
185 .virtual = ZOOM_UART_VIRT,
186 .pfn = __phys_to_pfn(ZOOM_UART_BASE),
187 .length = SZ_1M,
188 .type = MT_DEVICE
189 },
190#endif
191}; 182};
192#endif 183#endif
193 184
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index c9f309ae88c5..8b90c4f2d430 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,6 +30,7 @@
30 30
31#include <mach/gumstix.h> 31#include <mach/gumstix.h>
32#include <mach/mfp-pxa25x.h> 32#include <mach/mfp-pxa25x.h>
33#include <mach/irqs.h>
33#include <linux/platform_data/video-pxafb.h> 34#include <linux/platform_data/video-pxafb.h>
34 35
35#include "generic.h" 36#include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 954641e6c8b1..1b0825911e62 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -14,6 +14,8 @@
14#ifndef ASM_ARCH_BALLOON3_H 14#ifndef ASM_ARCH_BALLOON3_H
15#define ASM_ARCH_BALLOON3_H 15#define ASM_ARCH_BALLOON3_H
16 16
17#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
18
17enum balloon3_features { 19enum balloon3_features {
18 BALLOON3_FEATURE_OHCI, 20 BALLOON3_FEATURE_OHCI,
19 BALLOON3_FEATURE_MMC, 21 BALLOON3_FEATURE_MMC,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index f3c3493b468d..c030d955bbd7 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -13,6 +13,7 @@
13#ifndef __ASM_ARCH_CORGI_H 13#ifndef __ASM_ARCH_CORGI_H
14#define __ASM_ARCH_CORGI_H 1 14#define __ASM_ARCH_CORGI_H 1
15 15
16#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
16 17
17/* 18/*
18 * Corgi (Non Standard) GPIO Definitions 19 * Corgi (Non Standard) GPIO Definitions
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 2628e7b72116..00cfbbbf73f7 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -11,6 +11,8 @@
11#ifndef CSB726_H 11#ifndef CSB726_H
12#define CSB726_H 12#define CSB726_H
13 13
14#include "irqs.h" /* PXA_GPIO_TO_IRQ */
15
14#define CSB726_GPIO_IRQ_LAN 52 16#define CSB726_GPIO_IRQ_LAN 52
15#define CSB726_GPIO_IRQ_SM501 53 17#define CSB726_GPIO_IRQ_SM501 53
16#define CSB726_GPIO_MMC_DETECT 100 18#define CSB726_GPIO_MMC_DETECT 100
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index dba14b6503ad..f7df27bbb42e 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -6,6 +6,7 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include "irqs.h" /* PXA_GPIO_TO_IRQ */
9 10
10/* BTRESET - Reset line to Bluetooth module, active low signal. */ 11/* BTRESET - Reset line to Bluetooth module, active low signal. */
11#define GPIO_GUMSTIX_BTRESET 7 12#define GPIO_GUMSTIX_BTRESET 7
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 22a96f87232b..7e63f4680271 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -23,6 +23,7 @@
23 * IDP hardware. 23 * IDP hardware.
24 */ 24 */
25 25
26#include "irqs.h" /* PXA_GPIO_TO_IRQ */
26 27
27#define IDP_FLASH_PHYS (PXA_CS0_PHYS) 28#define IDP_FLASH_PHYS (PXA_CS0_PHYS)
28#define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) 29#define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h
index 2c4471336570..b184f296023b 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/include/mach/palmld.h
@@ -13,6 +13,8 @@
13#ifndef _INCLUDE_PALMLD_H_ 13#ifndef _INCLUDE_PALMLD_H_
14#define _INCLUDE_PALMLD_H_ 14#define _INCLUDE_PALMLD_H_
15 15
16#include "irqs.h" /* PXA_GPIO_TO_IRQ */
17
16/** HERE ARE GPIOs **/ 18/** HERE ARE GPIOs **/
17 19
18/* GPIOs */ 20/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h
index 0bd4f036c72f..e342c5921405 100644
--- a/arch/arm/mach-pxa/include/mach/palmt5.h
+++ b/arch/arm/mach-pxa/include/mach/palmt5.h
@@ -15,6 +15,8 @@
15#ifndef _INCLUDE_PALMT5_H_ 15#ifndef _INCLUDE_PALMT5_H_
16#define _INCLUDE_PALMT5_H_ 16#define _INCLUDE_PALMT5_H_
17 17
18#include "irqs.h" /* PXA_GPIO_TO_IRQ */
19
18/** HERE ARE GPIOs **/ 20/** HERE ARE GPIOs **/
19 21
20/* GPIOs */ 22/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
index c383a21680b6..81c727b3cfd2 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -16,6 +16,8 @@
16#ifndef _INCLUDE_PALMTC_H_ 16#ifndef _INCLUDE_PALMTC_H_
17#define _INCLUDE_PALMTC_H_ 17#define _INCLUDE_PALMTC_H_
18 18
19#include "irqs.h" /* PXA_GPIO_TO_IRQ */
20
19/** HERE ARE GPIOs **/ 21/** HERE ARE GPIOs **/
20 22
21/* GPIOs */ 23/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index f2e530380253..92bc1f05300d 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -16,6 +16,8 @@
16#ifndef _INCLUDE_PALMTX_H_ 16#ifndef _INCLUDE_PALMTX_H_
17#define _INCLUDE_PALMTX_H_ 17#define _INCLUDE_PALMTX_H_
18 18
19#include "irqs.h" /* PXA_GPIO_TO_IRQ */
20
19/** HERE ARE GPIOs **/ 21/** HERE ARE GPIOs **/
20 22
21/* GPIOs */ 23/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h
index 6bf28de228bd..86ebd7b6c960 100644
--- a/arch/arm/mach-pxa/include/mach/pcm027.h
+++ b/arch/arm/mach-pxa/include/mach/pcm027.h
@@ -23,6 +23,8 @@
23 * Definitions of CPU card resources only 23 * Definitions of CPU card resources only
24 */ 24 */
25 25
26#include "irqs.h" /* PXA_GPIO_TO_IRQ */
27
26/* phyCORE-PXA270 (PCM027) Interrupts */ 28/* phyCORE-PXA270 (PCM027) Interrupts */
27#define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) 29#define PCM027_IRQ(x) (IRQ_BOARD_START + (x))
28#define PCM027_BTDET_IRQ PCM027_IRQ(0) 30#define PCM027_BTDET_IRQ PCM027_IRQ(0)
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
index 0260aaa2fc17..7e544c14967e 100644
--- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <mach/pcm027.h> 22#include <mach/pcm027.h>
23#include "irqs.h" /* PXA_GPIO_TO_IRQ */
23 24
24/* 25/*
25 * definitions relevant only when the PCM-990 26 * definitions relevant only when the PCM-990
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index f32ff75dcca8..b56b19351a03 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -15,6 +15,8 @@
15#ifndef __ASM_ARCH_POODLE_H 15#ifndef __ASM_ARCH_POODLE_H
16#define __ASM_ARCH_POODLE_H 1 16#define __ASM_ARCH_POODLE_H 1
17 17
18#include "irqs.h" /* PXA_GPIO_TO_IRQ */
19
18/* 20/*
19 * GPIOs 21 * GPIOs
20 */ 22 */
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 0bfe6507c95d..25c9f62e46aa 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -15,8 +15,8 @@
15#define __ASM_ARCH_SPITZ_H 1 15#define __ASM_ARCH_SPITZ_H 1
16#endif 16#endif
17 17
18#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */
18#include <linux/fb.h> 19#include <linux/fb.h>
19#include <linux/gpio.h>
20 20
21/* Spitz/Akita GPIOs */ 21/* Spitz/Akita GPIOs */
22 22
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 2bb0e862598c..0497d95cef25 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -13,6 +13,8 @@
13#ifndef _ASM_ARCH_TOSA_H_ 13#ifndef _ASM_ARCH_TOSA_H_
14#define _ASM_ARCH_TOSA_H_ 1 14#define _ASM_ARCH_TOSA_H_ 1
15 15
16#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
17
16/* TOSA Chip selects */ 18/* TOSA Chip selects */
17#define TOSA_LCDC_PHYS PXA_CS4_PHYS 19#define TOSA_LCDC_PHYS PXA_CS4_PHYS
18/* Internel Scoop */ 20/* Internel Scoop */
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h
index d2ca01053f69..ae3ca013afab 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/include/mach/trizeps4.h
@@ -10,6 +10,8 @@
10#ifndef _TRIPEPS4_H_ 10#ifndef _TRIPEPS4_H_
11#define _TRIPEPS4_H_ 11#define _TRIPEPS4_H_
12 12
13#include "irqs.h" /* PXA_GPIO_TO_IRQ */
14
13/* physical memory regions */ 15/* physical memory regions */
14#define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ 16#define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */
15#define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ 17#define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index f70583fee59f..29997bde277d 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -38,6 +38,7 @@
38#include <linux/mtd/physmap.h> 38#include <linux/mtd/physmap.h>
39#include <linux/usb/gpio_vbus.h> 39#include <linux/usb/gpio_vbus.h>
40#include <linux/reboot.h> 40#include <linux/reboot.h>
41#include <linux/regulator/fixed.h>
41#include <linux/regulator/max1586.h> 42#include <linux/regulator/max1586.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/i2c/pxa-i2c.h> 44#include <linux/i2c/pxa-i2c.h>
@@ -714,6 +715,10 @@ static struct gpio global_gpios[] = {
714 { GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" }, 715 { GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" },
715}; 716};
716 717
718static struct regulator_consumer_supply fixed_5v0_consumers[] = {
719 REGULATOR_SUPPLY("power", "pwm-backlight"),
720};
721
717static void __init mioa701_machine_init(void) 722static void __init mioa701_machine_init(void)
718{ 723{
719 int rc; 724 int rc;
@@ -753,6 +758,10 @@ static void __init mioa701_machine_init(void)
753 pxa_set_i2c_info(&i2c_pdata); 758 pxa_set_i2c_info(&i2c_pdata);
754 pxa27x_set_i2c_power_info(NULL); 759 pxa27x_set_i2c_power_info(NULL);
755 pxa_set_camera_info(&mioa701_pxacamera_platform_data); 760 pxa_set_camera_info(&mioa701_pxacamera_platform_data);
761
762 regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
763 ARRAY_SIZE(fixed_5v0_consumers),
764 5000000);
756} 765}
757 766
758static void mioa701_machine_exit(void) 767static void mioa701_machine_exit(void)
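The mioa701 hunks register a fixed, always-on 5.0 V supply and tie the pwm-backlight device's "power" consumer to it, using the generic helper from <linux/regulator/fixed.h>. A minimal sketch of that pattern, reusing the names from the hunk (board_register_fixed_rail() is only an illustrative wrapper; in the patch the call sits at the end of mioa701_machine_init()):

	#include <linux/regulator/fixed.h>
	#include <linux/regulator/machine.h>

	static struct regulator_consumer_supply fixed_5v0_consumers[] = {
		/* the backlight driver looks up a supply named "power" */
		REGULATOR_SUPPLY("power", "pwm-backlight"),
	};

	static void __init board_register_fixed_rail(void)
	{
		/* always-on rail: id 0, label "fixed-5.0V", 5000000 uV */
		regulator_register_always_on(0, "fixed-5.0V",
					     fixed_5v0_consumers,
					     ARRAY_SIZE(fixed_5v0_consumers),
					     5000000);
	}
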
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 338640631e08..05fa505df585 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI
8 select CPU_V7 8 select CPU_V7
9 select GENERIC_CLOCKEVENTS 9 select GENERIC_CLOCKEVENTS
10 select HAVE_ARM_SCU if SMP 10 select HAVE_ARM_SCU if SMP
11 select HAVE_ARM_TWD if LOCAL_TIMERS 11 select HAVE_ARM_TWD if SMP
12 select HAVE_SMP 12 select HAVE_SMP
13 select ARM_GIC 13 select ARM_GIC
14 select MIGHT_HAVE_CACHE_L2X0 14 select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 4ae0286b468d..f55b05a29b55 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -24,6 +24,7 @@
24#include <linux/cpu_pm.h> 24#include <linux/cpu_pm.h>
25#include <linux/suspend.h> 25#include <linux/suspend.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/slab.h>
27#include <linux/clk/tegra.h> 28#include <linux/clk/tegra.h>
28 29
29#include <asm/smp_plat.h> 30#include <asm/smp_plat.h>
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 303a285d80fd..6191603379e1 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -73,10 +73,20 @@ u32 tegra_uart_config[3] = {
73static void __init tegra_init_cache(void) 73static void __init tegra_init_cache(void)
74{ 74{
75#ifdef CONFIG_CACHE_L2X0 75#ifdef CONFIG_CACHE_L2X0
76 static const struct of_device_id pl310_ids[] __initconst = {
77 { .compatible = "arm,pl310-cache", },
78 {}
79 };
80
81 struct device_node *np;
76 int ret; 82 int ret;
77 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; 83 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
78 u32 aux_ctrl, cache_type; 84 u32 aux_ctrl, cache_type;
79 85
86 np = of_find_matching_node(NULL, pl310_ids);
87 if (!np)
88 return;
89
80 cache_type = readl(p + L2X0_CACHE_TYPE); 90 cache_type = readl(p + L2X0_CACHE_TYPE);
81 aux_ctrl = (cache_type & 0x700) << (17-8); 91 aux_ctrl = (cache_type & 0x700) << (17-8);
82 aux_ctrl |= 0x7C400001; 92 aux_ctrl |= 0x7C400001;
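tegra_init_cache() now bails out unless the device tree actually contains an "arm,pl310-cache" node, so boards whose DT does not describe the outer cache skip the PL310 register pokes entirely. The DT-matching guard as a self-contained sketch (have_pl310() is illustrative; this version also drops the node reference, which the hunk keeps held):

	#include <linux/of.h>

	static const struct of_device_id pl310_ids[] __initconst = {
		{ .compatible = "arm,pl310-cache", },
		{ /* sentinel */ }
	};

	static bool __init have_pl310(void)
	{
		struct device_node *np;

		np = of_find_matching_node(NULL, pl310_ids);
		if (!np)
			return false;	/* no PL310 described, nothing to init */
		of_node_put(np);	/* lookup returns the node with a reference held */
		return true;
	}
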
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 1db2a5ca9ab8..8c09a8393fb6 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -25,6 +25,7 @@
25#include <linux/of_irq.h> 25#include <linux/of_irq.h>
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
27#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/memblock.h>
28#include <linux/irqchip.h> 29#include <linux/irqchip.h>
29#include <linux/irqchip/arm-gic.h> 30#include <linux/irqchip/arm-gic.h>
30 31
@@ -41,6 +42,18 @@
41 42
42void __iomem *zynq_scu_base; 43void __iomem *zynq_scu_base;
43 44
45/**
46 * zynq_memory_init - Initialize special memory
47 *
48 * We need to stop things allocating the low memory as DMA can't work in
49 * the 1st 512K of memory.
50 */
51static void __init zynq_memory_init(void)
52{
53 if (!__pa(PAGE_OFFSET))
54 memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
55}
56
44static struct platform_device zynq_cpuidle_device = { 57static struct platform_device zynq_cpuidle_device = {
45 .name = "cpuidle-zynq", 58 .name = "cpuidle-zynq",
46}; 59};
@@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
117 .init_machine = zynq_init_machine, 130 .init_machine = zynq_init_machine,
118 .init_time = zynq_timer_init, 131 .init_time = zynq_timer_init,
119 .dt_compat = zynq_dt_match, 132 .dt_compat = zynq_dt_match,
133 .reserve = zynq_memory_init,
120 .restart = zynq_system_reset, 134 .restart = zynq_system_reset,
121MACHINE_END 135MACHINE_END
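The Zynq hunk adds a .reserve hook to the machine descriptor: when RAM starts at physical address 0 it reserves the region from __pa(PAGE_OFFSET) up to the swapper page directory so nothing is allocated in the low memory that DMA cannot reach. A generic sketch of the same idea with an explicitly named size (BOARD_DMA_HOLE_SIZE is an assumption for illustration, not from the patch):

	#include <linux/memblock.h>
	#include <linux/sizes.h>

	#define BOARD_DMA_HOLE_SIZE	SZ_512K		/* assumed size of the DMA-unreachable region */

	static void __init board_reserve(void)
	{
		/* pull the low region out of the memblock allocator early in boot */
		memblock_reserve(0, BOARD_DMA_HOLE_SIZE);
	}

	/* hooked up via the machine descriptor:  .reserve = board_reserve,  */
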
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a77450e728a..11b3914660d2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1358,7 +1358,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1358 *handle = DMA_ERROR_CODE; 1358 *handle = DMA_ERROR_CODE;
1359 size = PAGE_ALIGN(size); 1359 size = PAGE_ALIGN(size);
1360 1360
1361 if (gfp & GFP_ATOMIC) 1361 if (!(gfp & __GFP_WAIT))
1362 return __iommu_alloc_atomic(dev, size, handle); 1362 return __iommu_alloc_atomic(dev, size, handle);
1363 1363
1364 /* 1364 /*
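The arm_iommu_alloc_attrs() change fixes how "caller may not sleep" is detected. In this kernel GFP_ATOMIC is defined as just __GFP_HIGH, so testing gfp & GFP_ATOMIC really checks the high-priority bit and misses other non-blocking masks; the reliable test is whether __GFP_WAIT is absent. A small helper makes the distinction explicit (gfp_may_sleep() is only illustrative):

	#include <linux/gfp.h>

	static inline bool gfp_may_sleep(gfp_t gfp)
	{
		/*
		 * __GFP_WAIT is the bit that permits blocking; GFP_ATOMIC is
		 * merely __GFP_HIGH, so (gfp & GFP_ATOMIC) is the wrong test.
		 */
		return gfp & __GFP_WAIT;
	}

The allocator should take the atomic path whenever !gfp_may_sleep(gfp), which is exactly the !(gfp & __GFP_WAIT) check the hunk introduces.
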
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d15a88..7ea641b7aa7d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
38 38
39struct mem_type { 39struct mem_type {
40 pteval_t prot_pte; 40 pteval_t prot_pte;
41 pteval_t prot_pte_s2;
41 pmdval_t prot_l1; 42 pmdval_t prot_l1;
42 pmdval_t prot_sect; 43 pmdval_t prot_sect;
43 unsigned int domain; 44 unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c133cc25..a623cb3ad012 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
232#endif /* ifdef CONFIG_CPU_CP15 / else */ 232#endif /* ifdef CONFIG_CPU_CP15 / else */
233 233
234#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN 234#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
235#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
235#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE 236#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
236 237
237static struct mem_type mem_types[] = { 238static struct mem_type mem_types[] = {
238 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ 239 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
239 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | 240 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
240 L_PTE_SHARED, 241 L_PTE_SHARED,
242 .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
243 s2_policy(L_PTE_S2_MT_DEV_SHARED) |
244 L_PTE_SHARED,
241 .prot_l1 = PMD_TYPE_TABLE, 245 .prot_l1 = PMD_TYPE_TABLE,
242 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, 246 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
243 .domain = DOMAIN_IO, 247 .domain = DOMAIN_IO,
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
508 cp = &cache_policies[cachepolicy]; 512 cp = &cache_policies[cachepolicy];
509 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 513 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
510 s2_pgprot = cp->pte_s2; 514 s2_pgprot = cp->pte_s2;
511 hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; 515 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
516 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
512 517
513 /* 518 /*
514 * ARMv6 and above have extended page tables. 519 * ARMv6 and above have extended page tables.
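The mm.h/mmu.c pair splits the device mapping protections by translation stage: struct mem_type gains prot_pte_s2, and build_mem_type_table() now derives s2_device_pgprot from that field instead of reusing the stage-1 prot_pte bits, so stage-2 (KVM guest) device mappings can carry their own memory-type attributes. Reduced to the essentials:

	struct mem_type {
		pteval_t prot_pte;	/* stage-1 PTE bits */
		pteval_t prot_pte_s2;	/* stage-2 PTE bits used for hyp/KVM device mappings */
		/* ... */
	};

	/* stage-1 and stage-2 device pgprots now come from different fields */
	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
	s2_device_pgprot  = mem_types[MT_DEVICE].prot_pte_s2;
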
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f85d56..32b3558321c4 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@ __v6_setup:
208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache 208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
211 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
212#ifdef CONFIG_MMU 211#ifdef CONFIG_MMU
213 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 212 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
214 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -218,6 +217,8 @@ __v6_setup:
218 ALT_UP(orr r8, r8, #TTB_FLAGS_UP) 217 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
219 mcr p15, 0, r8, c2, c0, 1 @ load TTB1 218 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
220#endif /* CONFIG_MMU */ 219#endif /* CONFIG_MMU */
220 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
221 @ complete invalidations
221 adr r5, v6_crval 222 adr r5, v6_crval
222 ldmia r5, {r5, r6} 223 ldmia r5, {r5, r6}
223 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables 224 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd1781979a39..74f6033e76dd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@ __v7_setup:
351 351
3524: mov r10, #0 3524: mov r10, #0
353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
354 dsb
355#ifdef CONFIG_MMU 354#ifdef CONFIG_MMU
356 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 355 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
357 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup 356 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@ __v7_setup:
360 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 359 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
361 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 360 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
362#endif 361#endif
362 dsb @ Complete invalidations
363#ifndef CONFIG_ARM_THUMBEE 363#ifndef CONFIG_ARM_THUMBEE
364 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE 364 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
365 and r0, r0, #(0xf << 12) @ ThumbEE enabled field 365 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
148#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) 148#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
149#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) 149#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
150 150
151/* Device Control API: ARM VGIC */
152#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
153#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
154#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
155#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
156#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
157#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
158#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
159
151/* KVM_IRQ_LINE irq field index values */ 160/* KVM_IRQ_LINE irq field index values */
152#define KVM_ARM_IRQ_TYPE_SHIFT 24 161#define KVM_ARM_IRQ_TYPE_SHIFT 24
153#define KVM_ARM_IRQ_TYPE_MASK 0xff 162#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb66590dcd..dba48a5d5bb9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
11 11
12KBUILD_DEFCONFIG := atstk1002_defconfig 12KBUILD_DEFCONFIG := atstk1002_defconfig
13 13
14KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic 14KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
15KBUILD_AFLAGS += -mrelax -mno-pic 15KBUILD_AFLAGS += -mrelax -mno-pic
16KBUILD_CFLAGS_MODULE += -mno-relax 16KBUILD_CFLAGS_MODULE += -mno-relax
17LDFLAGS_vmlinux += --relax 17LDFLAGS_vmlinux += --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a1073e..c1466a872b9c 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
11#define FRAM_VERSION "1.0" 11#define FRAM_VERSION "1.0"
12 12
13#include <linux/miscdevice.h> 13#include <linux/miscdevice.h>
14#include <linux/module.h>
14#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
15#include <linux/mm.h> 16#include <linux/mm.h>
16#include <linux/io.h> 17#include <linux/io.h>
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index cfb9fe1b8df9..c7c64a63c29f 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -17,5 +17,6 @@ generic-y += scatterlist.h
17generic-y += sections.h 17generic-y += sections.h
18generic-y += topology.h 18generic-y += topology.h
19generic-y += trace_clock.h 19generic-y += trace_clock.h
20generic-y += vga.h
20generic-y += xor.h 21generic-y += xor.h
21generic-y += hash.h 22generic-y += hash.h
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index fc6483f83ccc..4f5ec2bb7172 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr);
295#define iounmap(addr) \ 295#define iounmap(addr) \
296 __iounmap(addr) 296 __iounmap(addr)
297 297
298#define ioremap_wc ioremap_nocache
299
298#define cached(addr) P1SEGADDR(addr) 300#define cached(addr) P1SEGADDR(addr)
299#define uncached(addr) P2SEGADDR(addr) 301#define uncached(addr) P2SEGADDR(addr)
300 302
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 7cc8c364924d..6fb9e813a910 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,4 @@
1 1generic-y += barrier.h
2generic-y += bitsperlong.h 2generic-y += bitsperlong.h
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += cputime.h 4generic-y += cputime.h
@@ -6,6 +6,7 @@ generic-y += device.h
6generic-y += emergency-restart.h 6generic-y += emergency-restart.h
7generic-y += errno.h 7generic-y += errno.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += hash.h
9generic-y += hw_irq.h 10generic-y += hw_irq.h
10generic-y += ioctl.h 11generic-y += ioctl.h
11generic-y += ipcbuf.h 12generic-y += ipcbuf.h
@@ -18,6 +19,7 @@ generic-y += local.h
18generic-y += mman.h 19generic-y += mman.h
19generic-y += mutex.h 20generic-y += mutex.h
20generic-y += percpu.h 21generic-y += percpu.h
22generic-y += preempt.h
21generic-y += resource.h 23generic-y += resource.h
22generic-y += scatterlist.h 24generic-y += scatterlist.h
23generic-y += sections.h 25generic-y += sections.h
@@ -31,5 +33,3 @@ generic-y += trace_clock.h
31generic-y += types.h 33generic-y += types.h
32generic-y += word-at-a-time.h 34generic-y += word-at-a-time.h
33generic-y += xor.h 35generic-y += xor.h
34generic-y += preempt.h
35generic-y += hash.h
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
deleted file mode 100644
index 15c5f77c1614..000000000000
--- a/arch/m68k/include/asm/barrier.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef _M68K_BARRIER_H
2#define _M68K_BARRIER_H
3
4#define nop() do { asm volatile ("nop"); barrier(); } while (0)
5
6#include <asm-generic/barrier.h>
7
8#endif /* _M68K_BARRIER_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 014f288fc813..9d38b73989eb 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 349 7#define NR_syscalls 351
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 625f321001dc..b932dd470041 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -354,5 +354,7 @@
354#define __NR_process_vm_writev 346 354#define __NR_process_vm_writev 346
355#define __NR_kcmp 347 355#define __NR_kcmp 347
356#define __NR_finit_module 348 356#define __NR_finit_module 348
357#define __NR_sched_setattr 349
358#define __NR_sched_getattr 350
357 359
358#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 360#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 3f04ea0ab802..b6223dc41d82 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -369,4 +369,6 @@ ENTRY(sys_call_table)
369 .long sys_process_vm_writev 369 .long sys_process_vm_writev
370 .long sys_kcmp 370 .long sys_kcmp
371 .long sys_finit_module 371 .long sys_finit_module
372 .long sys_sched_setattr
373 .long sys_sched_getattr /* 350 */
372 374
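Taken together, the three m68k hunks are the standard recipe for wiring up new system calls on an architecture with a hand-maintained call table: bump NR_syscalls, publish the __NR_ numbers in the uapi header, and append entries to sys_call_table in the same slot order. Schematically, with the numbers from these hunks:

	/* arch/m68k/include/asm/unistd.h */
	#define NR_syscalls		351

	/* arch/m68k/include/uapi/asm/unistd.h */
	#define __NR_sched_setattr	349
	#define __NR_sched_getattr	350

	/*
	 * arch/m68k/kernel/syscalltable.S then appends, keeping slot order:
	 *	.long sys_sched_setattr
	 *	.long sys_sched_getattr		// 350
	 */

If any of the three pieces is missing or the ordering differs, the new calls either return -ENOSYS or land on the wrong handler.
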
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 05b7d39e4391..66fc24c24238 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -13,6 +13,8 @@
13#ifndef _ASM_MICROBLAZE_DELAY_H 13#ifndef _ASM_MICROBLAZE_DELAY_H
14#define _ASM_MICROBLAZE_DELAY_H 14#define _ASM_MICROBLAZE_DELAY_H
15 15
16#include <linux/param.h>
17
16extern inline void __delay(unsigned long loops) 18extern inline void __delay(unsigned long loops)
17{ 19{
18 asm volatile ("# __delay \n\t" \ 20 asm volatile ("# __delay \n\t" \
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index a2cea7206077..3fbb7f1db3bc 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr)
89{ 89{
90 return le32_to_cpu(*(volatile unsigned int __force *)addr); 90 return le32_to_cpu(*(volatile unsigned int __force *)addr);
91} 91}
92#define readq readq
93static inline u64 readq(const volatile void __iomem *addr)
94{
95 return le64_to_cpu(__raw_readq(addr));
96}
92static inline void writeb(unsigned char v, volatile void __iomem *addr) 97static inline void writeb(unsigned char v, volatile void __iomem *addr)
93{ 98{
94 *(volatile unsigned char __force *)addr = v; 99 *(volatile unsigned char __force *)addr = v;
@@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
101{ 106{
102 *(volatile unsigned int __force *)addr = cpu_to_le32(v); 107 *(volatile unsigned int __force *)addr = cpu_to_le32(v);
103} 108}
109#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
104 110
105/* ioread and iowrite variants. thease are for now same as __raw_ 111/* ioread and iowrite variants. thease are for now same as __raw_
106 * variants of accessors. we might check for endianess in the feature 112 * variants of accessors. we might check for endianess in the feature
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index b7fb0438458c..17645b2e2f07 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -66,7 +66,7 @@ real_start:
66 mts rmsr, r0 66 mts rmsr, r0
67/* Disable stack protection from bootloader */ 67/* Disable stack protection from bootloader */
68 mts rslr, r0 68 mts rslr, r0
69 addi r8, r0, 0xFFFFFFF 69 addi r8, r0, 0xFFFFFFFF
70 mts rshr, r8 70 mts rshr, r8
71/* 71/*
72 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' 72 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818e..150866b2a3fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
134} 134}
135 135
136extern int dma_set_mask(struct device *dev, u64 dma_mask); 136extern int dma_set_mask(struct device *dev, u64 dma_mask);
137extern int __dma_set_mask(struct device *dev, u64 dma_mask);
137 138
138#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 139#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
139 140
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 9e39ceb1d19f..d4dd41fb951b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -172,10 +172,20 @@ struct eeh_ops {
172}; 172};
173 173
174extern struct eeh_ops *eeh_ops; 174extern struct eeh_ops *eeh_ops;
175extern int eeh_subsystem_enabled; 175extern bool eeh_subsystem_enabled;
176extern raw_spinlock_t confirm_error_lock; 176extern raw_spinlock_t confirm_error_lock;
177extern int eeh_probe_mode; 177extern int eeh_probe_mode;
178 178
179static inline bool eeh_enabled(void)
180{
181 return eeh_subsystem_enabled;
182}
183
184static inline void eeh_set_enable(bool mode)
185{
186 eeh_subsystem_enabled = mode;
187}
188
179#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ 189#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
180#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ 190#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
181 191
@@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
246 * If this macro yields TRUE, the caller relays to eeh_check_failure() 256 * If this macro yields TRUE, the caller relays to eeh_check_failure()
247 * which does further tests out of line. 257 * which does further tests out of line.
248 */ 258 */
249#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) 259#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
250 260
251/* 261/*
252 * Reads from a device which has been isolated by EEH will return 262 * Reads from a device which has been isolated by EEH will return
@@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
257 267
258#else /* !CONFIG_EEH */ 268#else /* !CONFIG_EEH */
259 269
270static inline bool eeh_enabled(void)
271{
272 return false;
273}
274
275static inline void eeh_set_enable(bool mode) { }
276
260static inline int eeh_init(void) 277static inline int eeh_init(void)
261{ 278{
262 return 0; 279 return 0;
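Making eeh_subsystem_enabled a bool and hiding it behind eeh_enabled()/eeh_set_enable() means callers test the capability through one accessor that has a static-inline stub when CONFIG_EEH is off, instead of touching the variable directly. A hypothetical caller, just to show how the stub keeps the code compiling either way (example_report_eeh_state() is not part of the patch; the real conversions are in the eeh.c hunks further down):

	#include <linux/printk.h>
	#include <asm/eeh.h>

	static void example_report_eeh_state(void)
	{
		/* with !CONFIG_EEH the stub simply returns false */
		if (eeh_enabled())
			pr_info("EEH: PCI error recovery is available\n");
		else
			pr_info("EEH: disabled or no capable adapters\n");
	}
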
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index d750336b171d..623f2971ce0e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
127 unsigned long addr, pte_t *ptep) 127 unsigned long addr, pte_t *ptep)
128{ 128{
129#ifdef CONFIG_PPC64 129#ifdef CONFIG_PPC64
130 return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); 130 return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
131#else 131#else
132 return __pte(pte_update(ptep, ~0UL, 0)); 132 return __pte(pte_update(ptep, ~0UL, 0));
133#endif 133#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036579b5..42632c7a2a4e 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@ struct iommu_table {
77#ifdef CONFIG_IOMMU_API 77#ifdef CONFIG_IOMMU_API
78 struct iommu_group *it_group; 78 struct iommu_group *it_group;
79#endif 79#endif
80 void (*set_bypass)(struct iommu_table *tbl, bool enable);
80}; 81};
81 82
82/* Pure 2^n version of get_order */ 83/* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index bc141c950b1e..eb9261024f51 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
195static inline unsigned long pte_update(struct mm_struct *mm, 195static inline unsigned long pte_update(struct mm_struct *mm,
196 unsigned long addr, 196 unsigned long addr,
197 pte_t *ptep, unsigned long clr, 197 pte_t *ptep, unsigned long clr,
198 unsigned long set,
198 int huge) 199 int huge)
199{ 200{
200#ifdef PTE_ATOMIC_UPDATES 201#ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
205 andi. %1,%0,%6\n\ 206 andi. %1,%0,%6\n\
206 bne- 1b \n\ 207 bne- 1b \n\
207 andc %1,%0,%4 \n\ 208 andc %1,%0,%4 \n\
209 or %1,%1,%7\n\
208 stdcx. %1,0,%3 \n\ 210 stdcx. %1,0,%3 \n\
209 bne- 1b" 211 bne- 1b"
210 : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 212 : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
211 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 213 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
212 : "cc" ); 214 : "cc" );
213#else 215#else
214 unsigned long old = pte_val(*ptep); 216 unsigned long old = pte_val(*ptep);
215 *ptep = __pte(old & ~clr); 217 *ptep = __pte((old & ~clr) | set);
216#endif 218#endif
217 /* huge pages use the old page table lock */ 219 /* huge pages use the old page table lock */
218 if (!huge) 220 if (!huge)
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
231{ 233{
232 unsigned long old; 234 unsigned long old;
233 235
234 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 236 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
235 return 0; 237 return 0;
236 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 238 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
237 return (old & _PAGE_ACCESSED) != 0; 239 return (old & _PAGE_ACCESSED) != 0;
238} 240}
239#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 241#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
252 if ((pte_val(*ptep) & _PAGE_RW) == 0) 254 if ((pte_val(*ptep) & _PAGE_RW) == 0)
253 return; 255 return;
254 256
255 pte_update(mm, addr, ptep, _PAGE_RW, 0); 257 pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
256} 258}
257 259
258static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, 260static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
261 if ((pte_val(*ptep) & _PAGE_RW) == 0) 263 if ((pte_val(*ptep) & _PAGE_RW) == 0)
262 return; 264 return;
263 265
264 pte_update(mm, addr, ptep, _PAGE_RW, 1); 266 pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
265} 267}
266 268
267/* 269/*
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
284static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 286static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
285 unsigned long addr, pte_t *ptep) 287 unsigned long addr, pte_t *ptep)
286{ 288{
287 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 289 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
288 return __pte(old); 290 return __pte(old);
289} 291}
290 292
291static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 293static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
292 pte_t * ptep) 294 pte_t * ptep)
293{ 295{
294 pte_update(mm, addr, ptep, ~0UL, 0); 296 pte_update(mm, addr, ptep, ~0UL, 0, 0);
295} 297}
296 298
297 299
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
506 508
507extern unsigned long pmd_hugepage_update(struct mm_struct *mm, 509extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
508 unsigned long addr, 510 unsigned long addr,
509 pmd_t *pmdp, unsigned long clr); 511 pmd_t *pmdp,
512 unsigned long clr,
513 unsigned long set);
510 514
511static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, 515static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
512 unsigned long addr, pmd_t *pmdp) 516 unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
515 519
516 if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 520 if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
517 return 0; 521 return 0;
518 old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); 522 old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
519 return ((old & _PAGE_ACCESSED) != 0); 523 return ((old & _PAGE_ACCESSED) != 0);
520} 524}
521 525
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
542 if ((pmd_val(*pmdp) & _PAGE_RW) == 0) 546 if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
543 return; 547 return;
544 548
545 pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); 549 pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
546} 550}
547 551
548#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH 552#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
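pte_update() (and its huge-page counterpart pmd_hugepage_update()) gains a set argument, so one atomic ldarx/stdcx. loop can clear one group of PTE bits and set another, rather than doing two separate racy updates; all pre-existing callers simply pass 0 for set. The non-atomic fallback makes the semantics easy to see (pte_update_sketch() is a renamed illustration, not the real helper):

	/* what the !PTE_ATOMIC_UPDATES branch of the updated helper boils down to */
	static inline unsigned long pte_update_sketch(pte_t *ptep,
						      unsigned long clr,
						      unsigned long set)
	{
		unsigned long old = pte_val(*ptep);

		*ptep = __pte((old & ~clr) | set);	/* clear and set in one store */
		return old;
	}

The new parameter exists so ptep_set_numa()/pmdp_set_numa() (added in the following pgtable.h hunk) can drop _PAGE_PRESENT and raise _PAGE_NUMA in a single update.
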
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f83b6f3e1b39..3ebb188c3ff5 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
75 return pte; 75 return pte;
76} 76}
77 77
78#define ptep_set_numa ptep_set_numa
79static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
80 pte_t *ptep)
81{
82 if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
83 VM_BUG_ON(1);
84
85 pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
86 return;
87}
88
78#define pmd_numa pmd_numa 89#define pmd_numa pmd_numa
79static inline int pmd_numa(pmd_t pmd) 90static inline int pmd_numa(pmd_t pmd)
80{ 91{
81 return pte_numa(pmd_pte(pmd)); 92 return pte_numa(pmd_pte(pmd));
82} 93}
83 94
95#define pmdp_set_numa pmdp_set_numa
96static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
97 pmd_t *pmdp)
98{
99 if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
100 VM_BUG_ON(1);
101
102 pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
103 return;
104}
105
84#define pmd_mknonnuma pmd_mknonnuma 106#define pmd_mknonnuma pmd_mknonnuma
85static inline pmd_t pmd_mknonnuma(pmd_t pmd) 107static inline pmd_t pmd_mknonnuma(pmd_t pmd)
86{ 108{
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de4..d0e784e0ff48 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
8 8
9#ifdef __powerpc64__ 9#ifdef __powerpc64__
10 10
11extern char __start_interrupts[];
11extern char __end_interrupts[]; 12extern char __end_interrupts[];
12 13
13extern char __prom_init_toc_start[]; 14extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
21 return 0; 22 return 0;
22} 23}
23 24
25static inline int overlaps_interrupt_vector_text(unsigned long start,
26 unsigned long end)
27{
28 unsigned long real_start, real_end;
29 real_start = __start_interrupts - _stext;
30 real_end = __end_interrupts - _stext;
31
32 return start < (unsigned long)__va(real_end) &&
33 (unsigned long)__va(real_start) < end;
34}
35
24static inline int overlaps_kernel_text(unsigned long start, unsigned long end) 36static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
25{ 37{
26 return start < (unsigned long)__init_end && 38 return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 0d9cecddf8a4..c53f5f6d1761 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6/* Default link addresses for the vDSOs */ 6/* Default link addresses for the vDSOs */
7#define VDSO32_LBASE 0x100000 7#define VDSO32_LBASE 0x0
8#define VDSO64_LBASE 0x100000 8#define VDSO64_LBASE 0x0
9 9
10/* Default map addresses for 32bit vDSO */ 10/* Default map addresses for 32bit vDSO */
11#define VDSO32_MBASE VDSO32_LBASE 11#define VDSO32_MBASE 0x100000
12 12
13#define VDSO_VERSION_STRING LINUX_2.6.15 13#define VDSO_VERSION_STRING LINUX_2.6.15
14 14
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
191 191
192#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 192#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
193 193
194int dma_set_mask(struct device *dev, u64 dma_mask) 194int __dma_set_mask(struct device *dev, u64 dma_mask)
195{ 195{
196 struct dma_map_ops *dma_ops = get_dma_ops(dev); 196 struct dma_map_ops *dma_ops = get_dma_ops(dev);
197 197
198 if (ppc_md.dma_set_mask)
199 return ppc_md.dma_set_mask(dev, dma_mask);
200 if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) 198 if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
201 return dma_ops->set_dma_mask(dev, dma_mask); 199 return dma_ops->set_dma_mask(dev, dma_mask);
202 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 200 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
204 *dev->dma_mask = dma_mask; 202 *dev->dma_mask = dma_mask;
205 return 0; 203 return 0;
206} 204}
205int dma_set_mask(struct device *dev, u64 dma_mask)
206{
207 if (ppc_md.dma_set_mask)
208 return ppc_md.dma_set_mask(dev, dma_mask);
209 return __dma_set_mask(dev, dma_mask);
210}
207EXPORT_SYMBOL(dma_set_mask); 211EXPORT_SYMBOL(dma_set_mask);
208 212
209u64 dma_get_required_mask(struct device *dev) 213u64 dma_get_required_mask(struct device *dev)
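dma_set_mask() is split so the ppc_md.dma_set_mask platform hook stays in the public entry point while the generic dma_map_ops handling moves into __dma_set_mask(), now visible to the rest of arch code. Presumably this lets a platform implementation fall back to the generic path from inside its own hook. A hypothetical hook using it (example_platform_dma_set_mask() and example_enable_bypass() are made up for illustration):

	#include <linux/dma-mapping.h>

	static bool example_enable_bypass(struct device *dev)
	{
		return false;	/* stub: a real platform would program its bypass window here */
	}

	static int example_platform_dma_set_mask(struct device *dev, u64 dma_mask)
	{
		/* grant a full 64-bit bypass window when the device asks for it */
		if (dma_mask == DMA_BIT_MASK(64) && example_enable_bypass(dev))
			return 0;

		/* otherwise defer to the generic dma_map_ops handling */
		return __dma_set_mask(dev, dma_mask);
	}
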
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 148db72a8c43..e7b76a6bf150 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
30#include <linux/rbtree.h> 30#include <linux/rbtree.h>
31#include <linux/reboot.h>
31#include <linux/seq_file.h> 32#include <linux/seq_file.h>
32#include <linux/spinlock.h> 33#include <linux/spinlock.h>
33#include <linux/export.h> 34#include <linux/export.h>
@@ -89,7 +90,7 @@
89/* Platform dependent EEH operations */ 90/* Platform dependent EEH operations */
90struct eeh_ops *eeh_ops = NULL; 91struct eeh_ops *eeh_ops = NULL;
91 92
92int eeh_subsystem_enabled; 93bool eeh_subsystem_enabled = false;
93EXPORT_SYMBOL(eeh_subsystem_enabled); 94EXPORT_SYMBOL(eeh_subsystem_enabled);
94 95
95/* 96/*
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
364 365
365 eeh_stats.total_mmio_ffs++; 366 eeh_stats.total_mmio_ffs++;
366 367
367 if (!eeh_subsystem_enabled) 368 if (!eeh_enabled())
368 return 0; 369 return 0;
369 370
370 if (!edev) { 371 if (!edev) {
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
747 return -EEXIST; 748 return -EEXIST;
748} 749}
749 750
751static int eeh_reboot_notifier(struct notifier_block *nb,
752 unsigned long action, void *unused)
753{
754 eeh_set_enable(false);
755 return NOTIFY_DONE;
756}
757
758static struct notifier_block eeh_reboot_nb = {
759 .notifier_call = eeh_reboot_notifier,
760};
761
750/** 762/**
751 * eeh_init - EEH initialization 763 * eeh_init - EEH initialization
752 * 764 *
@@ -778,6 +790,14 @@ int eeh_init(void)
778 if (machine_is(powernv) && cnt++ <= 0) 790 if (machine_is(powernv) && cnt++ <= 0)
779 return ret; 791 return ret;
780 792
793 /* Register reboot notifier */
794 ret = register_reboot_notifier(&eeh_reboot_nb);
795 if (ret) {
796 pr_warn("%s: Failed to register notifier (%d)\n",
797 __func__, ret);
798 return ret;
799 }
800
781 /* call platform initialization function */ 801 /* call platform initialization function */
782 if (!eeh_ops) { 802 if (!eeh_ops) {
783 pr_warning("%s: Platform EEH operation not found\n", 803 pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@ int eeh_init(void)
822 return ret; 842 return ret;
823 } 843 }
824 844
825 if (eeh_subsystem_enabled) 845 if (eeh_enabled())
826 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); 846 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
827 else 847 else
828 pr_warning("EEH: No capable adapters found\n"); 848 pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
897 struct device_node *dn; 917 struct device_node *dn;
898 struct eeh_dev *edev; 918 struct eeh_dev *edev;
899 919
900 if (!dev || !eeh_subsystem_enabled) 920 if (!dev || !eeh_enabled())
901 return; 921 return;
902 922
903 pr_debug("EEH: Adding device %s\n", pci_name(dev)); 923 pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
1005{ 1025{
1006 struct eeh_dev *edev; 1026 struct eeh_dev *edev;
1007 1027
1008 if (!dev || !eeh_subsystem_enabled) 1028 if (!dev || !eeh_enabled())
1009 return; 1029 return;
1010 edev = pci_dev_to_eeh_dev(dev); 1030 edev = pci_dev_to_eeh_dev(dev);
1011 1031
@@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
1045 1065
1046static int proc_eeh_show(struct seq_file *m, void *v) 1066static int proc_eeh_show(struct seq_file *m, void *v)
1047{ 1067{
1048 if (0 == eeh_subsystem_enabled) { 1068 if (!eeh_enabled()) {
1049 seq_printf(m, "EEH Subsystem is globally disabled\n"); 1069 seq_printf(m, "EEH Subsystem is globally disabled\n");
1050 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); 1070 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
1051 } else { 1071 } else {
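eeh.c also registers a reboot notifier so EEH is switched off (via the new eeh_set_enable(false)) before the machine reboots, presumably to stop error recovery from running against a half-torn-down system. It is the standard reboot-notifier pattern (the example_ names below are illustrative):

	#include <linux/notifier.h>
	#include <linux/reboot.h>
	#include <asm/eeh.h>

	static int example_reboot_notifier(struct notifier_block *nb,
					   unsigned long action, void *unused)
	{
		eeh_set_enable(false);	/* no more EEH processing from here on */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_reboot_nb = {
		.notifier_call = example_reboot_notifier,
	};

	/* during init:  register_reboot_notifier(&example_reboot_nb);  */
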
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..fdc679d309ec 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
362 */ 362 */
363 if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) 363 if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
364 return NULL; 364 return NULL;
365
365 driver = eeh_pcid_get(dev); 366 driver = eeh_pcid_get(dev);
366 if (driver && driver->err_handler) 367 if (driver) {
367 return NULL; 368 eeh_pcid_put(dev);
369 if (driver->err_handler)
370 return NULL;
371 }
368 372
369 /* Remove it from PCI subsystem */ 373 /* Remove it from PCI subsystem */
370 pr_debug("EEH: Removing %s without EEH sensitive driver\n", 374 pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
1088 memset(tbl->it_map, 0xff, sz); 1088 memset(tbl->it_map, 0xff, sz);
1089 iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); 1089 iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
1090 1090
1091 /*
1092 * Disable iommu bypass, otherwise the user can DMA to all of
1093 * our physical memory via the bypass window instead of just
1094 * the pages that has been explicitly mapped into the iommu
1095 */
1096 if (tbl->set_bypass)
1097 tbl->set_bypass(tbl, false);
1098
1091 return 0; 1099 return 0;
1092} 1100}
1093EXPORT_SYMBOL_GPL(iommu_take_ownership); 1101EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
1102 /* Restore bit#0 set by iommu_init_table() */ 1110 /* Restore bit#0 set by iommu_init_table() */
1103 if (tbl->it_offset == 0) 1111 if (tbl->it_offset == 0)
1104 set_bit(0, tbl->it_map); 1112 set_bit(0, tbl->it_map);
1113
1114 /* The kernel owns the device now, we can restore the iommu bypass */
1115 if (tbl->set_bypass)
1116 tbl->set_bypass(tbl, true);
1105} 1117}
1106EXPORT_SYMBOL_GPL(iommu_release_ownership); 1118EXPORT_SYMBOL_GPL(iommu_release_ownership);
1107 1119
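The new set_bypass() member of struct iommu_table gives platforms a switch for their DMA bypass window: iommu_take_ownership() turns it off, so a user (e.g. a VFIO-assigned guest) can only DMA through pages explicitly mapped in the table, and iommu_release_ownership() turns it back on once the kernel owns the device again. Since not every table has such a window, the hook is optional and must be guarded (example_toggle_bypass() is just an illustration; the patch open-codes the two guarded calls):

	/* shared by the take-ownership and release-ownership paths */
	static void example_toggle_bypass(struct iommu_table *tbl, bool kernel_owns)
	{
		if (tbl->set_bypass)			/* optional, platform-provided hook */
			tbl->set_bypass(tbl, kernel_owns);
	}
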
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..1d0848bba049 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
559#ifdef CONFIG_PPC64 559#ifdef CONFIG_PPC64
560 cpu_nr = i; 560 cpu_nr = i;
561#else 561#else
562#ifdef CONFIG_SMP
562 cpu_nr = get_hard_smp_processor_id(i); 563 cpu_nr = get_hard_smp_processor_id(i);
564#else
565 cpu_nr = 0;
563#endif 566#endif
567#endif
568
564 memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); 569 memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
565 tp = critirq_ctx[cpu_nr]; 570 tp = critirq_ctx[cpu_nr];
566 tp->cpu = cpu_nr; 571 tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
196 196
197/* Values we need to export to the second kernel via the device tree. */ 197/* Values we need to export to the second kernel via the device tree. */
198static phys_addr_t kernel_end; 198static phys_addr_t kernel_end;
199static phys_addr_t crashk_base;
199static phys_addr_t crashk_size; 200static phys_addr_t crashk_size;
201static unsigned long long mem_limit;
200 202
201static struct property kernel_end_prop = { 203static struct property kernel_end_prop = {
202 .name = "linux,kernel-end", 204 .name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
207static struct property crashk_base_prop = { 209static struct property crashk_base_prop = {
208 .name = "linux,crashkernel-base", 210 .name = "linux,crashkernel-base",
209 .length = sizeof(phys_addr_t), 211 .length = sizeof(phys_addr_t),
210 .value = &crashk_res.start, 212 .value = &crashk_base
211}; 213};
212 214
213static struct property crashk_size_prop = { 215static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
219static struct property memory_limit_prop = { 221static struct property memory_limit_prop = {
220 .name = "linux,memory-limit", 222 .name = "linux,memory-limit",
221 .length = sizeof(unsigned long long), 223 .length = sizeof(unsigned long long),
222 .value = &memory_limit, 224 .value = &mem_limit,
223}; 225};
224 226
227#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
228
225static void __init export_crashk_values(struct device_node *node) 229static void __init export_crashk_values(struct device_node *node)
226{ 230{
227 struct property *prop; 231 struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
237 of_remove_property(node, prop); 241 of_remove_property(node, prop);
238 242
239 if (crashk_res.start != 0) { 243 if (crashk_res.start != 0) {
244 crashk_base = cpu_to_be_ulong(crashk_res.start),
240 of_add_property(node, &crashk_base_prop); 245 of_add_property(node, &crashk_base_prop);
241 crashk_size = resource_size(&crashk_res); 246 crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
242 of_add_property(node, &crashk_size_prop); 247 of_add_property(node, &crashk_size_prop);
243 } 248 }
244 249
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
246 * memory_limit is required by the kexec-tools to limit the 251 * memory_limit is required by the kexec-tools to limit the
247 * crash regions to the actual memory used. 252 * crash regions to the actual memory used.
248 */ 253 */
254 mem_limit = cpu_to_be_ulong(memory_limit);
249 of_update_property(node, &memory_limit_prop); 255 of_update_property(node, &memory_limit_prop);
250} 256}
251 257
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
264 of_remove_property(node, prop); 270 of_remove_property(node, prop);
265 271
266 /* information needed by userspace when using default_machine_kexec */ 272 /* information needed by userspace when using default_machine_kexec */
267 kernel_end = __pa(_end); 273 kernel_end = cpu_to_be_ulong(__pa(_end));
268 of_add_property(node, &kernel_end_prop); 274 of_add_property(node, &kernel_end_prop);
269 275
270 export_crashk_values(node); 276 export_crashk_values(node);
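The kexec export fix converts every value handed to the device tree into big-endian form before attaching the property, since flattened-device-tree property payloads are big-endian regardless of the kernel's own endianness; cpu_to_be_ulong is pasted together from BITS_PER_LONG so the same code picks cpu_to_be32 or cpu_to_be64 as appropriate. The convert-then-export pattern, reduced to one property (the example_ names are illustrative):

	#include <linux/compiler.h>
	#include <linux/of.h>
	#include <asm/byteorder.h>

	#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)

	static unsigned long long example_mem_limit;	/* holds the big-endian copy */

	static struct property example_memory_limit_prop = {
		.name	= "linux,memory-limit",
		.length	= sizeof(unsigned long long),
		.value	= &example_mem_limit,
	};

	static void example_export(struct device_node *node, unsigned long long limit)
	{
		example_mem_limit = cpu_to_be_ulong(limit);	/* convert before exporting */
		of_update_property(node, &example_memory_limit_prop);
	}
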
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
369 369
370/* Values we need to export to the second kernel via the device tree. */ 370/* Values we need to export to the second kernel via the device tree. */
371static unsigned long htab_base; 371static unsigned long htab_base;
372static unsigned long htab_size;
372 373
373static struct property htab_base_prop = { 374static struct property htab_base_prop = {
374 .name = "linux,htab-base", 375 .name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
379static struct property htab_size_prop = { 380static struct property htab_size_prop = {
380 .name = "linux,htab-size", 381 .name = "linux,htab-size",
381 .length = sizeof(unsigned long), 382 .length = sizeof(unsigned long),
382 .value = &htab_size_bytes, 383 .value = &htab_size,
383}; 384};
384 385
385static int __init export_htab_values(void) 386static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
403 if (prop) 404 if (prop)
404 of_remove_property(node, prop); 405 of_remove_property(node, prop);
405 406
406 htab_base = __pa(htab_address); 407 htab_base = cpu_to_be64(__pa(htab_address));
407 of_add_property(node, &htab_base_prop); 408 of_add_property(node, &htab_base_prop);
409 htab_size = cpu_to_be64(htab_size_bytes);
408 of_add_property(node, &htab_size_prop); 410 of_add_property(node, &htab_size_prop);
409 411
410 of_node_put(node); 412 of_node_put(node);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 879f09620f83..7c6bb4b17b49 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
57 mtlr r0 57 mtlr r0
58 blr 58 blr
59 59
60/*
61 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
62 */
60_GLOBAL(call_do_irq) 63_GLOBAL(call_do_irq)
61 mflr r0 64 mflr r0
62 stw r0,4(r1) 65 stw r0,4(r1)
63 lwz r10,THREAD+KSP_LIMIT(r2) 66 lwz r10,THREAD+KSP_LIMIT(r2)
64 addi r11,r3,THREAD_INFO_GAP 67 addi r11,r4,THREAD_INFO_GAP
65 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) 68 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
66 mr r1,r4 69 mr r1,r4
67 stw r10,8(r1) 70 stw r10,8(r1)
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..1482327cfeba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
69 * R_PPC64_RELATIVE ones. 69 * R_PPC64_RELATIVE ones.
70 */ 70 */
71 mtctr r8 71 mtctr r8
725: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ 725: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
73 cmpwi r0,R_PPC64_RELATIVE 73 cmpdi r0,R_PPC64_RELATIVE
74 bne 6f 74 bne 6f
75 ld r6,0(r9) /* reloc->r_offset */ 75 ld r6,0(r9) /* reloc->r_offset */
76 ld r0,16(r9) /* reloc->r_addend */ 76 ld r0,16(r9) /* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27eaee4..04cc4fcca78b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
247 /* interrupt stacks must be in lowmem, we get that for free on ppc32 247 /* interrupt stacks must be in lowmem, we get that for free on ppc32
248 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ 248 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
249 for_each_possible_cpu(i) { 249 for_each_possible_cpu(i) {
250#ifdef CONFIG_SMP
250 hw_cpu = get_hard_smp_processor_id(i); 251 hw_cpu = get_hard_smp_processor_id(i);
252#else
253 hw_cpu = 0;
254#endif
255
251 critirq_ctx[hw_cpu] = (struct thread_info *) 256 critirq_ctx[hw_cpu] = (struct thread_info *)
252 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 257 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
253#ifdef CONFIG_BOOKE 258#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 79683d0393f5..6ac107ac402a 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -6,7 +6,7 @@
6 .globl vdso32_start, vdso32_end 6 .globl vdso32_start, vdso32_end
7 .balign PAGE_SIZE 7 .balign PAGE_SIZE
8vdso32_start: 8vdso32_start:
9 .incbin "arch/powerpc/kernel/vdso32/vdso32.so" 9 .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
10 .balign PAGE_SIZE 10 .balign PAGE_SIZE
11vdso32_end: 11vdso32_end:
12 12
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 8df9e2463007..df60fca6a13d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -6,7 +6,7 @@
6 .globl vdso64_start, vdso64_end 6 .globl vdso64_start, vdso64_end
7 .balign PAGE_SIZE 7 .balign PAGE_SIZE
8vdso64_start: 8vdso64_start:
9 .incbin "arch/powerpc/kernel/vdso64/vdso64.so" 9 .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
10 .balign PAGE_SIZE 10 .balign PAGE_SIZE
11vdso64_end: 11vdso64_end:
12 12
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de6881259aef..d766d6ee33fe 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
207 if (overlaps_kernel_text(vaddr, vaddr + step)) 207 if (overlaps_kernel_text(vaddr, vaddr + step))
208 tprot &= ~HPTE_R_N; 208 tprot &= ~HPTE_R_N;
209 209
210 /*
211 * If relocatable, check if it overlaps interrupt vectors that
212 * are copied down to real 0. For relocatable kernel
213 * (e.g. kdump case) we copy interrupt vectors down to real
214 * address 0. Mark that region as executable. This is
215 * because on p8 system with relocation on exception feature
216 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
217 * in order to execute the interrupt handlers in virtual
218 * mode the vector region need to be marked as executable.
219 */
220 if ((PHYSICAL_START > MEMORY_START) &&
221 overlaps_interrupt_vector_text(vaddr, vaddr + step))
222 tprot &= ~HPTE_R_N;
223
210 hash = hpt_hash(vpn, shift, ssize); 224 hash = hpt_hash(vpn, shift, ssize);
211 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 225 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
212 226
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 65b7b65e8708..62bf5e8e78da 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
510} 510}
511 511
512unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, 512unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
513 pmd_t *pmdp, unsigned long clr) 513 pmd_t *pmdp, unsigned long clr,
514 unsigned long set)
514{ 515{
515 516
516 unsigned long old, tmp; 517 unsigned long old, tmp;
@@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
526 andi. %1,%0,%6\n\ 527 andi. %1,%0,%6\n\
527 bne- 1b \n\ 528 bne- 1b \n\
528 andc %1,%0,%4 \n\ 529 andc %1,%0,%4 \n\
530 or %1,%1,%7\n\
529 stdcx. %1,0,%3 \n\ 531 stdcx. %1,0,%3 \n\
530 bne- 1b" 532 bne- 1b"
531 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) 533 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
532 : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) 534 : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
533 : "cc" ); 535 : "cc" );
534#else 536#else
535 old = pmd_val(*pmdp); 537 old = pmd_val(*pmdp);
536 *pmdp = __pmd(old & ~clr); 538 *pmdp = __pmd((old & ~clr) | set);
537#endif 539#endif
538 if (old & _PAGE_HASHPTE) 540 if (old & _PAGE_HASHPTE)
539 hpte_do_hugepage_flush(mm, addr, pmdp); 541 hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
708void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 710void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
709 pmd_t *pmdp) 711 pmd_t *pmdp)
710{ 712{
711 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); 713 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
712} 714}
713 715
714/* 716/*
@@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
835 unsigned long old; 837 unsigned long old;
836 pgtable_t *pgtable_slot; 838 pgtable_t *pgtable_slot;
837 839
838 old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); 840 old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
839 old_pmd = __pmd(old); 841 old_pmd = __pmd(old);
840 /* 842 /*
841 * We have pmd == none and we are holding page_table_lock. 843 * We have pmd == none and we are holding page_table_lock.
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a770df2dae70..6c0b1f5f8d2c 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
79 arch_enter_lazy_mmu_mode(); 79 arch_enter_lazy_mmu_mode();
80 for (; npages > 0; --npages) { 80 for (; npages > 0; --npages) {
81 pte_update(mm, addr, pte, 0, 0); 81 pte_update(mm, addr, pte, 0, 0, 0);
82 addr += PAGE_SIZE; 82 addr += PAGE_SIZE;
83 ++pte; 83 ++pte;
84 } 84 }
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7c..67cf22083f4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
1147 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); 1147 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
1148 1148
1149 mb(); 1149 mb();
1150 if (cpuhw->bhrb_users)
1151 ppmu->config_bhrb(cpuhw->bhrb_filter);
1152
1150 write_mmcr0(cpuhw, mmcr0); 1153 write_mmcr0(cpuhw, mmcr0);
1151 1154
1152 /* 1155 /*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
1158 } 1161 }
1159 1162
1160 out: 1163 out:
1161 if (cpuhw->bhrb_users)
1162 ppmu->config_bhrb(cpuhw->bhrb_filter);
1163 1164
1164 local_irq_restore(flags); 1165 local_irq_restore(flags);
1165} 1166}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13f..96cee20dcd34 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
25#define PM_BRU_FIN 0x10068 25#define PM_BRU_FIN 0x10068
26#define PM_BR_MPRED_CMPL 0x400f6 26#define PM_BR_MPRED_CMPL 0x400f6
27 27
28/* All L1 D cache load references counted at finish, gated by reject */
29#define PM_LD_REF_L1 0x100ee
30/* Load Missed L1 */
31#define PM_LD_MISS_L1 0x3e054
32/* Store Missed L1 */
33#define PM_ST_MISS_L1 0x300f0
34/* L1 cache data prefetches */
35#define PM_L1_PREF 0x0d8b8
36/* Instruction fetches from L1 */
37#define PM_INST_FROM_L1 0x04080
38/* Demand iCache Miss */
39#define PM_L1_ICACHE_MISS 0x200fd
 41/* Instruction Demand sectors written into IL1 */
41#define PM_L1_DEMAND_WRITE 0x0408c
42/* Instruction prefetch written into IL1 */
43#define PM_IC_PREF_WRITE 0x0408e
44/* The data cache was reloaded from local core's L3 due to a demand load */
45#define PM_DATA_FROM_L3 0x4c042
46/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
47#define PM_DATA_FROM_L3MISS 0x300fe
48/* All successful D-side store dispatches for this thread */
49#define PM_L2_ST 0x17080
50/* All successful D-side store dispatches for this thread that were L2 Miss */
51#define PM_L2_ST_MISS 0x17082
52/* Total HW L3 prefetches(Load+store) */
53#define PM_L3_PREF_ALL 0x4e052
54/* Data PTEG reload */
55#define PM_DTLB_MISS 0x300fc
56/* ITLB Reloaded */
57#define PM_ITLB_MISS 0x400fc
58
28 59
29/* 60/*
30 * Raw event encoding for POWER8: 61 * Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
557 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, 588 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
558 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, 589 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
559 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, 590 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
591 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
592 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
560}; 593};
561 594
562static u64 power8_bhrb_filter_map(u64 branch_sample_type) 595static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
596 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); 629 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
597} 630}
598 631
632#define C(x) PERF_COUNT_HW_CACHE_##x
633
634/*
635 * Table of generalized cache-related events.
636 * 0 means not supported, -1 means nonsensical, other values
637 * are event codes.
638 */
639static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
640 [ C(L1D) ] = {
641 [ C(OP_READ) ] = {
642 [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
643 [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
644 },
645 [ C(OP_WRITE) ] = {
646 [ C(RESULT_ACCESS) ] = 0,
647 [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
648 },
649 [ C(OP_PREFETCH) ] = {
650 [ C(RESULT_ACCESS) ] = PM_L1_PREF,
651 [ C(RESULT_MISS) ] = 0,
652 },
653 },
654 [ C(L1I) ] = {
655 [ C(OP_READ) ] = {
656 [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
657 [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
658 },
659 [ C(OP_WRITE) ] = {
660 [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
661 [ C(RESULT_MISS) ] = -1,
662 },
663 [ C(OP_PREFETCH) ] = {
664 [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
665 [ C(RESULT_MISS) ] = 0,
666 },
667 },
668 [ C(LL) ] = {
669 [ C(OP_READ) ] = {
670 [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
671 [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
672 },
673 [ C(OP_WRITE) ] = {
674 [ C(RESULT_ACCESS) ] = PM_L2_ST,
675 [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
676 },
677 [ C(OP_PREFETCH) ] = {
678 [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
679 [ C(RESULT_MISS) ] = 0,
680 },
681 },
682 [ C(DTLB) ] = {
683 [ C(OP_READ) ] = {
684 [ C(RESULT_ACCESS) ] = 0,
685 [ C(RESULT_MISS) ] = PM_DTLB_MISS,
686 },
687 [ C(OP_WRITE) ] = {
688 [ C(RESULT_ACCESS) ] = -1,
689 [ C(RESULT_MISS) ] = -1,
690 },
691 [ C(OP_PREFETCH) ] = {
692 [ C(RESULT_ACCESS) ] = -1,
693 [ C(RESULT_MISS) ] = -1,
694 },
695 },
696 [ C(ITLB) ] = {
697 [ C(OP_READ) ] = {
698 [ C(RESULT_ACCESS) ] = 0,
699 [ C(RESULT_MISS) ] = PM_ITLB_MISS,
700 },
701 [ C(OP_WRITE) ] = {
702 [ C(RESULT_ACCESS) ] = -1,
703 [ C(RESULT_MISS) ] = -1,
704 },
705 [ C(OP_PREFETCH) ] = {
706 [ C(RESULT_ACCESS) ] = -1,
707 [ C(RESULT_MISS) ] = -1,
708 },
709 },
710 [ C(BPU) ] = {
711 [ C(OP_READ) ] = {
712 [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
713 [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
714 },
715 [ C(OP_WRITE) ] = {
716 [ C(RESULT_ACCESS) ] = -1,
717 [ C(RESULT_MISS) ] = -1,
718 },
719 [ C(OP_PREFETCH) ] = {
720 [ C(RESULT_ACCESS) ] = -1,
721 [ C(RESULT_MISS) ] = -1,
722 },
723 },
724 [ C(NODE) ] = {
725 [ C(OP_READ) ] = {
726 [ C(RESULT_ACCESS) ] = -1,
727 [ C(RESULT_MISS) ] = -1,
728 },
729 [ C(OP_WRITE) ] = {
730 [ C(RESULT_ACCESS) ] = -1,
731 [ C(RESULT_MISS) ] = -1,
732 },
733 [ C(OP_PREFETCH) ] = {
734 [ C(RESULT_ACCESS) ] = -1,
735 [ C(RESULT_MISS) ] = -1,
736 },
737 },
738};
739
740#undef C
741
599static struct power_pmu power8_pmu = { 742static struct power_pmu power8_pmu = {
600 .name = "POWER8", 743 .name = "POWER8",
601 .n_counter = 6, 744 .n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
611 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, 754 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
612 .n_generic = ARRAY_SIZE(power8_generic_events), 755 .n_generic = ARRAY_SIZE(power8_generic_events),
613 .generic_events = power8_generic_events, 756 .generic_events = power8_generic_events,
757 .cache_events = &power8_cache_events,
614 .attr_groups = power8_pmu_attr_groups, 758 .attr_groups = power8_pmu_attr_groups,
615 .bhrb_nr = 32, 759 .bhrb_nr = 32,
616}; 760};
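The table added above is indexed by the generic perf cache id, operation and result; a hedged sketch of how such a table resolves to a raw event code (the helper name is made up, the real lookup lives in the common powerpc perf code):

#include <linux/errno.h>
#include <linux/perf_event.h>

static int resolve_cache_event(int (*tbl)[PERF_COUNT_HW_CACHE_OP_MAX]
					 [PERF_COUNT_HW_CACHE_RESULT_MAX],
			       unsigned int cache, unsigned int op,
			       unsigned int result)
{
	int ev = tbl[cache][op][result];

	if (ev == 0)			/* not counted by this PMU */
		return -EOPNOTSUPP;
	if (ev == -1)			/* combination is nonsensical */
		return -EINVAL;
	return ev;			/* raw code, e.g. PM_LD_MISS_L1 */
}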
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index e1e71618b70c..f51474336460 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
44 44
45 /* We simply send special EEH event */ 45 /* We simply send special EEH event */
46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) && 46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
47 (events & OPAL_EVENT_PCI_ERROR)) 47 (events & OPAL_EVENT_PCI_ERROR) &&
48 eeh_enabled())
48 eeh_send_failure_event(NULL); 49 eeh_send_failure_event(NULL);
49 50
50 return 0; 51 return 0;
@@ -489,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
489static int ioda_eeh_reset(struct eeh_pe *pe, int option) 490static int ioda_eeh_reset(struct eeh_pe *pe, int option)
490{ 491{
491 struct pci_controller *hose = pe->phb; 492 struct pci_controller *hose = pe->phb;
492 struct eeh_dev *edev; 493 struct pci_bus *bus;
493 struct pci_dev *dev;
494 int ret; 494 int ret;
495 495
496 /* 496 /*
@@ -519,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
519 if (pe->type & EEH_PE_PHB) { 519 if (pe->type & EEH_PE_PHB) {
520 ret = ioda_eeh_phb_reset(hose, option); 520 ret = ioda_eeh_phb_reset(hose, option);
521 } else { 521 } else {
522 if (pe->type & EEH_PE_DEVICE) { 522 bus = eeh_pe_bus_get(pe);
523 /* 523 if (pci_is_root_bus(bus))
524 * If it's device PE, we didn't refer to the parent
525 * PCI bus yet. So we have to figure it out indirectly.
526 */
527 edev = list_first_entry(&pe->edevs,
528 struct eeh_dev, list);
529 dev = eeh_dev_to_pci_dev(edev);
530 dev = dev->bus->self;
531 } else {
532 /*
533 * If it's bus PE, the parent PCI bus is already there
534 * and just pick it up.
535 */
536 dev = pe->bus->self;
537 }
538
539 /*
540 * Do reset based on the fact that the direct upstream bridge
541 * is root bridge (port) or not.
542 */
543 if (dev->bus->number == 0)
544 ret = ioda_eeh_root_reset(hose, option); 524 ret = ioda_eeh_root_reset(hose, option);
545 else 525 else
546 ret = ioda_eeh_bridge_reset(hose, dev, option); 526 ret = ioda_eeh_bridge_reset(hose, bus->self, option);
547 } 527 }
548 528
549 return ret; 529 return ret;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index a79fddc5e74e..a59788e83b8b 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
145 * Enable EEH explicitly so that we will do EEH check 145 * Enable EEH explicitly so that we will do EEH check
146 * while accessing I/O stuff 146 * while accessing I/O stuff
147 */ 147 */
148 eeh_subsystem_enabled = 1; 148 eeh_set_enable(true);
149 149
150 /* Save memory bars */ 150 /* Save memory bars */
151 eeh_save_bars(edev); 151 eeh_save_bars(edev);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/msi.h> 23#include <linux/msi.h>
24#include <linux/memblock.h>
24 25
25#include <asm/sections.h> 26#include <asm/sections.h>
26#include <asm/io.h> 27#include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
460 return; 461 return;
461 462
462 pe = &phb->ioda.pe_array[pdn->pe_number]; 463 pe = &phb->ioda.pe_array[pdn->pe_number];
464 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
463 set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); 465 set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
464} 466}
465 467
468static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
469 struct pci_dev *pdev, u64 dma_mask)
470{
471 struct pci_dn *pdn = pci_get_pdn(pdev);
472 struct pnv_ioda_pe *pe;
473 uint64_t top;
474 bool bypass = false;
475
476 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
 477		return -ENODEV;
478
479 pe = &phb->ioda.pe_array[pdn->pe_number];
480 if (pe->tce_bypass_enabled) {
481 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
482 bypass = (dma_mask >= top);
483 }
484
485 if (bypass) {
486 dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
487 set_dma_ops(&pdev->dev, &dma_direct_ops);
488 set_dma_offset(&pdev->dev, pe->tce_bypass_base);
489 } else {
490 dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
491 set_dma_ops(&pdev->dev, &dma_iommu_ops);
492 set_iommu_table_base(&pdev->dev, &pe->tce32_table);
493 }
494 return 0;
495}
496
466static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) 497static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
467{ 498{
468 struct pci_dev *dev; 499 struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
657 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); 688 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
658} 689}
659 690
691static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
692{
693 struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
694 tce32_table);
695 uint16_t window_id = (pe->pe_number << 1 ) + 1;
696 int64_t rc;
697
698 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
699 if (enable) {
700 phys_addr_t top = memblock_end_of_DRAM();
701
702 top = roundup_pow_of_two(top);
703 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
704 pe->pe_number,
705 window_id,
706 pe->tce_bypass_base,
707 top);
708 } else {
709 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
710 pe->pe_number,
711 window_id,
712 pe->tce_bypass_base,
713 0);
714
715 /*
716 * We might want to reset the DMA ops of all devices on
717 * this PE. However in theory, that shouldn't be necessary
718 * as this is used for VFIO/KVM pass-through and the device
719 * hasn't yet been returned to its kernel driver
720 */
721 }
722 if (rc)
723 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
724 else
725 pe->tce_bypass_enabled = enable;
726}
727
728static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
729 struct pnv_ioda_pe *pe)
730{
731 /* TVE #1 is selected by PCI address bit 59 */
732 pe->tce_bypass_base = 1ull << 59;
733
734 /* Install set_bypass callback for VFIO */
735 pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
736
737 /* Enable bypass by default */
738 pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
739}
740
660static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, 741static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
661 struct pnv_ioda_pe *pe) 742 struct pnv_ioda_pe *pe)
662{ 743{
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
727 else 808 else
728 pnv_ioda_setup_bus_dma(pe, pe->pbus); 809 pnv_ioda_setup_bus_dma(pe, pe->pbus);
729 810
811 /* Also create a bypass window */
812 pnv_pci_ioda2_setup_bypass_pe(phb, pe);
730 return; 813 return;
731fail: 814fail:
732 if (pe->tce32_seg >= 0) 815 if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1286 1369
1287 /* Setup TCEs */ 1370 /* Setup TCEs */
1288 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; 1371 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
1372 phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
1289 1373
1290 /* Setup shutdown function for kexec */ 1374 /* Setup shutdown function for kexec */
1291 phb->shutdown = pnv_pci_ioda_shutdown; 1375 phb->shutdown = pnv_pci_ioda_shutdown;
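For context, the new dma_set_mask hook is reached from an ordinary driver request; a hedged sketch of what a PCI driver's probe() might do to end up in the bypass path (dma_set_mask_and_coherent() and DMA_BIT_MASK() are the standard DMA API, nothing PowerNV-specific is invented):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	/* A full 64-bit mask covering all of RAM is what lets the platform
	 * code above switch the device to the direct TVE#1 bypass window. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Otherwise stay on 32-bit DMA through the TCE table. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}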
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc57ef5..95633d79ef5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
634 pnv_pci_dma_fallback_setup(hose, pdev); 634 pnv_pci_dma_fallback_setup(hose, pdev);
635} 635}
636 636
637int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
638{
639 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
640 struct pnv_phb *phb = hose->private_data;
641
642 if (phb && phb->dma_set_mask)
643 return phb->dma_set_mask(phb, pdev, dma_mask);
644 return __dma_set_mask(&pdev->dev, dma_mask);
645}
646
637void pnv_pci_shutdown(void) 647void pnv_pci_shutdown(void)
638{ 648{
639 struct pci_controller *hose; 649 struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942a9a5f..cde169442775 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
54 struct iommu_table tce32_table; 54 struct iommu_table tce32_table;
55 phys_addr_t tce_inval_reg_phys; 55 phys_addr_t tce_inval_reg_phys;
56 56
57 /* XXX TODO: Add support for additional 64-bit iommus */ 57 /* 64-bit TCE bypass region */
58 bool tce_bypass_enabled;
59 uint64_t tce_bypass_base;
58 60
59 /* MSIs. MVE index is identical for for 32 and 64 bit MSI 61 /* MSIs. MVE index is identical for for 32 and 64 bit MSI
60 * and -1 if not supported. (It's actually identical to the 62 * and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
113 unsigned int hwirq, unsigned int virq, 115 unsigned int hwirq, unsigned int virq,
114 unsigned int is_64, struct msi_msg *msg); 116 unsigned int is_64, struct msi_msg *msg);
115 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); 117 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
118 int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
119 u64 dma_mask);
116 void (*fixup_phb)(struct pci_controller *hose); 120 void (*fixup_phb)(struct pci_controller *hose);
117 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); 121 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
118 void (*shutdown)(struct pnv_phb *phb); 122 void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f95..0051e108ef0f 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
7static inline void pnv_smp_init(void) { } 7static inline void pnv_smp_init(void) { }
8#endif 8#endif
9 9
10struct pci_dev;
11
10#ifdef CONFIG_PCI 12#ifdef CONFIG_PCI
11extern void pnv_pci_init(void); 13extern void pnv_pci_init(void);
12extern void pnv_pci_shutdown(void); 14extern void pnv_pci_shutdown(void);
15extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
13#else 16#else
14static inline void pnv_pci_init(void) { } 17static inline void pnv_pci_init(void) { }
15static inline void pnv_pci_shutdown(void) { } 18static inline void pnv_pci_shutdown(void) { }
19
20static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
21{
22 return -ENODEV;
23}
16#endif 24#endif
17 25
18extern void pnv_lpc_init(void); 26extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 21166f65c97c..110f4fbd319f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -27,6 +27,7 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/bug.h> 28#include <linux/bug.h>
29#include <linux/cpuidle.h> 29#include <linux/cpuidle.h>
30#include <linux/pci.h>
30 31
31#include <asm/machdep.h> 32#include <asm/machdep.h>
32#include <asm/firmware.h> 33#include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
141{ 142{
142} 143}
143 144
145static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
146{
147 if (dev_is_pci(dev))
148 return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
149 return __dma_set_mask(dev, dma_mask);
150}
151
144static void pnv_shutdown(void) 152static void pnv_shutdown(void)
145{ 153{
146 /* Let the PCI code clear up IODA tables */ 154 /* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
238 .machine_shutdown = pnv_shutdown, 246 .machine_shutdown = pnv_shutdown,
239 .power_save = powernv_idle, 247 .power_save = powernv_idle,
240 .calibrate_decr = generic_calibrate_decr, 248 .calibrate_decr = generic_calibrate_decr,
249 .dma_set_mask = pnv_dma_set_mask,
241#ifdef CONFIG_KEXEC 250#ifdef CONFIG_KEXEC
242 .kexec_cpu_down = pnv_kexec_cpu_down, 251 .kexec_cpu_down = pnv_kexec_cpu_down,
243#endif 252#endif
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 37300f6ee244..80b1d57c306a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG_CPU if SMP 22 select HOTPLUG_CPU if SMP
23 select ARCH_RANDOM
23 default y 24 default y
24 25
25config PPC_SPLPAR 26config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 9ef3cc8ebc11..8a8f0472d98f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
265 enable = 1; 265 enable = 1;
266 266
267 if (enable) { 267 if (enable) {
268 eeh_subsystem_enabled = 1; 268 eeh_set_enable(true);
269 eeh_add_to_parent_pe(edev); 269 eeh_add_to_parent_pe(edev);
270 270
271 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", 271 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 70670a2d9cf2..c413ec158ff5 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
113{ 113{
114 struct device_node *dn, *pdn; 114 struct device_node *dn, *pdn;
115 struct pci_bus *bus; 115 struct pci_bus *bus;
116 const __be32 *pcie_link_speed_stats; 116 u32 pcie_link_speed_stats[2];
117 int rc;
117 118
118 bus = bridge->bus; 119 bus = bridge->bus;
119 120
@@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
122 return 0; 123 return 0;
123 124
124 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { 125 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
125 pcie_link_speed_stats = of_get_property(pdn, 126 rc = of_property_read_u32_array(pdn,
126 "ibm,pcie-link-speed-stats", NULL); 127 "ibm,pcie-link-speed-stats",
127 if (pcie_link_speed_stats) 128 &pcie_link_speed_stats[0], 2);
129 if (!rc)
128 break; 130 break;
129 } 131 }
130 132
131 of_node_put(pdn); 133 of_node_put(pdn);
132 134
133 if (!pcie_link_speed_stats) { 135 if (rc) {
134 pr_err("no ibm,pcie-link-speed-stats property\n"); 136 pr_err("no ibm,pcie-link-speed-stats property\n");
135 return 0; 137 return 0;
136 } 138 }
137 139
138 switch (be32_to_cpup(pcie_link_speed_stats)) { 140 switch (pcie_link_speed_stats[0]) {
139 case 0x01: 141 case 0x01:
140 bus->max_bus_speed = PCIE_SPEED_2_5GT; 142 bus->max_bus_speed = PCIE_SPEED_2_5GT;
141 break; 143 break;
142 case 0x02: 144 case 0x02:
143 bus->max_bus_speed = PCIE_SPEED_5_0GT; 145 bus->max_bus_speed = PCIE_SPEED_5_0GT;
144 break; 146 break;
147 case 0x04:
148 bus->max_bus_speed = PCIE_SPEED_8_0GT;
149 break;
145 default: 150 default:
146 bus->max_bus_speed = PCI_SPEED_UNKNOWN; 151 bus->max_bus_speed = PCI_SPEED_UNKNOWN;
147 break; 152 break;
148 } 153 }
149 154
150 switch (be32_to_cpup(pcie_link_speed_stats)) { 155 switch (pcie_link_speed_stats[1]) {
151 case 0x01: 156 case 0x01:
152 bus->cur_bus_speed = PCIE_SPEED_2_5GT; 157 bus->cur_bus_speed = PCIE_SPEED_2_5GT;
153 break; 158 break;
154 case 0x02: 159 case 0x02:
155 bus->cur_bus_speed = PCIE_SPEED_5_0GT; 160 bus->cur_bus_speed = PCIE_SPEED_5_0GT;
156 break; 161 break;
162 case 0x04:
163 bus->cur_bus_speed = PCIE_SPEED_8_0GT;
164 break;
157 default: 165 default:
158 bus->cur_bus_speed = PCI_SPEED_UNKNOWN; 166 bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
159 break; 167 break;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7cbda7..972df0ffd4dc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
430{ 430{
431 long rc; 431 long rc;
432 432
433 if (firmware_has_feature(FW_FEATURE_SET_MODE) && 433 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
434 (image->type != KEXEC_TYPE_CRASH)) {
435 rc = pSeries_disable_reloc_on_exc(); 434 rc = pSeries_disable_reloc_on_exc();
436 if (rc != H_SUCCESS) 435 if (rc != H_SUCCESS)
437 pr_warning("Warning: Failed to disable relocation on " 436 pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd16..8209744b2829 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
886 886
887 /* Default: read HW settings */ 887 /* Default: read HW settings */
888 if (flow_type == IRQ_TYPE_DEFAULT) { 888 if (flow_type == IRQ_TYPE_DEFAULT) {
889 switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | 889 int vold_ps;
890 MPIC_INFO(VECPRI_SENSE_MASK))) { 890
891 case MPIC_INFO(VECPRI_SENSE_EDGE) | 891 vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
892 MPIC_INFO(VECPRI_POLARITY_POSITIVE): 892 MPIC_INFO(VECPRI_SENSE_MASK));
893 flow_type = IRQ_TYPE_EDGE_RISING; 893
894 break; 894 if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
895 case MPIC_INFO(VECPRI_SENSE_EDGE) | 895 MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
896 MPIC_INFO(VECPRI_POLARITY_NEGATIVE): 896 flow_type = IRQ_TYPE_EDGE_RISING;
897 flow_type = IRQ_TYPE_EDGE_FALLING; 897 else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
898 break; 898 MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
899 case MPIC_INFO(VECPRI_SENSE_LEVEL) | 899 flow_type = IRQ_TYPE_EDGE_FALLING;
900 MPIC_INFO(VECPRI_POLARITY_POSITIVE): 900 else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
901 flow_type = IRQ_TYPE_LEVEL_HIGH; 901 MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
902 break; 902 flow_type = IRQ_TYPE_LEVEL_HIGH;
903 case MPIC_INFO(VECPRI_SENSE_LEVEL) | 903 else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
904 MPIC_INFO(VECPRI_POLARITY_NEGATIVE): 904 MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
905 flow_type = IRQ_TYPE_LEVEL_LOW; 905 flow_type = IRQ_TYPE_LEVEL_LOW;
906 break; 906 else
907 } 907 WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
908 } 908 }
909 909
910 /* Apply to irq desc */ 910 /* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b3d44a..b07909850f77 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
309 309
310 if (xmon_speaker == me) 310 if (xmon_speaker == me)
311 return; 311 return;
312
312 for (;;) { 313 for (;;) {
313 if (xmon_speaker == 0) { 314 last_speaker = cmpxchg(&xmon_speaker, 0, me);
314 last_speaker = cmpxchg(&xmon_speaker, 0, me); 315 if (last_speaker == 0)
315 if (last_speaker == 0) 316 return;
316 return; 317
317 } 318 /*
318 timeout = 10000000; 319 * Wait a full second for the lock, we might be on a slow
320 * console, but check every 100us.
321 */
322 timeout = 10000;
319 while (xmon_speaker == last_speaker) { 323 while (xmon_speaker == last_speaker) {
320 if (--timeout > 0) 324 if (--timeout > 0) {
325 udelay(100);
321 continue; 326 continue;
327 }
328
322 /* hostile takeover */ 329 /* hostile takeover */
323 prev = cmpxchg(&xmon_speaker, last_speaker, me); 330 prev = cmpxchg(&xmon_speaker, last_speaker, me);
324 if (prev == last_speaker) 331 if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
397 } 404 }
398 405
399 xmon_fault_jmp[cpu] = recurse_jmp; 406 xmon_fault_jmp[cpu] = recurse_jmp;
400 cpumask_set_cpu(cpu, &cpus_in_xmon);
401 407
402 bp = NULL; 408 bp = NULL;
403 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) 409 if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
419 release_output_lock(); 425 release_output_lock();
420 } 426 }
421 427
428 cpumask_set_cpu(cpu, &cpus_in_xmon);
429
422 waiting: 430 waiting:
423 secondary = 1; 431 secondary = 1;
424 while (secondary && !xmon_gate) { 432 while (secondary && !xmon_gate) {
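The rewritten wait above bounds the spin explicitly: 10000 polls with udelay(100) between them is roughly one second before the hostile takeover. A small sketch of that bounded busy-wait, with the assumed numbers spelled out (names are illustrative):

#include <linux/delay.h>
#include <linux/types.h>

#define SPEAKER_POLL_US		100	/* check every 100 microseconds */
#define SPEAKER_POLL_COUNT	10000	/* 10000 * 100us = 1 second total */

static bool wait_for_speaker_change(volatile unsigned long *speaker,
				    unsigned long last)
{
	int timeout = SPEAKER_POLL_COUNT;

	while (*speaker == last) {
		if (--timeout <= 0)
			return false;	/* give up; caller takes the lock over */
		udelay(SPEAKER_POLL_US);
	}
	return true;
}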
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1cef5208..47c8630c93cd 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@ static int __init appldata_init(void)
529{ 529{
530 int rc; 530 int rc;
531 531
532 init_virt_timer(&appldata_timer);
532 appldata_timer.function = appldata_timer_function; 533 appldata_timer.function = appldata_timer_function;
533 appldata_timer.data = (unsigned long) &appldata_work; 534 appldata_timer.data = (unsigned long) &appldata_work;
534 535
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 59c8efce1b99..0248949a756d 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper)
1421ENTRY(sys_sched_getattr_wrapper) 1421ENTRY(sys_sched_getattr_wrapper)
1422 lgfr %r2,%r2 # pid_t 1422 lgfr %r2,%r2 # pid_t
1423 llgtr %r3,%r3 # const char __user * 1423 llgtr %r3,%r3 # const char __user *
1424 llgfr %r3,%r3 # unsigned int 1424 llgfr %r4,%r4 # unsigned int
1425 jg sys_sched_getattr 1425 jg sys_sched_getattr
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
59 .quad 0 # cr12: tracing off 59 .quad 0 # cr12: tracing off
60 .quad 0 # cr13: home space segment table 60 .quad 0 # cr13: home space segment table
61 .quad 0xc0000000 # cr14: machine check handling off 61 .quad 0xc0000000 # cr14: machine check handling off
62 .quad 0 # cr15: linkage stack operations 62 .quad .Llinkage_stack # cr15: linkage stack operations
63.Lpcmsk:.quad 0x0000000180000000 63.Lpcmsk:.quad 0x0000000180000000
64.L4malign:.quad 0xffffffffffc00000 64.L4malign:.quad 0xffffffffffc00000
65.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 65.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
67.Lparmaddr: 67.Lparmaddr:
68 .quad PARMAREA 68 .quad PARMAREA
69 .align 64 69 .align 64
70.Lduct: .long 0,0,0,0,.Lduald,0,0,0 70.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
71 .long 0,0,0,0,0,0,0,0 71 .long 0,0,0,0,0,0,0,0
72.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
72 .align 128 73 .align 128
73.Lduald:.rept 8 74.Lduald:.rept 8
74 .long 0x80000000,0,0,0 # invalid access-list entries 75 .long 0x80000000,0,0,0 # invalid access-list entries
75 .endr 76 .endr
77.Llinkage_stack:
78 .long 0,0,0x89000000,0,0,0,0x8a000000,0
76 79
77ENTRY(_ehead) 80ENTRY(_ehead)
78 81
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/setup.h>
16#include <asm/ipl.h>
15 17
16#define ESSA_SET_STABLE 1 18#define ESSA_SET_STABLE 1
17#define ESSA_SET_UNUSED 2 19#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
41 43
42 if (!cmma_flag) 44 if (!cmma_flag)
43 return; 45 return;
46 /*
47 * Disable CMM for dump, otherwise the tprot based memory
48 * detection can fail because of unstable pages.
49 */
50 if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
51 cmma_flag = 0;
52 return;
53 }
44 asm volatile( 54 asm volatile(
45 " .insn rrf,0xb9ab0000,%1,%1,0,0\n" 55 " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
46 "0: la %0,0\n" 56 "0: la %0,0\n"
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 60c11a629d96..f91c03119804 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev)
206 zdev->dma_table = NULL; 206 zdev->dma_table = NULL;
207} 207}
208 208
209static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start, 209static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
210 int size) 210 unsigned long start, int size)
211{ 211{
212 unsigned long boundary_size = 0x1000000; 212 unsigned long boundary_size;
213 213
214 boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
215 PAGE_SIZE) >> PAGE_SHIFT;
214 return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, 216 return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
215 start, size, 0, boundary_size, 0); 217 start, size, 0, boundary_size, 0);
216} 218}
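The boundary is now derived from the device instead of being hard-coded; as a worked example under assumed defaults (4 KiB pages and dma_get_seg_boundary() returning 0xffffffff), ALIGN(0xffffffff + 1, 4096) >> 12 is 0x100000 pages, so allocations never straddle a 4 GiB boundary in IOMMU address space. The same arithmetic as a stand-alone sketch:

/* Pages per non-crossable segment: align (boundary + 1) up to a page,
 * then convert bytes to pages. */
static unsigned long boundary_pages(unsigned long seg_boundary,
				    unsigned long page_size,
				    unsigned int page_shift)
{
	return ((seg_boundary + 1 + page_size - 1) & ~(page_size - 1)) >> page_shift;
}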
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c51efdcd07a2..7d8b7e94b93b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -27,7 +27,7 @@ config SPARC
27 select RTC_DRV_M48T59 27 select RTC_DRV_M48T59
28 select HAVE_DMA_ATTRS 28 select HAVE_DMA_ATTRS
29 select HAVE_DMA_API_DEBUG 29 select HAVE_DMA_API_DEBUG
30 select HAVE_ARCH_JUMP_LABEL 30 select HAVE_ARCH_JUMP_LABEL if SPARC64
31 select GENERIC_IRQ_SHOW 31 select GENERIC_IRQ_SHOW
32 select ARCH_WANT_IPC_PARSE_VERSION 32 select ARCH_WANT_IPC_PARSE_VERSION
33 select GENERIC_PCI_IOMAP 33 select GENERIC_PCI_IOMAP
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 869023abe5a4..cfbe53c17b0d 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -14,6 +14,7 @@
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/kdebug.h> 16#include <linux/kdebug.h>
17#include <linux/export.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/log2.h> 20#include <linux/log2.h>
@@ -62,6 +63,7 @@ extern unsigned long last_valid_pfn;
62static pgd_t *srmmu_swapper_pg_dir; 63static pgd_t *srmmu_swapper_pg_dir;
63 64
64const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops; 65const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
66EXPORT_SYMBOL(sparc32_cachetlb_ops);
65 67
66#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
67const struct sparc32_cachetlb_ops *local_ops; 69const struct sparc32_cachetlb_ops *local_ops;
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3b978c472d08..3d6b9f81cc68 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
132extern void efi_sync_low_kernel_mappings(void); 132extern void efi_sync_low_kernel_mappings(void);
133extern void efi_setup_page_tables(void); 133extern void efi_setup_page_tables(void);
134extern void __init old_map_region(efi_memory_desc_t *md); 134extern void __init old_map_region(efi_memory_desc_t *md);
135extern void __init runtime_code_page_mkexec(void);
136extern void __init efi_runtime_mkexec(void);
135 137
136struct efi_setup_data { 138struct efi_setup_data {
137 u64 fw_vendor; 139 u64 fw_vendor;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbc8b12fa443..5ad38ad07890 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b)
445 return a.pte == b.pte; 445 return a.pte == b.pte;
446} 446}
447 447
448static inline int pteval_present(pteval_t pteval)
449{
450 /*
451 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
452 * way clearly states that the intent is that protnone and numa
453 * hinting ptes are considered present for the purposes of
454 * pagetable operations like zapping, protection changes, gup etc.
455 */
456 return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
457}
458
448static inline int pte_present(pte_t a) 459static inline int pte_present(pte_t a)
449{ 460{
450 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | 461 return pteval_present(pte_flags(a));
451 _PAGE_NUMA);
452} 462}
453 463
454#define pte_accessible pte_accessible 464#define pte_accessible pte_accessible
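A hedged usage sketch: callers that want to know whether there is a backing frame to translate (such as the Xen pte conversion later in this series) test pteval_present() rather than _PAGE_PRESENT alone, so protnone and NUMA-hinting entries are not skipped. The helper name below is illustrative:

#include <asm/pgtable.h>

static unsigned long frame_of(pte_t pte)
{
	if (!pteval_present(pte_flags(pte)))
		return 0;		/* no backing frame to report */
	return pte_pfn(pte);		/* pfn backing this entry */
}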
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 57ae63cd6ee2..94605c0e9cee 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void);
66extern void tsc_restore_sched_clock_state(void); 66extern void tsc_restore_sched_clock_state(void);
67 67
68/* MSR based TSC calibration for Intel Atom SoC platforms */ 68/* MSR based TSC calibration for Intel Atom SoC platforms */
69int try_msr_calibrate_tsc(unsigned long *fast_calibrate); 69unsigned long try_msr_calibrate_tsc(void);
70 70
71#endif /* _ASM_X86_TSC_H */ 71#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 24b6fd10625a..8e28bf2fc3ef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
284 raw_local_save_flags(eflags); 284 raw_local_save_flags(eflags);
285 BUG_ON(eflags & X86_EFLAGS_AC); 285 BUG_ON(eflags & X86_EFLAGS_AC);
286 286
287 if (cpu_has(c, X86_FEATURE_SMAP)) 287 if (cpu_has(c, X86_FEATURE_SMAP)) {
288#ifdef CONFIG_X86_SMAP
288 set_in_cr4(X86_CR4_SMAP); 289 set_in_cr4(X86_CR4_SMAP);
290#else
291 clear_in_cr4(X86_CR4_SMAP);
292#endif
293 }
289} 294}
290 295
291/* 296/*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 895604f2e916..79f9f848bee4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
1192 for (i = 0; i < cpuc->n_events; i++) { 1192 for (i = 0; i < cpuc->n_events; i++) {
1193 if (event == cpuc->event_list[i]) { 1193 if (event == cpuc->event_list[i]) {
1194 1194
1195 if (i >= cpuc->n_events - cpuc->n_added)
1196 --cpuc->n_added;
1197
1195 if (x86_pmu.put_event_constraints) 1198 if (x86_pmu.put_event_constraints)
1196 x86_pmu.put_event_constraints(cpuc, event); 1199 x86_pmu.put_event_constraints(cpuc, event);
1197 1200
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 0fa4f242f050..aa333d966886 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1361 intel_pmu_disable_all(); 1361 intel_pmu_disable_all();
1362 handled = intel_pmu_drain_bts_buffer(); 1362 handled = intel_pmu_drain_bts_buffer();
1363 status = intel_pmu_get_status(); 1363 status = intel_pmu_get_status();
1364 if (!status) { 1364 if (!status)
1365 intel_pmu_enable_all(0); 1365 goto done;
1366 return handled;
1367 }
1368 1366
1369 loops = 0; 1367 loops = 0;
1370again: 1368again:
@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
2310 if (version > 1) 2308 if (version > 1)
2311 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 2309 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
2312 2310
2313 /* 2311 if (boot_cpu_has(X86_FEATURE_PDCM)) {
2314 * v2 and above have a perf capabilities MSR
2315 */
2316 if (version > 1) {
2317 u64 capabilities; 2312 u64 capabilities;
2318 2313
2319 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); 2314 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index de5c56899ae3..b262c6124cf3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -542,8 +542,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
542 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 542 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
543 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 543 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
544 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 544 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
545 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
545 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 546 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
547 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
546 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 548 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
549 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
547 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
548 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
549 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@@ -1219,10 +1222,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1219 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1220 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1221 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1222 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1228 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1229 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1223 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1230 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1231 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1232 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1233 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), 1234 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), 1235 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1228 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), 1236 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d4bdd253fea7..e6253195a301 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
77 return addr >= start && addr < end; 77 return addr >= start && addr < end;
78} 78}
79 79
80static int 80static unsigned long text_ip_addr(unsigned long ip)
81do_ftrace_mod_code(unsigned long ip, const void *new_code)
82{ 81{
83 /* 82 /*
84 * On x86_64, kernel text mappings are mapped read-only with 83 * On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
91 if (within(ip, (unsigned long)_text, (unsigned long)_etext)) 90 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
92 ip = (unsigned long)__va(__pa_symbol(ip)); 91 ip = (unsigned long)__va(__pa_symbol(ip));
93 92
94 return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); 93 return ip;
95} 94}
96 95
97static const unsigned char *ftrace_nop_replace(void) 96static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
123 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) 122 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
124 return -EINVAL; 123 return -EINVAL;
125 124
125 ip = text_ip_addr(ip);
126
126 /* replace the text with the new text */ 127 /* replace the text with the new text */
127 if (do_ftrace_mod_code(ip, new_code)) 128 if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
128 return -EPERM; 129 return -EPERM;
129 130
130 sync_core(); 131 sync_core();
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
221 return -EINVAL; 222 return -EINVAL;
222} 223}
223 224
224int ftrace_update_ftrace_func(ftrace_func_t func) 225static unsigned long ftrace_update_func;
226
227static int update_ftrace_func(unsigned long ip, void *new)
225{ 228{
226 unsigned long ip = (unsigned long)(&ftrace_call); 229 unsigned char old[MCOUNT_INSN_SIZE];
227 unsigned char old[MCOUNT_INSN_SIZE], *new;
228 int ret; 230 int ret;
229 231
230 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); 232 memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
231 new = ftrace_call_replace(ip, (unsigned long)func); 233
234 ftrace_update_func = ip;
235 /* Make sure the breakpoints see the ftrace_update_func update */
236 smp_wmb();
232 237
233 /* See comment above by declaration of modifying_ftrace_code */ 238 /* See comment above by declaration of modifying_ftrace_code */
234 atomic_inc(&modifying_ftrace_code); 239 atomic_inc(&modifying_ftrace_code);
235 240
236 ret = ftrace_modify_code(ip, old, new); 241 ret = ftrace_modify_code(ip, old, new);
237 242
243 atomic_dec(&modifying_ftrace_code);
244
245 return ret;
246}
247
248int ftrace_update_ftrace_func(ftrace_func_t func)
249{
250 unsigned long ip = (unsigned long)(&ftrace_call);
251 unsigned char *new;
252 int ret;
253
254 new = ftrace_call_replace(ip, (unsigned long)func);
255 ret = update_ftrace_func(ip, new);
256
238 /* Also update the regs callback function */ 257 /* Also update the regs callback function */
239 if (!ret) { 258 if (!ret) {
240 ip = (unsigned long)(&ftrace_regs_call); 259 ip = (unsigned long)(&ftrace_regs_call);
241 memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
242 new = ftrace_call_replace(ip, (unsigned long)func); 260 new = ftrace_call_replace(ip, (unsigned long)func);
243 ret = ftrace_modify_code(ip, old, new); 261 ret = update_ftrace_func(ip, new);
244 } 262 }
245 263
246 atomic_dec(&modifying_ftrace_code);
247
248 return ret; 264 return ret;
249} 265}
250 266
251static int is_ftrace_caller(unsigned long ip) 267static int is_ftrace_caller(unsigned long ip)
252{ 268{
253 if (ip == (unsigned long)(&ftrace_call) || 269 if (ip == ftrace_update_func)
254 ip == (unsigned long)(&ftrace_regs_call))
255 return 1; 270 return 1;
256 271
257 return 0; 272 return 0;
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
677#ifdef CONFIG_DYNAMIC_FTRACE 692#ifdef CONFIG_DYNAMIC_FTRACE
678extern void ftrace_graph_call(void); 693extern void ftrace_graph_call(void);
679 694
680static int ftrace_mod_jmp(unsigned long ip, 695static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
681 int old_offset, int new_offset)
682{ 696{
683 unsigned char code[MCOUNT_INSN_SIZE]; 697 static union ftrace_code_union calc;
684 698
685 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) 699 /* Jmp not a call (ignore the .e8) */
686 return -EFAULT; 700 calc.e8 = 0xe9;
701 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
687 702
688 if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) 703 /*
689 return -EINVAL; 704 * ftrace external locks synchronize the access to the static variable.
705 */
706 return calc.code;
707}
690 708
691 *(int *)(&code[1]) = new_offset; 709static int ftrace_mod_jmp(unsigned long ip, void *func)
710{
711 unsigned char *new;
692 712
693 if (do_ftrace_mod_code(ip, &code)) 713 new = ftrace_jmp_replace(ip, (unsigned long)func);
694 return -EPERM;
695 714
696 return 0; 715 return update_ftrace_func(ip, new);
697} 716}
698 717
699int ftrace_enable_ftrace_graph_caller(void) 718int ftrace_enable_ftrace_graph_caller(void)
700{ 719{
701 unsigned long ip = (unsigned long)(&ftrace_graph_call); 720 unsigned long ip = (unsigned long)(&ftrace_graph_call);
702 int old_offset, new_offset;
703 721
704 old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); 722 return ftrace_mod_jmp(ip, &ftrace_graph_caller);
705 new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
706
707 return ftrace_mod_jmp(ip, old_offset, new_offset);
708} 723}
709 724
710int ftrace_disable_ftrace_graph_caller(void) 725int ftrace_disable_ftrace_graph_caller(void)
711{ 726{
712 unsigned long ip = (unsigned long)(&ftrace_graph_call); 727 unsigned long ip = (unsigned long)(&ftrace_graph_call);
713 int old_offset, new_offset;
714
715 old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
716 new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
717 728
718 return ftrace_mod_jmp(ip, old_offset, new_offset); 729 return ftrace_mod_jmp(ip, &ftrace_stub);
719} 730}
720 731
721#endif /* !CONFIG_DYNAMIC_FTRACE */ 732#endif /* !CONFIG_DYNAMIC_FTRACE */
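ftrace_jmp_replace() above builds a 5-byte near jump the same way ftrace_call_replace() builds a call; a hedged sketch of the encoding it produces (opcode 0xe9 followed by a 32-bit displacement relative to the end of the instruction, i.e. ip + MCOUNT_INSN_SIZE):

#include <linux/types.h>

struct jmp_rel32 {
	u8  opcode;		/* 0xe9 = jmp rel32 */
	s32 offset;		/* target - (ip + 5) */
} __attribute__((packed));

static void encode_jmp_rel32(struct jmp_rel32 *insn,
			     unsigned long ip, unsigned long target)
{
	insn->opcode = 0xe9;
	insn->offset = (s32)(target - (ip + sizeof(*insn)));	/* 5-byte insn */
}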
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a67e4d..f7d0672481fd 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
100 flag |= __GFP_ZERO; 100 flag |= __GFP_ZERO;
101again: 101again:
102 page = NULL; 102 page = NULL;
 103	if (!(flag & GFP_ATOMIC))                                                   103	/* CMA can be used only in a context which permits sleeping */
104 if (flag & __GFP_WAIT)
104 page = dma_alloc_from_contiguous(dev, count, get_order(size)); 105 page = dma_alloc_from_contiguous(dev, count, get_order(size));
106 /* fallback */
105 if (!page) 107 if (!page)
106 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); 108 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
107 if (!page) 109 if (!page)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 19e5adb49a27..cfbe99f88830 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
209 * dance when its actually needed. 209 * dance when its actually needed.
210 */ 210 */
211 211
212 preempt_disable(); 212 preempt_disable_notrace();
213 data = this_cpu_read(cyc2ns.head); 213 data = this_cpu_read(cyc2ns.head);
214 tail = this_cpu_read(cyc2ns.tail); 214 tail = this_cpu_read(cyc2ns.tail);
215 215
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
229 if (!--data->__count) 229 if (!--data->__count)
230 this_cpu_write(cyc2ns.tail, data); 230 this_cpu_write(cyc2ns.tail, data);
231 } 231 }
232 preempt_enable(); 232 preempt_enable_notrace();
233 233
234 return ns; 234 return ns;
235} 235}
@@ -653,13 +653,10 @@ unsigned long native_calibrate_tsc(void)
653 653
654 /* Calibrate TSC using MSR for Intel Atom SoCs */ 654 /* Calibrate TSC using MSR for Intel Atom SoCs */
655 local_irq_save(flags); 655 local_irq_save(flags);
656 i = try_msr_calibrate_tsc(&fast_calibrate); 656 fast_calibrate = try_msr_calibrate_tsc();
657 local_irq_restore(flags); 657 local_irq_restore(flags);
658 if (i >= 0) { 658 if (fast_calibrate)
659 if (i == 0)
660 pr_warn("Fast TSC calibration using MSR failed\n");
661 return fast_calibrate; 659 return fast_calibrate;
662 }
663 660
664 local_irq_save(flags); 661 local_irq_save(flags);
665 fast_calibrate = quick_pit_calibrate(); 662 fast_calibrate = quick_pit_calibrate();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 8b5434f4389f..92ae6acac8a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -53,7 +53,7 @@ static struct freq_desc freq_desc_tables[] = {
53 /* TNG */ 53 /* TNG */
54 { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, 54 { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
55 /* VLV2 */ 55 /* VLV2 */
56 { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, 56 { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
57 /* ANN */ 57 /* ANN */
58 { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, 58 { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
59}; 59};
@@ -77,21 +77,18 @@ static int match_cpu(u8 family, u8 model)
77 77
78/* 78/*
79 * Do MSR calibration only for known/supported CPUs. 79 * Do MSR calibration only for known/supported CPUs.
80 * Return values: 80 *
81 * -1: CPU is unknown/unsupported for MSR based calibration 81 * Returns the calibration value or 0 if MSR calibration failed.
82 * 0: CPU is known/supported, but calibration failed
83 * 1: CPU is known/supported, and calibration succeeded
84 */ 82 */
85int try_msr_calibrate_tsc(unsigned long *fast_calibrate) 83unsigned long try_msr_calibrate_tsc(void)
86{ 84{
87 int cpu_index;
88 u32 lo, hi, ratio, freq_id, freq; 85 u32 lo, hi, ratio, freq_id, freq;
86 unsigned long res;
87 int cpu_index;
89 88
90 cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); 89 cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
91 if (cpu_index < 0) 90 if (cpu_index < 0)
92 return -1; 91 return 0;
93
94 *fast_calibrate = 0;
95 92
96 if (freq_desc_tables[cpu_index].msr_plat) { 93 if (freq_desc_tables[cpu_index].msr_plat) {
97 rdmsr(MSR_PLATFORM_INFO, lo, hi); 94 rdmsr(MSR_PLATFORM_INFO, lo, hi);
@@ -103,7 +100,7 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
103 pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); 100 pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
104 101
105 if (!ratio) 102 if (!ratio)
106 return 0; 103 goto fail;
107 104
108 /* Get FSB FREQ ID */ 105 /* Get FSB FREQ ID */
109 rdmsr(MSR_FSB_FREQ, lo, hi); 106 rdmsr(MSR_FSB_FREQ, lo, hi);
@@ -112,16 +109,19 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
112 pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", 109 pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
113 freq_id, freq); 110 freq_id, freq);
114 if (!freq) 111 if (!freq)
115 return 0; 112 goto fail;
116 113
117 /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ 114 /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
118 *fast_calibrate = freq * ratio; 115 res = freq * ratio;
119 pr_info("TSC runs at %lu KHz\n", *fast_calibrate); 116 pr_info("TSC runs at %lu KHz\n", res);
120 117
121#ifdef CONFIG_X86_LOCAL_APIC 118#ifdef CONFIG_X86_LOCAL_APIC
122 lapic_timer_frequency = (freq * 1000) / HZ; 119 lapic_timer_frequency = (freq * 1000) / HZ;
123 pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); 120 pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
124#endif 121#endif
122 return res;
125 123
126 return 1; 124fail:
125 pr_warn("Fast TSC calibration using MSR failed\n");
126 return 0;
127} 127}
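The reworked helper returns the TSC frequency in KHz directly, or 0 when MSR calibration is not possible; as an illustrative calculation with assumed values, a resolved bus frequency of 100000 KHz and a core-to-bus ratio of 16 gives 1600000 KHz (1.6 GHz). A sketch of just that arithmetic:

#include <linux/types.h>

/* Mirrors the freq * ratio step and the 0-on-failure contract; inputs are
 * example values, not read from any MSR here. */
static unsigned long tsc_khz_from_msr(u32 bus_freq_khz, u32 bus_ratio)
{
	if (!bus_freq_khz || !bus_ratio)
		return 0;
	return (unsigned long)bus_freq_khz * bus_ratio;	/* e.g. 100000 * 16 */
}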
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d591c895803..6dea040cc3a1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
1001 1001
1002static inline bool smap_violation(int error_code, struct pt_regs *regs) 1002static inline bool smap_violation(int error_code, struct pt_regs *regs)
1003{ 1003{
1004 if (!IS_ENABLED(CONFIG_X86_SMAP))
1005 return false;
1006
1007 if (!static_cpu_has(X86_FEATURE_SMAP))
1008 return false;
1009
1004 if (error_code & PF_USER) 1010 if (error_code & PF_USER)
1005 return false; 1011 return false;
1006 1012
@@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
1087 if (unlikely(error_code & PF_RSVD)) 1093 if (unlikely(error_code & PF_RSVD))
1088 pgtable_bad(regs, error_code, address); 1094 pgtable_bad(regs, error_code, address);
1089 1095
1090 if (static_cpu_has(X86_FEATURE_SMAP)) { 1096 if (unlikely(smap_violation(error_code, regs))) {
1091 if (unlikely(smap_violation(error_code, regs))) { 1097 bad_area_nosemaphore(regs, error_code, address);
1092 bad_area_nosemaphore(regs, error_code, address); 1098 return;
1093 return;
1094 }
1095 } 1099 }
1096 1100
1097 /* 1101 /*
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 4df9591eadad..f15103dff4b4 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -42,7 +42,7 @@ void __init efi_bgrt_init(void)
42 42
43 if (bgrt_tab->header.length < sizeof(*bgrt_tab)) 43 if (bgrt_tab->header.length < sizeof(*bgrt_tab))
44 return; 44 return;
45 if (bgrt_tab->version != 1) 45 if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
46 return; 46 return;
47 if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) 47 if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
48 return; 48 return;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d62ec87a2b26..1a201ac7cef8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
792 set_memory_nx(addr, npages); 792 set_memory_nx(addr, npages);
793} 793}
794 794
795static void __init runtime_code_page_mkexec(void) 795void __init runtime_code_page_mkexec(void)
796{ 796{
797 efi_memory_desc_t *md; 797 efi_memory_desc_t *md;
798 void *p; 798 void *p;
@@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void)
1069 efi.update_capsule = virt_efi_update_capsule; 1069 efi.update_capsule = virt_efi_update_capsule;
1070 efi.query_capsule_caps = virt_efi_query_capsule_caps; 1070 efi.query_capsule_caps = virt_efi_query_capsule_caps;
1071 1071
1072 if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) 1072 efi_runtime_mkexec();
1073 runtime_code_page_mkexec();
1074 1073
1075 kfree(new_memmap); 1074 kfree(new_memmap);
1076 1075
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 249b183cf417..0b74cdf7f816 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -77,3 +77,9 @@ void efi_call_phys_epilog(void)
77 77
78 local_irq_restore(efi_rt_eflags); 78 local_irq_restore(efi_rt_eflags);
79} 79}
80
81void __init efi_runtime_mkexec(void)
82{
83 if (__supported_pte_mask & _PAGE_NX)
84 runtime_code_page_mkexec();
85}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6284f158a47d..0c2a234fef1e 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len)
233{ 233{
234 efi_setup = phys_addr + sizeof(struct setup_data); 234 efi_setup = phys_addr + sizeof(struct setup_data);
235} 235}
236
237void __init efi_runtime_mkexec(void)
238{
239 if (!efi_enabled(EFI_OLD_MEMMAP))
240 return;
241
242 if (__supported_pte_mask & _PAGE_NX)
243 runtime_code_page_mkexec();
244}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2423ef04ffea..256282e7888b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
365/* Assume pteval_t is equivalent to all the other *val_t types. */ 365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val) 366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{ 367{
368 if (val & _PAGE_PRESENT) { 368 if (pteval_present(val)) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn); 370 unsigned long pfn = mfn_to_pfn(mfn);
371 371
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
381 381
382static pteval_t pte_pfn_to_mfn(pteval_t val) 382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{ 383{
384 if (val & _PAGE_PRESENT) { 384 if (pteval_present(val)) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK; 386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn; 387 unsigned long mfn;
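pteval_present() is introduced elsewhere in this series so that PROT_NONE and NUMA-hinting ptes keep being translated like present entries. As a rough sketch of the intent only — not the exact definition — it widens the old single-bit test along these lines:

	/* Sketch of the intent; the real helper is defined elsewhere in
	 * this series, this is only an approximation for illustration. */
	static inline pteval_t pteval_present_sketch(pteval_t val)
	{
		/* protnone/NUMA-hinting entries still need mfn<->pfn translation */
		return val & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
	}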
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index ba56e11cbf77..c87ae7c6e5f9 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,6 +20,7 @@ config XTENSA
20 select HAVE_FUNCTION_TRACER 20 select HAVE_FUNCTION_TRACER
21 select HAVE_IRQ_TIME_ACCOUNTING 21 select HAVE_IRQ_TIME_ACCOUNTING
22 select HAVE_PERF_EVENTS 22 select HAVE_PERF_EVENTS
23 select COMMON_CLK
23 help 24 help
24 Xtensa processors are 32-bit RISC machines designed by Tensilica 25 Xtensa processors are 32-bit RISC machines designed by Tensilica
25 primarily for embedded systems. These processors are both 26 primarily for embedded systems. These processors are both
@@ -80,7 +81,6 @@ choice
80config XTENSA_VARIANT_FSF 81config XTENSA_VARIANT_FSF
81 bool "fsf - default (not generic) configuration" 82 bool "fsf - default (not generic) configuration"
82 select MMU 83 select MMU
83 select HAVE_XTENSA_GPIO32
84 84
85config XTENSA_VARIANT_DC232B 85config XTENSA_VARIANT_DC232B
86 bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" 86 bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
@@ -135,7 +135,6 @@ config HAVE_SMP
135config SMP 135config SMP
136 bool "Enable Symmetric multi-processing support" 136 bool "Enable Symmetric multi-processing support"
137 depends on HAVE_SMP 137 depends on HAVE_SMP
138 select USE_GENERIC_SMP_HELPERS
139 select GENERIC_SMP_IDLE_THREAD 138 select GENERIC_SMP_IDLE_THREAD
140 help 139 help
141 Enabled SMP Software; allows more than one CPU/CORE 140 Enabled SMP Software; allows more than one CPU/CORE
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index 46b4f5eab421..e7370b11348e 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -35,6 +35,13 @@
35 interrupt-controller; 35 interrupt-controller;
36 }; 36 };
37 37
38 clocks {
39 osc: main-oscillator {
40 #clock-cells = <0>;
41 compatible = "fixed-clock";
42 };
43 };
44
38 serial0: serial@fd050020 { 45 serial0: serial@fd050020 {
39 device_type = "serial"; 46 device_type = "serial";
40 compatible = "ns16550a"; 47 compatible = "ns16550a";
@@ -42,9 +49,7 @@
42 reg = <0xfd050020 0x20>; 49 reg = <0xfd050020 0x20>;
43 reg-shift = <2>; 50 reg-shift = <2>;
44 interrupts = <0 1>; /* external irq 0 */ 51 interrupts = <0 1>; /* external irq 0 */
45 /* Filled in by platform_setup from FPGA register 52 clocks = <&osc>;
46 * clock-frequency = <100000000>;
47 */
48 }; 53 };
49 54
50 enet0: ethoc@fd030000 { 55 enet0: ethoc@fd030000 {
@@ -52,5 +57,6 @@
52 reg = <0xfd030000 0x4000 0xfd800000 0x4000>; 57 reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
53 interrupts = <1 1>; /* external irq 1 */ 58 interrupts = <1 1>; /* external irq 1 */
54 local-mac-address = [00 50 c2 13 6f 00]; 59 local-mac-address = [00 50 c2 13 6f 00];
60 clocks = <&osc>;
55 }; 61 };
56}; 62};
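With the board oscillator described as a fixed-clock and referenced through clocks = <&osc>, consumers can take the rate from the common clock framework instead of a literal clock-frequency property. A hedged sketch of the consumer side, assuming 'dev' is the probing device for one of these nodes:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Sketch: read the rate of the clock referenced by clocks = <&osc>. */
	static unsigned long node_clock_rate(struct device *dev)
	{
		struct clk *clk = devm_clk_get(dev, NULL);

		if (IS_ERR(clk))
			return 0;	/* no usable clock; caller falls back */

		return clk_get_rate(clk);
	}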
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 2a042d430c25..74944207167e 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -25,7 +25,7 @@
25 25
26#ifdef CONFIG_MMU 26#ifdef CONFIG_MMU
27 27
28#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 28#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
29extern unsigned long xtensa_kio_paddr; 29extern unsigned long xtensa_kio_paddr;
30 30
31static inline unsigned long xtensa_get_kio_paddr(void) 31static inline unsigned long xtensa_get_kio_paddr(void)
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 8c194f6af45e..677bfcf4ee5d 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -23,25 +23,37 @@ void secondary_trap_init(void);
23 23
24static inline void spill_registers(void) 24static inline void spill_registers(void)
25{ 25{
26 26#if XCHAL_NUM_AREGS > 16
27 __asm__ __volatile__ ( 27 __asm__ __volatile__ (
28 "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" 28 " call12 1f\n"
29 "mov a12, a0\n\t" 29 " _j 2f\n"
30 "rsr a13, sar\n\t" 30 " retw\n"
31 "xsr a14, ps\n\t" 31 " .align 4\n"
32 "movi a0, _spill_registers\n\t" 32 "1:\n"
33 "rsync\n\t" 33 " _entry a1, 48\n"
34 "callx0 a0\n\t" 34 " addi a12, a0, 3\n"
35 "mov a0, a12\n\t" 35#if XCHAL_NUM_AREGS > 32
36 "wsr a13, sar\n\t" 36 " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
37 "wsr a14, ps\n\t" 37 " _entry a1, 48\n"
38 : : 38 " mov a12, a0\n"
39#if defined(CONFIG_FRAME_POINTER) 39 " .endr\n"
40 : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", 40#endif
41 " _entry a1, 48\n"
42#if XCHAL_NUM_AREGS % 12 == 0
43 " mov a8, a8\n"
44#elif XCHAL_NUM_AREGS % 12 == 4
45 " mov a12, a12\n"
46#elif XCHAL_NUM_AREGS % 12 == 8
47 " mov a4, a4\n"
48#endif
49 " retw\n"
50 "2:\n"
51 : : : "a12", "a13", "memory");
41#else 52#else
42 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", 53 __asm__ __volatile__ (
54 " mov a12, a12\n"
55 : : : "memory");
43#endif 56#endif
44 "memory");
45} 57}
46 58
47#endif /* _XTENSA_TRAPS_H */ 59#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 5791b45d5a5d..f74ddfbb92ef 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -25,7 +25,7 @@
25#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 25#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
26#define XCHAL_KIO_SIZE 0x10000000 26#define XCHAL_KIO_SIZE 0x10000000
27 27
28#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 28#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
29#define XCHAL_KIO_PADDR xtensa_get_kio_paddr() 29#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
30#else 30#else
31#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR 31#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 51940fec6990..b9395529f02d 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3)
734#define __NR_accept4 333 734#define __NR_accept4 333
735__SYSCALL(333, sys_accept4, 4) 735__SYSCALL(333, sys_accept4, 4)
736 736
737#define __NR_syscall_count 334 737#define __NR_sched_setattr 334
738__SYSCALL(334, sys_sched_setattr, 2)
739#define __NR_sched_getattr 335
740__SYSCALL(335, sys_sched_getattr, 3)
741
742#define __NR_syscall_count 336
738 743
739/* 744/*
740 * sysxtensa syscall handler 745 * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 21dbe6bdb8ed..ef7f4990722b 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1081,196 +1081,53 @@ ENTRY(fast_syscall_spill_registers)
1081 1081
1082 rsr a0, sar 1082 rsr a0, sar
1083 s32i a3, a2, PT_AREG3 1083 s32i a3, a2, PT_AREG3
1084 s32i a4, a2, PT_AREG4 1084 s32i a0, a2, PT_SAR
1085 s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
1086 1085
1087 /* The spill routine might clobber a7, a11, and a15. */ 1086 /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
1088 1087
1088 s32i a4, a2, PT_AREG4
1089 s32i a7, a2, PT_AREG7 1089 s32i a7, a2, PT_AREG7
1090 s32i a8, a2, PT_AREG8
1090 s32i a11, a2, PT_AREG11 1091 s32i a11, a2, PT_AREG11
1092 s32i a12, a2, PT_AREG12
1091 s32i a15, a2, PT_AREG15 1093 s32i a15, a2, PT_AREG15
1092 1094
1093 call0 _spill_registers # destroys a3, a4, and SAR
1094
1095 /* Advance PC, restore registers and SAR, and return from exception. */
1096
1097 l32i a3, a2, PT_AREG5
1098 l32i a4, a2, PT_AREG4
1099 l32i a0, a2, PT_AREG0
1100 wsr a3, sar
1101 l32i a3, a2, PT_AREG3
1102
1103 /* Restore clobbered registers. */
1104
1105 l32i a7, a2, PT_AREG7
1106 l32i a11, a2, PT_AREG11
1107 l32i a15, a2, PT_AREG15
1108
1109 movi a2, 0
1110 rfe
1111
1112ENDPROC(fast_syscall_spill_registers)
1113
1114/* Fixup handler.
1115 *
1116 * We get here if the spill routine causes an exception, e.g. tlb miss.
1117 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1118 * we entered the spill routine and jump to the user exception handler.
1119 *
1120 * a0: value of depc, original value in depc
1121 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1122 * a3: exctable, original value in excsave1
1123 */
1124
1125ENTRY(fast_syscall_spill_registers_fixup)
1126
1127 rsr a2, windowbase # get current windowbase (a2 is saved)
1128 xsr a0, depc # restore depc and a0
1129 ssl a2 # set shift (32 - WB)
1130
1131 /* We need to make sure the current registers (a0-a3) are preserved.
1132 * To do this, we simply set the bit for the current window frame
1133 * in WS, so that the exception handlers save them to the task stack.
1134 */
1135
1136 xsr a3, excsave1 # get spill-mask
1137 slli a3, a3, 1 # shift left by one
1138
1139 slli a2, a3, 32-WSBITS
1140 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
1141 wsr a2, windowstart # set corrected windowstart
1142
1143 srli a3, a3, 1
1144 rsr a2, excsave1
1145 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
1146 xsr a2, excsave1
1147 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
1148 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
1149 xsr a2, excsave1
1150
1151 /* Return to the original (user task) WINDOWBASE.
1152 * We leave the following frame behind:
1153 * a0, a1, a2 same
1154 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1155 * depc: depc (we have to return to that address)
1156 * excsave_1: exctable
1157 */
1158
1159 wsr a3, windowbase
1160 rsync
1161
1162 /* We are now in the original frame when we entered _spill_registers:
1163 * a0: return address
1164 * a1: used, stack pointer
1165 * a2: kernel stack pointer
1166 * a3: available
1167 * depc: exception address
1168 * excsave: exctable
1169 * Note: This frame might be the same as above.
1170 */
1171
1172 /* Setup stack pointer. */
1173
1174 addi a2, a2, -PT_USER_SIZE
1175 s32i a0, a2, PT_AREG0
1176
1177 /* Make sure we return to this fixup handler. */
1178
1179 movi a3, fast_syscall_spill_registers_fixup_return
1180 s32i a3, a2, PT_DEPC # setup depc
1181
1182 /* Jump to the exception handler. */
1183
1184 rsr a3, excsave1
1185 rsr a0, exccause
1186 addx4 a0, a0, a3 # find entry in table
1187 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1188 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1189 jx a0
1190
1191ENDPROC(fast_syscall_spill_registers_fixup)
1192
1193ENTRY(fast_syscall_spill_registers_fixup_return)
1194
1195 /* When we return here, all registers have been restored (a2: DEPC) */
1196
1197 wsr a2, depc # exception address
1198
1199 /* Restore fixup handler. */
1200
1201 rsr a2, excsave1
1202 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
1203 movi a3, fast_syscall_spill_registers_fixup
1204 s32i a3, a2, EXC_TABLE_FIXUP
1205 rsr a3, windowbase
1206 s32i a3, a2, EXC_TABLE_PARAM
1207 l32i a2, a2, EXC_TABLE_KSTK
1208
1209 /* Load WB at the time the exception occurred. */
1210
1211 rsr a3, sar # WB is still in SAR
1212 neg a3, a3
1213 wsr a3, windowbase
1214 rsync
1215
1216 rsr a3, excsave1
1217 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1218
1219 rfde
1220
1221ENDPROC(fast_syscall_spill_registers_fixup_return)
1222
1223/*
1224 * spill all registers.
1225 *
1226 * This is not a real function. The following conditions must be met:
1227 *
1228 * - must be called with call0.
1229 * - uses a3, a4 and SAR.
1230 * - the last 'valid' register of each frame are clobbered.
1231 * - the caller must have registered a fixup handler
1232 * (or be inside a critical section)
1233 * - PS_EXCM must be set (PS_WOE cleared?)
1234 */
1235
1236ENTRY(_spill_registers)
1237
1238 /* 1095 /*
1239 * Rotate ws so that the current windowbase is at bit 0. 1096 * Rotate ws so that the current windowbase is at bit 0.
1240 * Assume ws = xxxwww1yy (www1 current window frame). 1097 * Assume ws = xxxwww1yy (www1 current window frame).
1241 * Rotate ws right so that a4 = yyxxxwww1. 1098 * Rotate ws right so that a4 = yyxxxwww1.
1242 */ 1099 */
1243 1100
1244 rsr a4, windowbase 1101 rsr a0, windowbase
1245 rsr a3, windowstart # a3 = xxxwww1yy 1102 rsr a3, windowstart # a3 = xxxwww1yy
1246 ssr a4 # holds WB 1103 ssr a0 # holds WB
1247 slli a4, a3, WSBITS 1104 slli a0, a3, WSBITS
1248 or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy 1105 or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
1249 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1106 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
1250 1107
1251 /* We are done if there are no more than the current register frame. */ 1108 /* We are done if there are no more than the current register frame. */
1252 1109
1253 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1110 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
1254 movi a4, (1 << (WSBITS-1)) 1111 movi a0, (1 << (WSBITS-1))
1255 _beqz a3, .Lnospill # only one active frame? jump 1112 _beqz a3, .Lnospill # only one active frame? jump
1256 1113
1257 /* We want 1 at the top, so that we return to the current windowbase */ 1114 /* We want 1 at the top, so that we return to the current windowbase */
1258 1115
1259 or a3, a3, a4 # 1yyxxxwww 1116 or a3, a3, a0 # 1yyxxxwww
1260 1117
1261 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1118 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1262 1119
1263 wsr a3, windowstart # save shifted windowstart 1120 wsr a3, windowstart # save shifted windowstart
1264 neg a4, a3 1121 neg a0, a3
1265 and a3, a4, a3 # first bit set from right: 000010000 1122 and a3, a0, a3 # first bit set from right: 000010000
1266 1123
1267 ffs_ws a4, a3 # a4: shifts to skip empty frames 1124 ffs_ws a0, a3 # a0: shifts to skip empty frames
1268 movi a3, WSBITS 1125 movi a3, WSBITS
1269 sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right 1126 sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
1270 ssr a4 # save in SAR for later. 1127 ssr a0 # save in SAR for later.
1271 1128
1272 rsr a3, windowbase 1129 rsr a3, windowbase
1273 add a3, a3, a4 1130 add a3, a3, a0
1274 wsr a3, windowbase 1131 wsr a3, windowbase
1275 rsync 1132 rsync
1276 1133
@@ -1285,22 +1142,6 @@ ENTRY(_spill_registers)
1285 * we have to save 4,8. or 12 registers. 1142 * we have to save 4,8. or 12 registers.
1286 */ 1143 */
1287 1144
1288 _bbsi.l a3, 1, .Lc4
1289 _bbsi.l a3, 2, .Lc8
1290
1291 /* Special case: we have a call12-frame starting at a4. */
1292
1293 _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
1294
1295 s32e a4, a1, -16 # a1 is valid with an empty spill area
1296 l32e a4, a5, -12
1297 s32e a8, a4, -48
1298 mov a8, a4
1299 l32e a4, a1, -16
1300 j .Lc12c
1301
1302.Lnospill:
1303 ret
1304 1145
1305.Lloop: _bbsi.l a3, 1, .Lc4 1146.Lloop: _bbsi.l a3, 1, .Lc4
1306 _bbci.l a3, 2, .Lc12 1147 _bbci.l a3, 2, .Lc12
@@ -1314,20 +1155,10 @@ ENTRY(_spill_registers)
1314 s32e a9, a4, -28 1155 s32e a9, a4, -28
1315 s32e a10, a4, -24 1156 s32e a10, a4, -24
1316 s32e a11, a4, -20 1157 s32e a11, a4, -20
1317
1318 srli a11, a3, 2 # shift windowbase by 2 1158 srli a11, a3, 2 # shift windowbase by 2
1319 rotw 2 1159 rotw 2
1320 _bnei a3, 1, .Lloop 1160 _bnei a3, 1, .Lloop
1321 1161 j .Lexit
1322.Lexit: /* Done. Do the final rotation, set WS, and return. */
1323
1324 rotw 1
1325 rsr a3, windowbase
1326 ssl a3
1327 movi a3, 1
1328 sll a3, a3
1329 wsr a3, windowstart
1330 ret
1331 1162
1332.Lc4: s32e a4, a9, -16 1163.Lc4: s32e a4, a9, -16
1333 s32e a5, a9, -12 1164 s32e a5, a9, -12
@@ -1343,11 +1174,11 @@ ENTRY(_spill_registers)
1343 1174
1344 /* 12-register frame (call12) */ 1175 /* 12-register frame (call12) */
1345 1176
1346 l32e a2, a5, -12 1177 l32e a0, a5, -12
1347 s32e a8, a2, -48 1178 s32e a8, a0, -48
1348 mov a8, a2 1179 mov a8, a0
1349 1180
1350.Lc12c: s32e a9, a8, -44 1181 s32e a9, a8, -44
1351 s32e a10, a8, -40 1182 s32e a10, a8, -40
1352 s32e a11, a8, -36 1183 s32e a11, a8, -36
1353 s32e a12, a8, -32 1184 s32e a12, a8, -32
@@ -1367,30 +1198,54 @@ ENTRY(_spill_registers)
1367 */ 1198 */
1368 1199
1369 rotw 1 1200 rotw 1
1370 mov a5, a13 1201 mov a4, a13
1371 rotw -1 1202 rotw -1
1372 1203
1373 s32e a4, a9, -16 1204 s32e a4, a8, -16
1374 s32e a5, a9, -12 1205 s32e a5, a8, -12
1375 s32e a6, a9, -8 1206 s32e a6, a8, -8
1376 s32e a7, a9, -4 1207 s32e a7, a8, -4
1377 1208
1378 rotw 3 1209 rotw 3
1379 1210
1380 _beqi a3, 1, .Lexit 1211 _beqi a3, 1, .Lexit
1381 j .Lloop 1212 j .Lloop
1382 1213
1383.Linvalid_mask: 1214.Lexit:
1384 1215
1385 /* We get here because of an unrecoverable error in the window 1216 /* Done. Do the final rotation and set WS */
1386 * registers. If we are in user space, we kill the application, 1217
1387 * however, this condition is unrecoverable in kernel space. 1218 rotw 1
1388 */ 1219 rsr a3, windowbase
1220 ssl a3
1221 movi a3, 1
1222 sll a3, a3
1223 wsr a3, windowstart
1224.Lnospill:
1225
1226 /* Advance PC, restore registers and SAR, and return from exception. */
1227
1228 l32i a3, a2, PT_SAR
1229 l32i a0, a2, PT_AREG0
1230 wsr a3, sar
1231 l32i a3, a2, PT_AREG3
1389 1232
1390 rsr a0, ps 1233 /* Restore clobbered registers. */
1391 _bbci.l a0, PS_UM_BIT, 1f
1392 1234
1393 /* User space: Setup a dummy frame and kill application. 1235 l32i a4, a2, PT_AREG4
1236 l32i a7, a2, PT_AREG7
1237 l32i a8, a2, PT_AREG8
1238 l32i a11, a2, PT_AREG11
1239 l32i a12, a2, PT_AREG12
1240 l32i a15, a2, PT_AREG15
1241
1242 movi a2, 0
1243 rfe
1244
1245.Linvalid_mask:
1246
1247 /* We get here because of an unrecoverable error in the window
1248 * registers, so set up a dummy frame and kill the user application.
1394 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1249 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1395 */ 1250 */
1396 1251
@@ -1414,14 +1269,136 @@ ENTRY(_spill_registers)
1414 movi a4, do_exit 1269 movi a4, do_exit
1415 callx4 a4 1270 callx4 a4
1416 1271
14171: /* Kernel space: PANIC! */ 1272 /* shouldn't return, so panic */
1418 1273
1419 wsr a0, excsave1 1274 wsr a0, excsave1
1420 movi a0, unrecoverable_exception 1275 movi a0, unrecoverable_exception
1421 callx0 a0 # should not return 1276 callx0 a0 # should not return
14221: j 1b 12771: j 1b
1423 1278
1424ENDPROC(_spill_registers) 1279
1280ENDPROC(fast_syscall_spill_registers)
1281
1282/* Fixup handler.
1283 *
1284 * We get here if the spill routine causes an exception, e.g. tlb miss.
1285 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1286 * we entered the spill routine and jump to the user exception handler.
1287 *
1288 * Note that we only need to restore the bits in windowstart that have not
1289 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
1290 * rotated windowstart with only those bits set for frames that haven't been
1291 * spilled yet. Because a3 is rotated such that bit 0 represents the register
1292 * frame for the current windowbase - 1, we need to rotate a3 left by the
1293 * value of the current windowbase + 1 and move it to windowstart.
1294 *
1295 * a0: value of depc, original value in depc
1296 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1297 * a3: exctable, original value in excsave1
1298 */
1299
1300ENTRY(fast_syscall_spill_registers_fixup)
1301
1302 rsr a2, windowbase # get current windowbase (a2 is saved)
1303 xsr a0, depc # restore depc and a0
1304 ssl a2 # set shift (32 - WB)
1305
1306 /* We need to make sure the current registers (a0-a3) are preserved.
1307 * To do this, we simply set the bit for the current window frame
1308 * in WS, so that the exception handlers save them to the task stack.
1309 *
1310 * Note: we use a3 to set the windowbase, so we take a special care
1311 * of it, saving it in the original _spill_registers frame across
1312 * the exception handler call.
1313 */
1314
1315 xsr a3, excsave1 # get spill-mask
1316 slli a3, a3, 1 # shift left by one
1317 addi a3, a3, 1 # set the bit for the current window frame
1318
1319 slli a2, a3, 32-WSBITS
1320 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
1321 wsr a2, windowstart # set corrected windowstart
1322
1323 srli a3, a3, 1
1324 rsr a2, excsave1
1325 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
1326 xsr a2, excsave1
1327 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
1328 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
1329 xsr a2, excsave1
1330
1331 /* Return to the original (user task) WINDOWBASE.
1332 * We leave the following frame behind:
1333 * a0, a1, a2 same
1334 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1335 * depc: depc (we have to return to that address)
1336 * excsave_1: exctable
1337 */
1338
1339 wsr a3, windowbase
1340 rsync
1341
1342 /* We are now in the original frame when we entered _spill_registers:
1343 * a0: return address
1344 * a1: used, stack pointer
1345 * a2: kernel stack pointer
1346 * a3: available
1347 * depc: exception address
1348 * excsave: exctable
1349 * Note: This frame might be the same as above.
1350 */
1351
1352 /* Setup stack pointer. */
1353
1354 addi a2, a2, -PT_USER_SIZE
1355 s32i a0, a2, PT_AREG0
1356
1357 /* Make sure we return to this fixup handler. */
1358
1359 movi a3, fast_syscall_spill_registers_fixup_return
1360 s32i a3, a2, PT_DEPC # setup depc
1361
1362 /* Jump to the exception handler. */
1363
1364 rsr a3, excsave1
1365 rsr a0, exccause
1366 addx4 a0, a0, a3 # find entry in table
1367 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1368 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1369 jx a0
1370
1371ENDPROC(fast_syscall_spill_registers_fixup)
1372
1373ENTRY(fast_syscall_spill_registers_fixup_return)
1374
1375 /* When we return here, all registers have been restored (a2: DEPC) */
1376
1377 wsr a2, depc # exception address
1378
1379 /* Restore fixup handler. */
1380
1381 rsr a2, excsave1
1382 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
1383 movi a3, fast_syscall_spill_registers_fixup
1384 s32i a3, a2, EXC_TABLE_FIXUP
1385 rsr a3, windowbase
1386 s32i a3, a2, EXC_TABLE_PARAM
1387 l32i a2, a2, EXC_TABLE_KSTK
1388
1389 /* Load WB at the time the exception occurred. */
1390
1391 rsr a3, sar # WB is still in SAR
1392 neg a3, a3
1393 wsr a3, windowbase
1394 rsync
1395
1396 rsr a3, excsave1
1397 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1398
1399 rfde
1400
1401ENDPROC(fast_syscall_spill_registers_fixup_return)
1425 1402
1426#ifdef CONFIG_MMU 1403#ifdef CONFIG_MMU
1427/* 1404/*
@@ -1794,6 +1771,43 @@ ENTRY(system_call)
1794 1771
1795ENDPROC(system_call) 1772ENDPROC(system_call)
1796 1773
1774/*
1775 * Spill live registers on the kernel stack macro.
1776 *
1777 * Entry condition: ps.woe is set, ps.excm is cleared
1778 * Exit condition: windowstart has single bit set
1779 * May clobber: a12, a13
1780 */
1781 .macro spill_registers_kernel
1782
1783#if XCHAL_NUM_AREGS > 16
1784 call12 1f
1785 _j 2f
1786 retw
1787 .align 4
17881:
1789 _entry a1, 48
1790 addi a12, a0, 3
1791#if XCHAL_NUM_AREGS > 32
1792 .rept (XCHAL_NUM_AREGS - 32) / 12
1793 _entry a1, 48
1794 mov a12, a0
1795 .endr
1796#endif
1797 _entry a1, 48
1798#if XCHAL_NUM_AREGS % 12 == 0
1799 mov a8, a8
1800#elif XCHAL_NUM_AREGS % 12 == 4
1801 mov a12, a12
1802#elif XCHAL_NUM_AREGS % 12 == 8
1803 mov a4, a4
1804#endif
1805 retw
18062:
1807#else
1808 mov a12, a12
1809#endif
1810 .endm
1797 1811
1798/* 1812/*
1799 * Task switch. 1813 * Task switch.
@@ -1806,21 +1820,20 @@ ENTRY(_switch_to)
1806 1820
1807 entry a1, 16 1821 entry a1, 16
1808 1822
1809 mov a12, a2 # preserve 'prev' (a2) 1823 mov a10, a2 # preserve 'prev' (a2)
1810 mov a13, a3 # and 'next' (a3) 1824 mov a11, a3 # and 'next' (a3)
1811 1825
1812 l32i a4, a2, TASK_THREAD_INFO 1826 l32i a4, a2, TASK_THREAD_INFO
1813 l32i a5, a3, TASK_THREAD_INFO 1827 l32i a5, a3, TASK_THREAD_INFO
1814 1828
1815 save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1829 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1816 1830
1817 s32i a0, a12, THREAD_RA # save return address 1831 s32i a0, a10, THREAD_RA # save return address
1818 s32i a1, a12, THREAD_SP # save stack pointer 1832 s32i a1, a10, THREAD_SP # save stack pointer
1819 1833
1820 /* Disable ints while we manipulate the stack pointer. */ 1834 /* Disable ints while we manipulate the stack pointer. */
1821 1835
1822 movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL 1836 rsil a14, LOCKLEVEL
1823 xsr a14, ps
1824 rsr a3, excsave1 1837 rsr a3, excsave1
1825 rsync 1838 rsync
1826 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ 1839 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
@@ -1835,7 +1848,7 @@ ENTRY(_switch_to)
1835 1848
1836 /* Flush register file. */ 1849 /* Flush register file. */
1837 1850
1838 call0 _spill_registers # destroys a3, a4, and SAR 1851 spill_registers_kernel
1839 1852
1840 /* Set kernel stack (and leave critical section) 1853 /* Set kernel stack (and leave critical section)
 1841 * Note: It's safe to set it here. The stack will not be overwritten 1854
@@ -1851,13 +1864,13 @@ ENTRY(_switch_to)
1851 1864
1852 /* restore context of the task 'next' */ 1865 /* restore context of the task 'next' */
1853 1866
1854 l32i a0, a13, THREAD_RA # restore return address 1867 l32i a0, a11, THREAD_RA # restore return address
1855 l32i a1, a13, THREAD_SP # restore stack pointer 1868 l32i a1, a11, THREAD_SP # restore stack pointer
1856 1869
1857 load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1870 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1858 1871
1859 wsr a14, ps 1872 wsr a14, ps
1860 mov a2, a12 # return 'prev' 1873 mov a2, a10 # return 'prev'
1861 rsync 1874 rsync
1862 1875
1863 retw 1876 retw
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 7d12af1317f1..84fe931bb60e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -22,6 +22,7 @@
22#include <linux/bootmem.h> 22#include <linux/bootmem.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/clk-provider.h>
25#include <linux/cpu.h> 26#include <linux/cpu.h>
26#include <linux/of_fdt.h> 27#include <linux/of_fdt.h>
27#include <linux/of_platform.h> 28#include <linux/of_platform.h>
@@ -276,6 +277,7 @@ void __init early_init_devtree(void *params)
276 277
277static int __init xtensa_device_probe(void) 278static int __init xtensa_device_probe(void)
278{ 279{
280 of_clk_init(NULL);
279 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 281 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
280 return 0; 282 return 0;
281} 283}
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 08b769d3b3a1..2a1823de69cc 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -30,6 +30,7 @@
30#include <asm/platform.h> 30#include <asm/platform.h>
31 31
32unsigned long ccount_freq; /* ccount Hz */ 32unsigned long ccount_freq; /* ccount Hz */
33EXPORT_SYMBOL(ccount_freq);
33 34
34static cycle_t ccount_read(struct clocksource *cs) 35static cycle_t ccount_read(struct clocksource *cs)
35{ 36{
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index cb8fd44caabc..f9e1ec346e35 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector)
235 235
236 /* Check for overflow/underflow exception, jump if overflow. */ 236 /* Check for overflow/underflow exception, jump if overflow. */
237 237
238 _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow 238 bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
239 239
240 /* 240 /*
241 * Restart window underflow exception. 241 * Restart window underflow exception.
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 74a60c7e085e..80b33ed51f31 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw);
122EXPORT_SYMBOL(insl); 122EXPORT_SYMBOL(insl);
123 123
124extern long common_exception_return; 124extern long common_exception_return;
125extern long _spill_registers;
126EXPORT_SYMBOL(common_exception_return); 125EXPORT_SYMBOL(common_exception_return);
127EXPORT_SYMBOL(_spill_registers);
128 126
129#ifdef CONFIG_FUNCTION_TRACER 127#ifdef CONFIG_FUNCTION_TRACER
130EXPORT_SYMBOL(_mcount); 128EXPORT_SYMBOL(_mcount);
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 479d7537a32a..aff108df92d3 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
90 90
91 91
92/* 92/*
93 * Initialize the bootmem system and give it all the memory we have available. 93 * Initialize the bootmem system and give it all low memory we have available.
94 */ 94 */
95 95
96void __init bootmem_init(void) 96void __init bootmem_init(void)
@@ -142,9 +142,14 @@ void __init bootmem_init(void)
142 142
143 /* Add all remaining memory pieces into the bootmem map */ 143 /* Add all remaining memory pieces into the bootmem map */
144 144
145 for (i=0; i<sysmem.nr_banks; i++) 145 for (i = 0; i < sysmem.nr_banks; i++) {
146 free_bootmem(sysmem.bank[i].start, 146 if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
147 sysmem.bank[i].end - sysmem.bank[i].start); 147 unsigned long end = min(max_low_pfn << PAGE_SHIFT,
148 sysmem.bank[i].end);
149 free_bootmem(sysmem.bank[i].start,
150 end - sysmem.bank[i].start);
151 }
152 }
148 153
149} 154}
150 155
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 36ec171698b8..861203e958da 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -39,7 +39,7 @@ void init_mmu(void)
39 set_itlbcfg_register(0); 39 set_itlbcfg_register(0);
40 set_dtlbcfg_register(0); 40 set_dtlbcfg_register(0);
41#endif 41#endif
42#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 42#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
43 /* 43 /*
44 * Update the IO area mapping in case xtensa_kio_paddr has changed 44 * Update the IO area mapping in case xtensa_kio_paddr has changed
45 */ 45 */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 800227862fe8..57fd08b36f51 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node)
135 135
136static int __init machine_setup(void) 136static int __init machine_setup(void)
137{ 137{
138 struct device_node *serial; 138 struct device_node *clock;
139 struct device_node *eth = NULL; 139 struct device_node *eth = NULL;
140 140
141 for_each_compatible_node(serial, NULL, "ns16550a") 141 for_each_node_by_name(clock, "main-oscillator")
142 update_clock_frequency(serial); 142 update_clock_frequency(clock);
143 143
144 if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) 144 if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
145 update_local_mac(eth); 145 update_local_mac(eth);
@@ -290,6 +290,7 @@ static int __init xtavnet_init(void)
290 * knows whether they set it correctly on the DIP switches. 290 * knows whether they set it correctly on the DIP switches.
291 */ 291 */
292 pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); 292 pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
293 ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
293 294
294 return 0; 295 return 0;
295} 296}
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
index bf4020116df5..244cdea4dee5 100644
--- a/arch/xtensa/variants/fsf/include/variant/tie.h
+++ b/arch/xtensa/variants/fsf/include/variant/tie.h
@@ -18,13 +18,6 @@
18#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */ 18#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
19#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ 19#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
20 20
21/* Basic parameters of each coprocessor: */
22#define XCHAL_CP7_NAME "XTIOP"
23#define XCHAL_CP7_IDENT XTIOP
24#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
25#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
26#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
27
28/* Filler info for unassigned coprocessors, to simplify arrays etc: */ 21/* Filler info for unassigned coprocessors, to simplify arrays etc: */
29#define XCHAL_NCP_SA_SIZE 0 22#define XCHAL_NCP_SA_SIZE 0
30#define XCHAL_NCP_SA_ALIGN 1 23#define XCHAL_NCP_SA_ALIGN 1
@@ -42,6 +35,8 @@
42#define XCHAL_CP5_SA_ALIGN 1 35#define XCHAL_CP5_SA_ALIGN 1
43#define XCHAL_CP6_SA_SIZE 0 36#define XCHAL_CP6_SA_SIZE 0
44#define XCHAL_CP6_SA_ALIGN 1 37#define XCHAL_CP6_SA_ALIGN 1
38#define XCHAL_CP7_SA_SIZE 0
39#define XCHAL_CP7_SA_ALIGN 1
45 40
46/* Save area for non-coprocessor optional and custom (TIE) state: */ 41/* Save area for non-coprocessor optional and custom (TIE) state: */
47#define XCHAL_NCP_SA_SIZE 0 42#define XCHAL_NCP_SA_SIZE 0
diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
693 if (!uninit_q) 693 if (!uninit_q)
694 return NULL; 694 return NULL;
695 695
696 uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
697 if (!uninit_q->flush_rq)
698 goto out_cleanup_queue;
699
696 q = blk_init_allocated_queue(uninit_q, rfn, lock); 700 q = blk_init_allocated_queue(uninit_q, rfn, lock);
697 if (!q) 701 if (!q)
698 blk_cleanup_queue(uninit_q); 702 goto out_free_flush_rq;
699
700 return q; 703 return q;
704
705out_free_flush_rq:
706 kfree(uninit_q->flush_rq);
707out_cleanup_queue:
708 blk_cleanup_queue(uninit_q);
709 return NULL;
701} 710}
702EXPORT_SYMBOL(blk_init_queue_node); 711EXPORT_SYMBOL(blk_init_queue_node);
703 712
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
1127struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 1136struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1128{ 1137{
1129 if (q->mq_ops) 1138 if (q->mq_ops)
1130 return blk_mq_alloc_request(q, rw, gfp_mask, false); 1139 return blk_mq_alloc_request(q, rw, gfp_mask);
1131 else 1140 else
1132 return blk_old_get_request(q, rw, gfp_mask); 1141 return blk_old_get_request(q, rw, gfp_mask);
1133} 1142}
@@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
1278 if (unlikely(!q)) 1287 if (unlikely(!q))
1279 return; 1288 return;
1280 1289
1290 if (q->mq_ops) {
1291 blk_mq_free_request(req);
1292 return;
1293 }
1294
1281 blk_pm_put_request(req); 1295 blk_pm_put_request(req);
1282 1296
1283 elv_completed_request(q, req); 1297 elv_completed_request(q, req);
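blk_init_queue_node() above grows a second allocation and follows the usual allocate-then-unwind shape: every later failure jumps to a label that releases what was acquired before it, in reverse order. A generic sketch of that shape; struct thing, thing_register() and BUF_SIZE are placeholders, not block-layer interfaces:

	static struct thing *thing_create(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return NULL;

		t->buf = kzalloc(BUF_SIZE, GFP_KERNEL);
		if (!t->buf)
			goto out_free_thing;

		if (thing_register(t))		/* later setup step can fail too */
			goto out_free_buf;

		return t;

	out_free_buf:
		kfree(t->buf);
	out_free_thing:
		kfree(t);
		return NULL;
	}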
diff --git a/block/blk-exec.c b/block/blk-exec.c
index bbfc072a79c2..c68613bb4c79 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
65 * be resued after dying flag is set 65 * be resued after dying flag is set
66 */ 66 */
67 if (q->mq_ops) { 67 if (q->mq_ops) {
68 blk_mq_insert_request(q, rq, true); 68 blk_mq_insert_request(q, rq, at_head, true);
69 return; 69 return;
70 } 70 }
71 71
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
130 blk_clear_rq_complete(rq); 130 blk_clear_rq_complete(rq);
131} 131}
132 132
133static void mq_flush_data_run(struct work_struct *work) 133static void mq_flush_run(struct work_struct *work)
134{ 134{
135 struct request *rq; 135 struct request *rq;
136 136
137 rq = container_of(work, struct request, mq_flush_data); 137 rq = container_of(work, struct request, mq_flush_work);
138 138
139 memset(&rq->csd, 0, sizeof(rq->csd)); 139 memset(&rq->csd, 0, sizeof(rq->csd));
140 blk_mq_run_request(rq, true, false); 140 blk_mq_run_request(rq, true, false);
141} 141}
142 142
143static void blk_mq_flush_data_insert(struct request *rq) 143static bool blk_flush_queue_rq(struct request *rq)
144{ 144{
145 INIT_WORK(&rq->mq_flush_data, mq_flush_data_run); 145 if (rq->q->mq_ops) {
146 kblockd_schedule_work(rq->q, &rq->mq_flush_data); 146 INIT_WORK(&rq->mq_flush_work, mq_flush_run);
147 kblockd_schedule_work(rq->q, &rq->mq_flush_work);
148 return false;
149 } else {
150 list_add_tail(&rq->queuelist, &rq->q->queue_head);
151 return true;
152 }
147} 153}
148 154
149/** 155/**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
187 193
188 case REQ_FSEQ_DATA: 194 case REQ_FSEQ_DATA:
189 list_move_tail(&rq->flush.list, &q->flush_data_in_flight); 195 list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
190 if (q->mq_ops) 196 queued = blk_flush_queue_rq(rq);
191 blk_mq_flush_data_insert(rq);
192 else {
193 list_add(&rq->queuelist, &q->queue_head);
194 queued = true;
195 }
196 break; 197 break;
197 198
198 case REQ_FSEQ_DONE: 199 case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
216 } 217 }
217 218
218 kicked = blk_kick_flush(q); 219 kicked = blk_kick_flush(q);
219 /* blk_mq_run_flush will run queue */
220 if (q->mq_ops)
221 return queued;
222 return kicked | queued; 220 return kicked | queued;
223} 221}
224 222
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
230 struct request *rq, *n; 228 struct request *rq, *n;
231 unsigned long flags = 0; 229 unsigned long flags = 0;
232 230
233 if (q->mq_ops) { 231 if (q->mq_ops)
234 blk_mq_free_request(flush_rq);
235 spin_lock_irqsave(&q->mq_flush_lock, flags); 232 spin_lock_irqsave(&q->mq_flush_lock, flags);
236 } 233
237 running = &q->flush_queue[q->flush_running_idx]; 234 running = &q->flush_queue[q->flush_running_idx];
238 BUG_ON(q->flush_pending_idx == q->flush_running_idx); 235 BUG_ON(q->flush_pending_idx == q->flush_running_idx);
239 236
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
263 * kblockd. 260 * kblockd.
264 */ 261 */
265 if (queued || q->flush_queue_delayed) { 262 if (queued || q->flush_queue_delayed) {
266 if (!q->mq_ops) 263 WARN_ON(q->mq_ops);
267 blk_run_queue_async(q); 264 blk_run_queue_async(q);
268 else
269 /*
270 * This can be optimized to only run queues with requests
271 * queued if necessary.
272 */
273 blk_mq_run_queues(q, true);
274 } 265 }
275 q->flush_queue_delayed = 0; 266 q->flush_queue_delayed = 0;
276 if (q->mq_ops) 267 if (q->mq_ops)
277 spin_unlock_irqrestore(&q->mq_flush_lock, flags); 268 spin_unlock_irqrestore(&q->mq_flush_lock, flags);
278} 269}
279 270
280static void mq_flush_work(struct work_struct *work)
281{
282 struct request_queue *q;
283 struct request *rq;
284
285 q = container_of(work, struct request_queue, mq_flush_work);
286
287 /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
288 rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
289 __GFP_WAIT|GFP_ATOMIC, true);
290 rq->cmd_type = REQ_TYPE_FS;
291 rq->end_io = flush_end_io;
292
293 blk_mq_run_request(rq, true, false);
294}
295
296/*
297 * We can't directly use q->flush_rq, because it doesn't have tag and is not in
298 * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
299 * so offload the work to workqueue.
300 *
301 * Note: we assume a flush request finished in any hardware queue will flush
302 * the whole disk cache.
303 */
304static void mq_run_flush(struct request_queue *q)
305{
306 kblockd_schedule_work(q, &q->mq_flush_work);
307}
308
309/** 271/**
310 * blk_kick_flush - consider issuing flush request 272 * blk_kick_flush - consider issuing flush request
311 * @q: request_queue being kicked 273 * @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
340 * different from running_idx, which means flush is in flight. 302 * different from running_idx, which means flush is in flight.
341 */ 303 */
342 q->flush_pending_idx ^= 1; 304 q->flush_pending_idx ^= 1;
305
343 if (q->mq_ops) { 306 if (q->mq_ops) {
344 mq_run_flush(q); 307 struct blk_mq_ctx *ctx = first_rq->mq_ctx;
345 return true; 308 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
309
310 blk_mq_rq_init(hctx, q->flush_rq);
311 q->flush_rq->mq_ctx = ctx;
312
313 /*
 314 * Reuse the tag value from the first waiting request,
315 * with blk-mq the tag is generated during request
316 * allocation and drivers can rely on it being inside
317 * the range they asked for.
318 */
319 q->flush_rq->tag = first_rq->tag;
320 } else {
321 blk_rq_init(q, q->flush_rq);
346 } 322 }
347 323
348 blk_rq_init(q, &q->flush_rq); 324 q->flush_rq->cmd_type = REQ_TYPE_FS;
349 q->flush_rq.cmd_type = REQ_TYPE_FS; 325 q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
350 q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; 326 q->flush_rq->rq_disk = first_rq->rq_disk;
351 q->flush_rq.rq_disk = first_rq->rq_disk; 327 q->flush_rq->end_io = flush_end_io;
352 q->flush_rq.end_io = flush_end_io;
353 328
354 list_add_tail(&q->flush_rq.queuelist, &q->queue_head); 329 return blk_flush_queue_rq(q->flush_rq);
355 return true;
356} 330}
357 331
358static void flush_data_end_io(struct request *rq, int error) 332static void flush_data_end_io(struct request *rq, int error)
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
558void blk_mq_init_flush(struct request_queue *q) 532void blk_mq_init_flush(struct request_queue *q)
559{ 533{
560 spin_lock_init(&q->mq_flush_lock); 534 spin_lock_init(&q->mq_flush_lock);
561 INIT_WORK(&q->mq_flush_work, mq_flush_work);
562} 535}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2da76c999ef3..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
119 119
120 atomic_inc(&bb.done); 120 atomic_inc(&bb.done);
121 submit_bio(type, bio); 121 submit_bio(type, bio);
122
123 /*
124 * We can loop for a long time in here, if someone does
125 * full device discards (like mkfs). Be nice and allow
126 * us to schedule out to avoid softlocking if preempt
127 * is disabled.
128 */
129 cond_resched();
122 } 130 }
123 blk_finish_plug(&plug); 131 blk_finish_plug(&plug);
124 132
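The cond_resched() added above keeps a full-device discard (e.g. from mkfs) from soft-locking a CPU when kernel preemption is not enabled; the same pattern suits any loop that can submit thousands of bios. A minimal sketch, where prepare_next_bio() is a placeholder for whatever builds the next chunk:

	static void submit_all(struct block_device *bdev, sector_t nr_sects)
	{
		while (nr_sects) {
			struct bio *bio = prepare_next_bio(bdev, &nr_sects);

			submit_bio(WRITE, bio);
			cond_resched();	/* yield between chunks of a huge request */
		}
	}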
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
21 if (!bio) 21 if (!bio)
22 return 0; 22 return 0;
23 23
24 /*
25 * This should probably be returning 0, but blk_add_request_payload()
26 * (Christoph!!!!)
27 */
28 if (bio->bi_rw & REQ_DISCARD)
29 return 1;
30
31 if (bio->bi_rw & REQ_WRITE_SAME)
32 return 1;
33
24 fbio = bio; 34 fbio = bio;
25 cluster = blk_queue_cluster(q); 35 cluster = blk_queue_cluster(q);
26 seg_size = 0; 36 seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
161 *bvprv = *bvec; 171 *bvprv = *bvec;
162} 172}
163 173
164/* 174static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
165 * map a request to scatterlist, return number of sg entries setup. Caller 175 struct scatterlist *sglist,
166 * must make sure sg can hold rq->nr_phys_segments entries 176 struct scatterlist **sg)
167 */
168int blk_rq_map_sg(struct request_queue *q, struct request *rq,
169 struct scatterlist *sglist)
170{ 177{
171 struct bio_vec bvec, bvprv = { NULL }; 178 struct bio_vec bvec, bvprv = { NULL };
172 struct req_iterator iter; 179 struct bvec_iter iter;
173 struct scatterlist *sg;
174 int nsegs, cluster; 180 int nsegs, cluster;
175 181
176 nsegs = 0; 182 nsegs = 0;
177 cluster = blk_queue_cluster(q); 183 cluster = blk_queue_cluster(q);
178 184
179 /* 185 if (bio->bi_rw & REQ_DISCARD) {
180 * for each bio in rq 186 /*
181 */ 187 * This is a hack - drivers should be neither modifying the
182 sg = NULL; 188 * biovec, nor relying on bi_vcnt - but because of
183 rq_for_each_segment(bvec, rq, iter) { 189 * blk_add_request_payload(), a discard bio may or may not have
184 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, 190 * a payload we need to set up here (thank you Christoph) and
185 &nsegs, &cluster); 191 * bi_vcnt is really the only way of telling if we need to.
186 } /* segments in rq */ 192 */
193
194 if (bio->bi_vcnt)
195 goto single_segment;
196
197 return 0;
198 }
199
200 if (bio->bi_rw & REQ_WRITE_SAME) {
201single_segment:
202 *sg = sglist;
203 bvec = bio_iovec(bio);
204 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
205 return 1;
206 }
207
208 for_each_bio(bio)
209 bio_for_each_segment(bvec, bio, iter)
210 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
211 &nsegs, &cluster);
187 212
213 return nsegs;
214}
215
216/*
217 * map a request to scatterlist, return number of sg entries setup. Caller
218 * must make sure sg can hold rq->nr_phys_segments entries
219 */
220int blk_rq_map_sg(struct request_queue *q, struct request *rq,
221 struct scatterlist *sglist)
222{
223 struct scatterlist *sg = NULL;
224 int nsegs = 0;
225
226 if (rq->bio)
227 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
188 228
189 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && 229 if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
190 (blk_rq_bytes(rq) & q->dma_pad_mask)) { 230 (blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
230int blk_bio_map_sg(struct request_queue *q, struct bio *bio, 270int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
231 struct scatterlist *sglist) 271 struct scatterlist *sglist)
232{ 272{
233 struct bio_vec bvec, bvprv = { NULL }; 273 struct scatterlist *sg = NULL;
234 struct scatterlist *sg; 274 int nsegs;
235 int nsegs, cluster; 275 struct bio *next = bio->bi_next;
236 struct bvec_iter iter; 276 bio->bi_next = NULL;
237
238 nsegs = 0;
239 cluster = blk_queue_cluster(q);
240
241 sg = NULL;
242 bio_for_each_segment(bvec, bio, iter) {
243 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
244 &nsegs, &cluster);
245 } /* segments in bio */
246 277
278 nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
279 bio->bi_next = next;
247 if (sg) 280 if (sg)
248 sg_mark_end(sg); 281 sg_mark_end(sg);
249 282
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5d70edc9855f..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
184ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) 184ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
185{ 185{
186 char *orig_page = page; 186 char *orig_page = page;
187 int cpu; 187 unsigned int cpu;
188 188
189 if (!tags) 189 if (!tags)
190 return 0; 190 return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 57039fcd9c93..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
226 return rq; 226 return rq;
227} 227}
228 228
229struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 229struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
230 gfp_t gfp, bool reserved)
231{ 230{
232 struct request *rq; 231 struct request *rq;
233 232
234 if (blk_mq_queue_enter(q)) 233 if (blk_mq_queue_enter(q))
235 return NULL; 234 return NULL;
236 235
237 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); 236 rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
238 if (rq) 237 if (rq)
239 blk_mq_put_ctx(rq->mq_ctx); 238 blk_mq_put_ctx(rq->mq_ctx);
240 return rq; 239 return rq;
@@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
258/* 257/*
259 * Re-init and set pdu, if we have it 258 * Re-init and set pdu, if we have it
260 */ 259 */
261static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) 260void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
262{ 261{
263 blk_rq_init(hctx->queue, rq); 262 blk_rq_init(hctx->queue, rq);
264 263
@@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
305 bio_endio(bio, error); 304 bio_endio(bio, error);
306} 305}
307 306
308void blk_mq_complete_request(struct request *rq, int error) 307void blk_mq_end_io(struct request *rq, int error)
309{ 308{
310 struct bio *bio = rq->bio; 309 struct bio *bio = rq->bio;
311 unsigned int bytes = 0; 310 unsigned int bytes = 0;
@@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
330 else 329 else
331 blk_mq_free_request(rq); 330 blk_mq_free_request(rq);
332} 331}
332EXPORT_SYMBOL(blk_mq_end_io);
333 333
334void __blk_mq_end_io(struct request *rq, int error) 334static void __blk_mq_complete_request_remote(void *data)
335{
336 if (!blk_mark_rq_complete(rq))
337 blk_mq_complete_request(rq, error);
338}
339
340static void blk_mq_end_io_remote(void *data)
341{ 335{
342 struct request *rq = data; 336 struct request *rq = data;
343 337
344 __blk_mq_end_io(rq, rq->errors); 338 rq->q->softirq_done_fn(rq);
345} 339}
346 340
347/* 341void __blk_mq_complete_request(struct request *rq)
348 * End IO on this request on a multiqueue enabled driver. We'll either do
349 * it directly inline, or punt to a local IPI handler on the matching
350 * remote CPU.
351 */
352void blk_mq_end_io(struct request *rq, int error)
353{ 342{
354 struct blk_mq_ctx *ctx = rq->mq_ctx; 343 struct blk_mq_ctx *ctx = rq->mq_ctx;
355 int cpu; 344 int cpu;
356 345
357 if (!ctx->ipi_redirect) 346 if (!ctx->ipi_redirect) {
358 return __blk_mq_end_io(rq, error); 347 rq->q->softirq_done_fn(rq);
348 return;
349 }
359 350
360 cpu = get_cpu(); 351 cpu = get_cpu();
361 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { 352 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
362 rq->errors = error; 353 rq->csd.func = __blk_mq_complete_request_remote;
363 rq->csd.func = blk_mq_end_io_remote;
364 rq->csd.info = rq; 354 rq->csd.info = rq;
365 rq->csd.flags = 0; 355 rq->csd.flags = 0;
366 __smp_call_function_single(ctx->cpu, &rq->csd, 0); 356 __smp_call_function_single(ctx->cpu, &rq->csd, 0);
367 } else { 357 } else {
368 __blk_mq_end_io(rq, error); 358 rq->q->softirq_done_fn(rq);
369 } 359 }
370 put_cpu(); 360 put_cpu();
371} 361}
372EXPORT_SYMBOL(blk_mq_end_io);
373 362
374static void blk_mq_start_request(struct request *rq) 363/**
364 * blk_mq_complete_request - end I/O on a request
365 * @rq: the request being processed
366 *
367 * Description:
368 * Ends all I/O on a request. It does not handle partial completions.
369 * The actual completion happens out-of-order, through a IPI handler.
370 **/
371void blk_mq_complete_request(struct request *rq)
372{
373 if (unlikely(blk_should_fake_timeout(rq->q)))
374 return;
375 if (!blk_mark_rq_complete(rq))
376 __blk_mq_complete_request(rq);
377}
378EXPORT_SYMBOL(blk_mq_complete_request);
379
380static void blk_mq_start_request(struct request *rq, bool last)
375{ 381{
376 struct request_queue *q = rq->q; 382 struct request_queue *q = rq->q;
377 383
@@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
384 */ 390 */
385 rq->deadline = jiffies + q->rq_timeout; 391 rq->deadline = jiffies + q->rq_timeout;
386 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 392 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
393
394 if (q->dma_drain_size && blk_rq_bytes(rq)) {
395 /*
396 * Make sure space for the drain appears. We know we can do
397 * this because max_hw_segments has been adjusted to be one
398 * fewer than the device can handle.
399 */
400 rq->nr_phys_segments++;
401 }
402
403 /*
404 * Flag the last request in the series so that drivers know when IO
405 * should be kicked off, if they don't do it on a per-request basis.
406 *
407 * Note: the flag isn't the only condition drivers should do kick off.
408 * If drive is busy, the last request might not have the bit set.
409 */
410 if (last)
411 rq->cmd_flags |= REQ_END;
387} 412}
388 413
389static void blk_mq_requeue_request(struct request *rq) 414static void blk_mq_requeue_request(struct request *rq)
@@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
392 417
393 trace_block_rq_requeue(q, rq); 418 trace_block_rq_requeue(q, rq);
394 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 419 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
420
421 rq->cmd_flags &= ~REQ_END;
422
423 if (q->dma_drain_size && blk_rq_bytes(rq))
424 rq->nr_phys_segments--;
395} 425}
396 426
397struct blk_mq_timeout_data { 427struct blk_mq_timeout_data {
@@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
559 589
560 rq = list_first_entry(&rq_list, struct request, queuelist); 590 rq = list_first_entry(&rq_list, struct request, queuelist);
561 list_del_init(&rq->queuelist); 591 list_del_init(&rq->queuelist);
562 blk_mq_start_request(rq);
563 592
564 /* 593 blk_mq_start_request(rq, list_empty(&rq_list));
565 * Last request in the series. Flag it as such, this
566 * enables drivers to know when IO should be kicked off,
567 * if they don't do it on a per-request basis.
568 *
569 * Note: the flag isn't the only condition drivers
570 * should do kick off. If drive is busy, the last
571 * request might not have the bit set.
572 */
573 if (list_empty(&rq_list))
574 rq->cmd_flags |= REQ_END;
575 594
576 ret = q->mq_ops->queue_rq(hctx, rq); 595 ret = q->mq_ops->queue_rq(hctx, rq);
577 switch (ret) { 596 switch (ret) {
@@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
589 break; 608 break;
590 default: 609 default:
591 pr_err("blk-mq: bad return on queue: %d\n", ret); 610 pr_err("blk-mq: bad return on queue: %d\n", ret);
592 rq->errors = -EIO;
593 case BLK_MQ_RQ_QUEUE_ERROR: 611 case BLK_MQ_RQ_QUEUE_ERROR:
612 rq->errors = -EIO;
594 blk_mq_end_io(rq, rq->errors); 613 blk_mq_end_io(rq, rq->errors);
595 break; 614 break;
596 } 615 }
@@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
693} 712}
694 713
695static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, 714static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
696 struct request *rq) 715 struct request *rq, bool at_head)
697{ 716{
698 struct blk_mq_ctx *ctx = rq->mq_ctx; 717 struct blk_mq_ctx *ctx = rq->mq_ctx;
699 718
700 trace_block_rq_insert(hctx->queue, rq); 719 trace_block_rq_insert(hctx->queue, rq);
701 720
702 list_add_tail(&rq->queuelist, &ctx->rq_list); 721 if (at_head)
722 list_add(&rq->queuelist, &ctx->rq_list);
723 else
724 list_add_tail(&rq->queuelist, &ctx->rq_list);
703 blk_mq_hctx_mark_pending(hctx, ctx); 725 blk_mq_hctx_mark_pending(hctx, ctx);
704 726
705 /* 727 /*
@@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
709} 731}
710 732
711void blk_mq_insert_request(struct request_queue *q, struct request *rq, 733void blk_mq_insert_request(struct request_queue *q, struct request *rq,
712 bool run_queue) 734 bool at_head, bool run_queue)
713{ 735{
714 struct blk_mq_hw_ctx *hctx; 736 struct blk_mq_hw_ctx *hctx;
715 struct blk_mq_ctx *ctx, *current_ctx; 737 struct blk_mq_ctx *ctx, *current_ctx;
@@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
728 rq->mq_ctx = ctx; 750 rq->mq_ctx = ctx;
729 } 751 }
730 spin_lock(&ctx->lock); 752 spin_lock(&ctx->lock);
731 __blk_mq_insert_request(hctx, rq); 753 __blk_mq_insert_request(hctx, rq, at_head);
732 spin_unlock(&ctx->lock); 754 spin_unlock(&ctx->lock);
733 755
734 blk_mq_put_ctx(current_ctx); 756 blk_mq_put_ctx(current_ctx);
@@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
760 782
761 /* ctx->cpu might be offline */ 783 /* ctx->cpu might be offline */
762 spin_lock(&ctx->lock); 784 spin_lock(&ctx->lock);
763 __blk_mq_insert_request(hctx, rq); 785 __blk_mq_insert_request(hctx, rq, false);
764 spin_unlock(&ctx->lock); 786 spin_unlock(&ctx->lock);
765 787
766 blk_mq_put_ctx(current_ctx); 788 blk_mq_put_ctx(current_ctx);
@@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
798 rq = list_first_entry(list, struct request, queuelist); 820 rq = list_first_entry(list, struct request, queuelist);
799 list_del_init(&rq->queuelist); 821 list_del_init(&rq->queuelist);
800 rq->mq_ctx = ctx; 822 rq->mq_ctx = ctx;
801 __blk_mq_insert_request(hctx, rq); 823 __blk_mq_insert_request(hctx, rq, false);
802 } 824 }
803 spin_unlock(&ctx->lock); 825 spin_unlock(&ctx->lock);
804 826
@@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
888 910
889 blk_queue_bounce(q, &bio); 911 blk_queue_bounce(q, &bio);
890 912
913 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
914 bio_endio(bio, -EIO);
915 return;
916 }
917
891 if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) 918 if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
892 return; 919 return;
893 920
@@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
950 __blk_mq_free_request(hctx, ctx, rq); 977 __blk_mq_free_request(hctx, ctx, rq);
951 else { 978 else {
952 blk_mq_bio_to_request(rq, bio); 979 blk_mq_bio_to_request(rq, bio);
953 __blk_mq_insert_request(hctx, rq); 980 __blk_mq_insert_request(hctx, rq, false);
954 } 981 }
955 982
956 spin_unlock(&ctx->lock); 983 spin_unlock(&ctx->lock);
@@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1309 reg->queue_depth = BLK_MQ_MAX_DEPTH; 1336 reg->queue_depth = BLK_MQ_MAX_DEPTH;
1310 } 1337 }
1311 1338
1312 /*
1313 * Set aside a tag for flush requests. It will only be used while
1314 * another flush request is in progress but outside the driver.
1315 *
1316 * TODO: only allocate if flushes are supported
1317 */
1318 reg->queue_depth++;
1319 reg->reserved_tags++;
1320
1321 if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) 1339 if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
1322 return ERR_PTR(-EINVAL); 1340 return ERR_PTR(-EINVAL);
1323 1341
@@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1360 q->mq_ops = reg->ops; 1378 q->mq_ops = reg->ops;
1361 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 1379 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1362 1380
1381 q->sg_reserved_size = INT_MAX;
1382
1363 blk_queue_make_request(q, blk_mq_make_request); 1383 blk_queue_make_request(q, blk_mq_make_request);
1364 blk_queue_rq_timed_out(q, reg->ops->timeout); 1384 blk_queue_rq_timed_out(q, reg->ops->timeout);
1365 if (reg->timeout) 1385 if (reg->timeout)
1366 blk_queue_rq_timeout(q, reg->timeout); 1386 blk_queue_rq_timeout(q, reg->timeout);
1367 1387
1388 if (reg->ops->complete)
1389 blk_queue_softirq_done(q, reg->ops->complete);
1390
1368 blk_mq_init_flush(q); 1391 blk_mq_init_flush(q);
1369 blk_mq_init_cpu_queues(q, reg->nr_hw_queues); 1392 blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
1370 1393
1371 if (blk_mq_init_hw_queues(q, reg, driver_data)) 1394 q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
1395 cache_line_size()), GFP_KERNEL);
1396 if (!q->flush_rq)
1372 goto err_hw; 1397 goto err_hw;
1373 1398
1399 if (blk_mq_init_hw_queues(q, reg, driver_data))
1400 goto err_flush_rq;
1401
1374 blk_mq_map_swqueue(q); 1402 blk_mq_map_swqueue(q);
1375 1403
1376 mutex_lock(&all_q_mutex); 1404 mutex_lock(&all_q_mutex);
@@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1378 mutex_unlock(&all_q_mutex); 1406 mutex_unlock(&all_q_mutex);
1379 1407
1380 return q; 1408 return q;
1409
1410err_flush_rq:
1411 kfree(q->flush_rq);
1381err_hw: 1412err_hw:
1382 kfree(q->mq_map); 1413 kfree(q->mq_map);
1383err_map: 1414err_map:
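
The blk-mq.c hunks above move the REQ_END marking into blk_mq_start_request() and clear it again in blk_mq_requeue_request(), so the "last request of the batch" hint stays consistent across requeues. A rough sketch of how a ->queue_rq handler of this era might consume that hint; mydrv_submit(), mydrv_queue_idle() and mydrv_ring_doorbell() are hypothetical placeholders, not part of the patch:

#include <linux/blk-mq.h>

/* Sketch only: assumes the blk-mq API as of this merge. */
static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct mydrv_queue *mq = hctx->driver_data;

	mydrv_submit(mq, rq);			/* post rq to the hardware ring */

	/*
	 * REQ_END is only a hint: ring the doorbell when it is set, but
	 * also whenever the queue is otherwise idle, since a busy device
	 * can leave the flag off the last request actually dispatched.
	 */
	if ((rq->cmd_flags & REQ_END) || mydrv_queue_idle(mq))
		mydrv_ring_doorbell(mq);

	return BLK_MQ_RQ_QUEUE_OK;
}
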
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,13 +22,13 @@ struct blk_mq_ctx {
22 struct kobject kobj; 22 struct kobject kobj;
23}; 23};
24 24
25void __blk_mq_end_io(struct request *rq, int error); 25void __blk_mq_complete_request(struct request *rq);
26void blk_mq_complete_request(struct request *rq, int error);
27void blk_mq_run_request(struct request *rq, bool run_queue, bool async); 26void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
28void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 27void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
29void blk_mq_init_flush(struct request_queue *q); 28void blk_mq_init_flush(struct request_queue *q);
30void blk_mq_drain_queue(struct request_queue *q); 29void blk_mq_drain_queue(struct request_queue *q);
31void blk_mq_free_queue(struct request_queue *q); 30void blk_mq_free_queue(struct request_queue *q);
31void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
32 32
33/* 33/*
34 * CPU hotplug helpers 34 * CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
549 if (q->mq_ops) 549 if (q->mq_ops)
550 blk_mq_free_queue(q); 550 blk_mq_free_queue(q);
551 551
552 kfree(q->flush_rq);
553
552 blk_trace_shutdown(q); 554 blk_trace_shutdown(q);
553 555
554 bdi_destroy(&q->backing_dev_info); 556 bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
91 case BLK_EH_HANDLED: 91 case BLK_EH_HANDLED:
92 /* Can we use req->errors here? */ 92 /* Can we use req->errors here? */
93 if (q->mq_ops) 93 if (q->mq_ops)
94 blk_mq_complete_request(req, req->errors); 94 __blk_mq_complete_request(req);
95 else 95 else
96 __blk_complete_request(req); 96 __blk_complete_request(req);
97 break; 97 break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
113 q->flush_queue_delayed = 1; 113 q->flush_queue_delayed = 1;
114 return NULL; 114 return NULL;
115 } 115 }
116 if (unlikely(blk_queue_dying(q)) || 116 if (unlikely(blk_queue_bypass(q)) ||
117 !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) 117 !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
118 return NULL; 118 return NULL;
119 } 119 }
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e7515aa43d6b..6f190bc2b8b7 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -243,6 +243,8 @@ static int acpi_ac_resume(struct device *dev)
243 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 243 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
244 return 0; 244 return 0;
245} 245}
246#else
247#define acpi_ac_resume NULL
246#endif 248#endif
247static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); 249static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
248 250
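
The same one-line pattern recurs in several of the ACPI drivers below (ac, battery, button, fan, sbs, thermal): the resume callback is only compiled when CONFIG_PM_SLEEP is set, so the name passed to SIMPLE_DEV_PM_OPS() needs a NULL fallback definition for configurations where the function body is compiled out. In isolation, with a made-up mydrv prefix, the idiom looks like this:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int mydrv_resume(struct device *dev)
{
	/* restore device state after a system-wide resume */
	return 0;
}
#else
#define mydrv_resume NULL	/* keeps the SIMPLE_DEV_PM_OPS() reference valid */
#endif

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, NULL, mydrv_resume);
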
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 018a42883706..797a6938d051 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -841,6 +841,8 @@ static int acpi_battery_resume(struct device *dev)
841 acpi_battery_update(battery); 841 acpi_battery_update(battery);
842 return 0; 842 return 0;
843} 843}
844#else
845#define acpi_battery_resume NULL
844#endif 846#endif
845 847
846static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); 848static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 10e4964d051a..afec4526c48a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -260,14 +260,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
260 }, 260 },
261 { 261 {
262 .callback = dmi_disable_osi_win8, 262 .callback = dmi_disable_osi_win8,
263 .ident = "Dell Inspiron 15R SE",
264 .matches = {
265 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
266 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
267 },
268 },
269 {
270 .callback = dmi_disable_osi_win8,
271 .ident = "ThinkPad Edge E530", 263 .ident = "ThinkPad Edge E530",
272 .matches = { 264 .matches = {
273 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 265 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -322,56 +314,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
322 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
323 }, 315 },
324 }, 316 },
325 {
326 .callback = dmi_disable_osi_win8,
327 .ident = "HP ProBook 2013 models",
328 .matches = {
329 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
330 DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
331 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
332 },
333 },
334 {
335 .callback = dmi_disable_osi_win8,
336 .ident = "HP EliteBook 2013 models",
337 .matches = {
338 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
339 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
340 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
341 },
342 },
343 {
344 .callback = dmi_disable_osi_win8,
345 .ident = "HP ZBook 14",
346 .matches = {
347 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
348 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
349 },
350 },
351 {
352 .callback = dmi_disable_osi_win8,
353 .ident = "HP ZBook 15",
354 .matches = {
355 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
356 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
357 },
358 },
359 {
360 .callback = dmi_disable_osi_win8,
361 .ident = "HP ZBook 17",
362 .matches = {
363 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
364 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
365 },
366 },
367 {
368 .callback = dmi_disable_osi_win8,
369 .ident = "HP EliteBook 8780w",
370 .matches = {
371 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
372 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
373 },
374 },
375 317
376 /* 318 /*
377 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 319 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 11c11f6b8fa1..714e957a871a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -80,6 +80,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event);
80 80
81#ifdef CONFIG_PM_SLEEP 81#ifdef CONFIG_PM_SLEEP
82static int acpi_button_resume(struct device *dev); 82static int acpi_button_resume(struct device *dev);
83#else
84#define acpi_button_resume NULL
83#endif 85#endif
84static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); 86static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
85 87
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
79 ACPI_COMPANION_SET(dev, adev); 79 ACPI_COMPANION_SET(dev, adev);
80 dev->release = acpi_container_release; 80 dev->release = acpi_container_release;
81 ret = device_register(dev); 81 ret = device_register(dev);
82 if (ret) 82 if (ret) {
83 put_device(dev);
83 return ret; 84 return ret;
84 85 }
85 adev->driver_data = dev; 86 adev->driver_data = dev;
86 return 1; 87 return 1;
87} 88}
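
The container.c fix applies the documented device_register() rule: once registration has been attempted, the embedded kobject owns the object, so a failure must be handed back with put_device() rather than ignored or kfree()d. Reduced to the bare pattern, assuming the caller has set a release() handler for the device:

#include <linux/device.h>

static int mydrv_register_child(struct device *child)
{
	int ret;

	ret = device_register(child);
	if (ret) {
		/*
		 * device_register() initialized the kobject even though
		 * device_add() failed; drop the reference so release()
		 * eventually frees the object.
		 */
		put_device(child);
		return ret;
	}
	return 0;
}
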
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..5bfd769fc91f 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
609static void dock_notify(struct dock_station *ds, u32 event) 609static void dock_notify(struct dock_station *ds, u32 event)
610{ 610{
611 acpi_handle handle = ds->handle; 611 acpi_handle handle = ds->handle;
612 struct acpi_device *ad; 612 struct acpi_device *adev = NULL;
613 int surprise_removal = 0; 613 int surprise_removal = 0;
614 614
615 /* 615 /*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
632 switch (event) { 632 switch (event) {
633 case ACPI_NOTIFY_BUS_CHECK: 633 case ACPI_NOTIFY_BUS_CHECK:
634 case ACPI_NOTIFY_DEVICE_CHECK: 634 case ACPI_NOTIFY_DEVICE_CHECK:
635 if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) { 635 acpi_bus_get_device(handle, &adev);
636 if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
636 begin_dock(ds); 637 begin_dock(ds);
637 dock(ds); 638 dock(ds);
638 if (!dock_present(ds)) { 639 if (!dock_present(ds)) {
@@ -712,13 +713,11 @@ static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
712static ssize_t show_docked(struct device *dev, 713static ssize_t show_docked(struct device *dev,
713 struct device_attribute *attr, char *buf) 714 struct device_attribute *attr, char *buf)
714{ 715{
715 struct acpi_device *tmp;
716
717 struct dock_station *dock_station = dev->platform_data; 716 struct dock_station *dock_station = dev->platform_data;
717 struct acpi_device *adev = NULL;
718 718
719 if (!acpi_bus_get_device(dock_station->handle, &tmp)) 719 acpi_bus_get_device(dock_station->handle, &adev);
720 return snprintf(buf, PAGE_SIZE, "1\n"); 720 return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
721 return snprintf(buf, PAGE_SIZE, "0\n");
722} 721}
723static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); 722static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
724 723
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 1fb62900f32a..09e423f3d8ad 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -55,6 +55,9 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
55#ifdef CONFIG_PM_SLEEP 55#ifdef CONFIG_PM_SLEEP
56static int acpi_fan_suspend(struct device *dev); 56static int acpi_fan_suspend(struct device *dev);
57static int acpi_fan_resume(struct device *dev); 57static int acpi_fan_resume(struct device *dev);
58#else
59#define acpi_fan_suspend NULL
60#define acpi_fan_resume NULL
58#endif 61#endif
59static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); 62static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
60 63
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 52d45ea2bc4f..361b40c10c3f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -430,6 +430,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
430 pin_name(pin)); 430 pin_name(pin));
431 } 431 }
432 432
433 kfree(entry);
433 return 0; 434 return 0;
434 } 435 }
435 436
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index d465ae6cdd00..dbd48498b938 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -450,7 +450,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
450{ 450{
451 unsigned long x; 451 unsigned long x;
452 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); 452 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
453 if (sscanf(buf, "%ld\n", &x) == 1) 453 if (sscanf(buf, "%lu\n", &x) == 1)
454 battery->alarm_capacity = x / 454 battery->alarm_capacity = x /
455 (1000 * acpi_battery_scale(battery)); 455 (1000 * acpi_battery_scale(battery));
456 if (battery->present) 456 if (battery->present)
@@ -668,6 +668,8 @@ static int acpi_sbs_resume(struct device *dev)
668 acpi_sbs_callback(sbs); 668 acpi_sbs_callback(sbs);
669 return 0; 669 return 0;
670} 670}
671#else
672#define acpi_sbs_resume NULL
671#endif 673#endif
672 674
673static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); 675static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8349a555b92b..08626c851be7 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -102,6 +102,8 @@ MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
102 102
103#ifdef CONFIG_PM_SLEEP 103#ifdef CONFIG_PM_SLEEP
104static int acpi_thermal_resume(struct device *dev); 104static int acpi_thermal_resume(struct device *dev);
105#else
106#define acpi_thermal_resume NULL
105#endif 107#endif
106static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); 108static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
107 109
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index b727d105046d..b6ba88ed31ae 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -81,11 +81,12 @@ static bool allow_duplicates;
81module_param(allow_duplicates, bool, 0644); 81module_param(allow_duplicates, bool, 0644);
82 82
83/* 83/*
84 * For Windows 8 systems: if set ture and the GPU driver has 84 * For Windows 8 systems: used to decide if video module
85 * registered a backlight interface, skip registering ACPI video's. 85 * should skip registering backlight interface of its own.
86 */ 86 */
87static bool use_native_backlight = false; 87static int use_native_backlight_param = -1;
88module_param(use_native_backlight, bool, 0644); 88module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
89static bool use_native_backlight_dmi = false;
89 90
90static int register_count; 91static int register_count;
91static struct mutex video_list_lock; 92static struct mutex video_list_lock;
@@ -231,9 +232,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
231static int acpi_video_switch_brightness(struct acpi_video_device *device, 232static int acpi_video_switch_brightness(struct acpi_video_device *device,
232 int event); 233 int event);
233 234
235static bool acpi_video_use_native_backlight(void)
236{
237 if (use_native_backlight_param != -1)
238 return use_native_backlight_param;
239 else
240 return use_native_backlight_dmi;
241}
242
234static bool acpi_video_verify_backlight_support(void) 243static bool acpi_video_verify_backlight_support(void)
235{ 244{
236 if (acpi_osi_is_win8() && use_native_backlight && 245 if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
237 backlight_device_registered(BACKLIGHT_RAW)) 246 backlight_device_registered(BACKLIGHT_RAW))
238 return false; 247 return false;
239 return acpi_video_backlight_support(); 248 return acpi_video_backlight_support();
@@ -398,6 +407,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
398 return 0; 407 return 0;
399} 408}
400 409
410static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
411{
412 use_native_backlight_dmi = true;
413 return 0;
414}
415
401static struct dmi_system_id video_dmi_table[] __initdata = { 416static struct dmi_system_id video_dmi_table[] __initdata = {
402 /* 417 /*
403 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 418 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -442,6 +457,120 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
442 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 457 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
443 }, 458 },
444 }, 459 },
460 {
461 .callback = video_set_use_native_backlight,
462 .ident = "ThinkPad T430s",
463 .matches = {
464 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
465 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
466 },
467 },
468 {
469 .callback = video_set_use_native_backlight,
470 .ident = "ThinkPad X230",
471 .matches = {
472 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
473 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
474 },
475 },
476 {
477 .callback = video_set_use_native_backlight,
478 .ident = "ThinkPad X1 Carbon",
479 .matches = {
480 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
481 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
482 },
483 },
484 {
485 .callback = video_set_use_native_backlight,
486 .ident = "Lenovo Yoga 13",
487 .matches = {
488 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
489 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
490 },
491 },
492 {
493 .callback = video_set_use_native_backlight,
494 .ident = "Dell Inspiron 7520",
495 .matches = {
496 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
497 DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
498 },
499 },
500 {
501 .callback = video_set_use_native_backlight,
502 .ident = "Acer Aspire 5733Z",
503 .matches = {
504 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
505 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
506 },
507 },
508 {
509 .callback = video_set_use_native_backlight,
510 .ident = "Acer Aspire V5-431",
511 .matches = {
512 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
513 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
514 },
515 },
516 {
517 .callback = video_set_use_native_backlight,
518 .ident = "HP ProBook 4340s",
519 .matches = {
520 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
521 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
522 },
523 },
524 {
525 .callback = video_set_use_native_backlight,
526 .ident = "HP ProBook 2013 models",
527 .matches = {
528 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
529 DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
530 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
531 },
532 },
533 {
534 .callback = video_set_use_native_backlight,
535 .ident = "HP EliteBook 2013 models",
536 .matches = {
537 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
538 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
539 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
540 },
541 },
542 {
543 .callback = video_set_use_native_backlight,
544 .ident = "HP ZBook 14",
545 .matches = {
546 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
547 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
548 },
549 },
550 {
551 .callback = video_set_use_native_backlight,
552 .ident = "HP ZBook 15",
553 .matches = {
554 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
555 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
556 },
557 },
558 {
559 .callback = video_set_use_native_backlight,
560 .ident = "HP ZBook 17",
561 .matches = {
562 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
563 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
564 },
565 },
566 {
567 .callback = video_set_use_native_backlight,
568 .ident = "HP EliteBook 8780w",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
571 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
572 },
573 },
445 {} 574 {}
446}; 575};
447 576
@@ -685,6 +814,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
685 union acpi_object *o; 814 union acpi_object *o;
686 struct acpi_video_device_brightness *br = NULL; 815 struct acpi_video_device_brightness *br = NULL;
687 int result = -EINVAL; 816 int result = -EINVAL;
817 u32 value;
688 818
689 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { 819 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
690 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " 820 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -715,7 +845,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
715 printk(KERN_ERR PREFIX "Invalid data\n"); 845 printk(KERN_ERR PREFIX "Invalid data\n");
716 continue; 846 continue;
717 } 847 }
718 br->levels[count] = (u32) o->integer.value; 848 value = (u32) o->integer.value;
849 /* Skip duplicate entries */
850 if (count > 2 && br->levels[count - 1] == value)
851 continue;
852
853 br->levels[count] = value;
719 854
720 if (br->levels[count] > max_level) 855 if (br->levels[count] > max_level)
721 max_level = br->levels[count]; 856 max_level = br->levels[count];
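
The use_native_backlight rework above replaces a plain bool with the common tri-state module parameter idiom: -1 means "no explicit choice, use the DMI-derived default", while 0 and 1 force the behaviour from the command line. Stripped of the ACPI video specifics, the idiom is roughly:

#include <linux/module.h>

static int use_feature_param = -1;	/* -1 = auto, 0 = off, 1 = on */
module_param_named(use_feature, use_feature_param, int, 0444);

static bool use_feature_dmi;		/* set by a DMI quirk callback at init time */

static bool use_feature(void)
{
	if (use_feature_param != -1)
		return use_feature_param;	/* explicit user choice wins */
	return use_feature_dmi;			/* otherwise the platform default */
}
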
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a697b77b8865..19080c8e2f2a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,22 +168,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
168 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 168 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
169 }, 169 },
170 }, 170 },
171 {
172 .callback = video_detect_force_vendor,
173 .ident = "HP EliteBook Revolve 810",
174 .matches = {
175 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
176 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
177 },
178 },
179 {
180 .callback = video_detect_force_vendor,
181 .ident = "Lenovo Yoga 13",
182 .matches = {
183 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
184 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
185 },
186 },
187 { }, 171 { },
188}; 172};
189 173
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4e737728aee2..868429a47be4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -247,6 +247,7 @@ config SATA_HIGHBANK
247 247
248config SATA_MV 248config SATA_MV
249 tristate "Marvell SATA support" 249 tristate "Marvell SATA support"
250 select GENERIC_PHY
250 help 251 help
251 This option enables support for the Marvell Serial ATA family. 252 This option enables support for the Marvell Serial ATA family.
252 Currently supports 88SX[56]0[48][01] PCI(-X) chips, 253 Currently supports 88SX[56]0[48][01] PCI(-X) chips,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc2756fb6f33..c81d809c111b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -61,6 +61,7 @@ enum board_ids {
61 /* board IDs by feature in alphabetical order */ 61 /* board IDs by feature in alphabetical order */
62 board_ahci, 62 board_ahci,
63 board_ahci_ign_iferr, 63 board_ahci_ign_iferr,
64 board_ahci_noncq,
64 board_ahci_nosntf, 65 board_ahci_nosntf,
65 board_ahci_yes_fbs, 66 board_ahci_yes_fbs,
66 67
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
121 .udma_mask = ATA_UDMA6, 122 .udma_mask = ATA_UDMA6,
122 .port_ops = &ahci_ops, 123 .port_ops = &ahci_ops,
123 }, 124 },
125 [board_ahci_noncq] = {
126 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
127 .flags = AHCI_FLAG_COMMON,
128 .pio_mask = ATA_PIO4,
129 .udma_mask = ATA_UDMA6,
130 .port_ops = &ahci_ops,
131 },
124 [board_ahci_nosntf] = { 132 [board_ahci_nosntf] = {
125 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), 133 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
126 .flags = AHCI_FLAG_COMMON, 134 .flags = AHCI_FLAG_COMMON,
@@ -452,6 +460,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
452 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ 460 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
453 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 461 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
454 462
463 /*
464 * Samsung SSDs found on some macbooks. NCQ times out.
465 * https://bugzilla.kernel.org/show_bug.cgi?id=60731
466 */
467 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
468
455 /* Enmotus */ 469 /* Enmotus */
456 { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, 470 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
457 471
@@ -1170,8 +1184,10 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
1170 1184
1171 nvec = rc; 1185 nvec = rc;
1172 rc = pci_enable_msi_block(pdev, nvec); 1186 rc = pci_enable_msi_block(pdev, nvec);
1173 if (rc) 1187 if (rc < 0)
1174 goto intx; 1188 goto intx;
1189 else if (rc > 0)
1190 goto single_msi;
1175 1191
1176 return nvec; 1192 return nvec;
1177 1193
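
The ahci_init_interrupts() hunk distinguishes the two failure modes pci_enable_msi_block() had at the time: a negative return means multi-MSI could not be enabled at all, while a positive return reports how many vectors would have been available, in which case the driver retries with a single vector before falling back to INTx. A hypothetical helper mirroring that logic (later kernels fold this retry into pci_alloc_irq_vectors()):

#include <linux/pci.h>

/*
 * Sketch, assuming the pci_enable_msi_block() contract of this era:
 * 0 = success, <0 = failure, >0 = only that many vectors available.
 */
static int mydrv_enable_msi(struct pci_dev *pdev, int nvec)
{
	int rc = pci_enable_msi_block(pdev, nvec);

	if (rc == 0)
		return nvec;				/* got the full set */
	if (rc > 0)
		rc = pci_enable_msi_block(pdev, 1);	/* retry with one vector */
	if (rc == 0)
		return 1;
	return -ENODEV;					/* caller falls back to INTx */
}
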
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 20fd337a5731..7ccc084bf1df 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
447 * otherwise. Don't try hard to recover it. 447 * otherwise. Don't try hard to recover it.
448 */ 448 */
449 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; 449 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
450 } else if (vendor == 0x197b && devid == 0x2352) { 450 } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
451 /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */ 451 /*
452 * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
453 * 0x0325: jmicron JMB394.
454 */
452 ata_for_each_link(link, ap, EDGE) { 455 ata_for_each_link(link, ap, EDGE) {
453 /* SRST breaks detection and disks get misclassified 456 /* SRST breaks detection and disks get misclassified
454 * LPM disabled to avoid potential problems 457 * LPM disabled to avoid potential problems
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 26386f0b89a8..b0b18ec5465f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -119,7 +119,9 @@ static int pata_imx_probe(struct platform_device *pdev)
119 return PTR_ERR(priv->clk); 119 return PTR_ERR(priv->clk);
120 } 120 }
121 121
122 clk_prepare_enable(priv->clk); 122 ret = clk_prepare_enable(priv->clk);
123 if (ret)
124 return ret;
123 125
124 host = ata_host_alloc(&pdev->dev, 1); 126 host = ata_host_alloc(&pdev->dev, 1);
125 if (!host) { 127 if (!host) {
@@ -212,7 +214,9 @@ static int pata_imx_resume(struct device *dev)
212 struct ata_host *host = dev_get_drvdata(dev); 214 struct ata_host *host = dev_get_drvdata(dev);
213 struct pata_imx_priv *priv = host->private_data; 215 struct pata_imx_priv *priv = host->private_data;
214 216
215 clk_prepare_enable(priv->clk); 217 int ret = clk_prepare_enable(priv->clk);
218 if (ret)
219 return ret;
216 220
217 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); 221 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
218 222
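
Both pata_imx hunks make the same correction: clk_prepare_enable() can fail, so its return value has to be checked and propagated instead of being ignored. The minimal shape of the fix:

#include <linux/clk.h>
#include <linux/device.h>

static int mydrv_power_up(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare and enable; either step may fail */
	if (ret) {
		dev_err(dev, "failed to enable clock: %d\n", ret);
		return ret;
	}

	/* ... it is now safe to touch registers that need the clock ... */
	return 0;
}
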
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..05c8a44adf8e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4104,7 +4104,6 @@ static int mv_platform_probe(struct platform_device *pdev)
4104 if (!hpriv->port_phys) 4104 if (!hpriv->port_phys)
4105 return -ENOMEM; 4105 return -ENOMEM;
4106 host->private_data = hpriv; 4106 host->private_data = hpriv;
4107 hpriv->n_ports = n_ports;
4108 hpriv->board_idx = chip_soc; 4107 hpriv->board_idx = chip_soc;
4109 4108
4110 host->iomap = NULL; 4109 host->iomap = NULL;
@@ -4126,17 +4125,24 @@ static int mv_platform_probe(struct platform_device *pdev)
4126 clk_prepare_enable(hpriv->port_clks[port]); 4125 clk_prepare_enable(hpriv->port_clks[port]);
4127 4126
4128 sprintf(port_number, "port%d", port); 4127 sprintf(port_number, "port%d", port);
4129 hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number); 4128 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4129 port_number);
4130 if (IS_ERR(hpriv->port_phys[port])) { 4130 if (IS_ERR(hpriv->port_phys[port])) {
4131 rc = PTR_ERR(hpriv->port_phys[port]); 4131 rc = PTR_ERR(hpriv->port_phys[port]);
4132 hpriv->port_phys[port] = NULL; 4132 hpriv->port_phys[port] = NULL;
4133 if ((rc != -EPROBE_DEFER) && (rc != -ENODEV)) 4133 if (rc != -EPROBE_DEFER)
4134 dev_warn(&pdev->dev, "error getting phy"); 4134 dev_warn(&pdev->dev, "error getting phy %d", rc);
4135
4136 /* Cleanup only the initialized ports */
4137 hpriv->n_ports = port;
4135 goto err; 4138 goto err;
4136 } else 4139 } else
4137 phy_power_on(hpriv->port_phys[port]); 4140 phy_power_on(hpriv->port_phys[port]);
4138 } 4141 }
4139 4142
4143 /* All the ports have been initialized */
4144 hpriv->n_ports = n_ports;
4145
4140 /* 4146 /*
4141 * (Re-)program MBUS remapping windows if we are asked to. 4147 * (Re-)program MBUS remapping windows if we are asked to.
4142 */ 4148 */
@@ -4174,7 +4180,7 @@ err:
4174 clk_disable_unprepare(hpriv->clk); 4180 clk_disable_unprepare(hpriv->clk);
4175 clk_put(hpriv->clk); 4181 clk_put(hpriv->clk);
4176 } 4182 }
4177 for (port = 0; port < n_ports; port++) { 4183 for (port = 0; port < hpriv->n_ports; port++) {
4178 if (!IS_ERR(hpriv->port_clks[port])) { 4184 if (!IS_ERR(hpriv->port_clks[port])) {
4179 clk_disable_unprepare(hpriv->port_clks[port]); 4185 clk_disable_unprepare(hpriv->port_clks[port]);
4180 clk_put(hpriv->port_clks[port]); 4186 clk_put(hpriv->port_clks[port]);
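
The sata_mv change is a partial-initialization bug fix: the error path used to unwind all n_ports entries even though only the ports before the failing index had a clock and phy set up, so hpriv->n_ports is now published only for the ports that were actually initialized. The general pattern, detached from sata_mv's data structures (mydrv_init_one() and mydrv_teardown_one() are hypothetical):

static int mydrv_init_ports(struct mydrv_priv *priv, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = mydrv_init_one(priv, i);
		if (ret)
			goto err;	/* port i itself was NOT initialized */
	}
	priv->n_ports = n;		/* publish the count only on full success */
	return 0;

err:
	priv->n_ports = i;		/* remember how far initialization got */
	while (i-- > 0)
		mydrv_teardown_one(priv, i);	/* unwind in reverse order */
	return ret;
}
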
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index d67fc351343c..b7695e804635 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@ static const struct sil_drivelist {
157 { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, 157 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
158 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, 158 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
159 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, 159 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
160 { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
160 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, 161 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
161 { } 162 { }
162}; 163};
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
133 goto out; 133 goto out;
134 } 134 }
135 135
136 if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
137 ret = -ENOMEM;
138 goto out;
139 }
140
136 /* Found all components */ 141 /* Found all components */
137 ret = master->ops->bind(master->dev); 142 ret = master->ops->bind(master->dev);
138 if (ret < 0) { 143 if (ret < 0) {
144 devres_release_group(master->dev, NULL);
145 dev_info(master->dev, "master bind failed: %d\n", ret);
139 master_remove_components(master); 146 master_remove_components(master);
140 goto out; 147 goto out;
141 } 148 }
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
166{ 173{
167 if (master->bound) { 174 if (master->bound) {
168 master->ops->unbind(master->dev); 175 master->ops->unbind(master->dev);
176 devres_release_group(master->dev, NULL);
169 master->bound = false; 177 master->bound = false;
170 } 178 }
171 179
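
The component framework fix brackets the master's ->bind() callback in a devres group, so that everything the callback allocated with devm_*() can be released as a single unit when binding fails or when the master is later taken down. The bracketing, reduced to its essentials (mydrv_ops stands in for the framework's ops structure):

#include <linux/device.h>

static int mydrv_bring_up(struct device *dev, const struct mydrv_ops *ops)
{
	int ret;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	ret = ops->bind(dev);			/* may allocate with devm_*() */
	if (ret < 0) {
		devres_release_group(dev, NULL);/* free everything bind allocated */
		return ret;
	}
	return 0;				/* group stays open until unbind */
}

static void mydrv_take_down(struct device *dev, const struct mydrv_ops *ops)
{
	ops->unbind(dev);
	devres_release_group(dev, NULL);	/* matches the group opened above */
}
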
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..61d6d62cc0d3 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
616 if (ret) 616 if (ret)
617 return ret; 617 return ret;
618 618
619 seq_printf(s, "\nDma-buf Objects:\n"); 619 seq_puts(s, "\nDma-buf Objects:\n");
620 seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); 620 seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
621 621
622 list_for_each_entry(buf_obj, &db_list.head, list_node) { 622 list_for_each_entry(buf_obj, &db_list.head, list_node) {
623 ret = mutex_lock_interruptible(&buf_obj->lock); 623 ret = mutex_lock_interruptible(&buf_obj->lock);
624 624
625 if (ret) { 625 if (ret) {
626 seq_printf(s, 626 seq_puts(s,
627 "\tERROR locking buffer object: skipping\n"); 627 "\tERROR locking buffer object: skipping\n");
628 continue; 628 continue;
629 } 629 }
630 630
631 seq_printf(s, "\t"); 631 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
632 632 buf_obj->size,
633 seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
634 buf_obj->exp_name, buf_obj->size,
635 buf_obj->file->f_flags, buf_obj->file->f_mode, 633 buf_obj->file->f_flags, buf_obj->file->f_mode,
636 (long)(buf_obj->file->f_count.counter)); 634 (long)(buf_obj->file->f_count.counter),
635 buf_obj->exp_name);
637 636
638 seq_printf(s, "\t\tAttached Devices:\n"); 637 seq_puts(s, "\tAttached Devices:\n");
639 attach_count = 0; 638 attach_count = 0;
640 639
641 list_for_each_entry(attach_obj, &buf_obj->attachments, node) { 640 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
642 seq_printf(s, "\t\t"); 641 seq_puts(s, "\t");
643 642
644 seq_printf(s, "%s\n", attach_obj->dev->init_name); 643 seq_printf(s, "%s\n", dev_name(attach_obj->dev));
645 attach_count++; 644 attach_count++;
646 } 645 }
647 646
648 seq_printf(s, "\n\t\tTotal %d devices attached\n", 647 seq_printf(s, "Total %d devices attached\n\n",
649 attach_count); 648 attach_count);
650 649
651 count++; 650 count++;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
60 NULL_IRQ_NONE = 0, 60 NULL_IRQ_NONE = 0,
61 NULL_IRQ_SOFTIRQ = 1, 61 NULL_IRQ_SOFTIRQ = 1,
62 NULL_IRQ_TIMER = 2, 62 NULL_IRQ_TIMER = 2,
63};
63 64
65enum {
64 NULL_Q_BIO = 0, 66 NULL_Q_BIO = 0,
65 NULL_Q_RQ = 1, 67 NULL_Q_RQ = 1,
66 NULL_Q_MQ = 2, 68 NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
172 174
173static void end_cmd(struct nullb_cmd *cmd) 175static void end_cmd(struct nullb_cmd *cmd)
174{ 176{
175 if (cmd->rq) { 177 switch (queue_mode) {
176 if (queue_mode == NULL_Q_MQ) 178 case NULL_Q_MQ:
177 blk_mq_end_io(cmd->rq, 0); 179 blk_mq_end_io(cmd->rq, 0);
178 else { 180 return;
179 INIT_LIST_HEAD(&cmd->rq->queuelist); 181 case NULL_Q_RQ:
180 blk_end_request_all(cmd->rq, 0); 182 INIT_LIST_HEAD(&cmd->rq->queuelist);
181 } 183 blk_end_request_all(cmd->rq, 0);
182 } else if (cmd->bio) 184 break;
185 case NULL_Q_BIO:
183 bio_endio(cmd->bio, 0); 186 bio_endio(cmd->bio, 0);
187 break;
188 }
184 189
185 if (queue_mode != NULL_Q_MQ) 190 free_cmd(cmd);
186 free_cmd(cmd);
187} 191}
188 192
189static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) 193static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
195 cq = &per_cpu(completion_queues, smp_processor_id()); 199 cq = &per_cpu(completion_queues, smp_processor_id());
196 200
197 while ((entry = llist_del_all(&cq->list)) != NULL) { 201 while ((entry = llist_del_all(&cq->list)) != NULL) {
202 entry = llist_reverse_order(entry);
198 do { 203 do {
199 cmd = container_of(entry, struct nullb_cmd, ll_list); 204 cmd = container_of(entry, struct nullb_cmd, ll_list);
200 end_cmd(cmd); 205 end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
221 226
222static void null_softirq_done_fn(struct request *rq) 227static void null_softirq_done_fn(struct request *rq)
223{ 228{
224 blk_end_request_all(rq, 0); 229 end_cmd(rq->special);
225}
226
227#ifdef CONFIG_SMP
228
229static void null_ipi_cmd_end_io(void *data)
230{
231 struct completion_queue *cq;
232 struct llist_node *entry, *next;
233 struct nullb_cmd *cmd;
234
235 cq = &per_cpu(completion_queues, smp_processor_id());
236
237 entry = llist_del_all(&cq->list);
238
239 while (entry) {
240 next = entry->next;
241 cmd = llist_entry(entry, struct nullb_cmd, ll_list);
242 end_cmd(cmd);
243 entry = next;
244 }
245}
246
247static void null_cmd_end_ipi(struct nullb_cmd *cmd)
248{
249 struct call_single_data *data = &cmd->csd;
250 int cpu = get_cpu();
251 struct completion_queue *cq = &per_cpu(completion_queues, cpu);
252
253 cmd->ll_list.next = NULL;
254
255 if (llist_add(&cmd->ll_list, &cq->list)) {
256 data->func = null_ipi_cmd_end_io;
257 data->flags = 0;
258 __smp_call_function_single(cpu, data, 0);
259 }
260
261 put_cpu();
262} 230}
263 231
264#endif /* CONFIG_SMP */
265
266static inline void null_handle_cmd(struct nullb_cmd *cmd) 232static inline void null_handle_cmd(struct nullb_cmd *cmd)
267{ 233{
268 /* Complete IO by inline, softirq or timer */ 234 /* Complete IO by inline, softirq or timer */
269 switch (irqmode) { 235 switch (irqmode) {
270 case NULL_IRQ_NONE:
271 end_cmd(cmd);
272 break;
273 case NULL_IRQ_SOFTIRQ: 236 case NULL_IRQ_SOFTIRQ:
274#ifdef CONFIG_SMP 237 switch (queue_mode) {
275 null_cmd_end_ipi(cmd); 238 case NULL_Q_MQ:
276#else 239 blk_mq_complete_request(cmd->rq);
240 break;
241 case NULL_Q_RQ:
242 blk_complete_request(cmd->rq);
243 break;
244 case NULL_Q_BIO:
245 /*
246 * XXX: no proper submitting cpu information available.
247 */
248 end_cmd(cmd);
249 break;
250 }
251 break;
252 case NULL_IRQ_NONE:
277 end_cmd(cmd); 253 end_cmd(cmd);
278#endif
279 break; 254 break;
280 case NULL_IRQ_TIMER: 255 case NULL_IRQ_TIMER:
281 null_cmd_end_timer(cmd); 256 null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
411 .queue_rq = null_queue_rq, 386 .queue_rq = null_queue_rq,
412 .map_queue = blk_mq_map_queue, 387 .map_queue = blk_mq_map_queue,
413 .init_hctx = null_init_hctx, 388 .init_hctx = null_init_hctx,
389 .complete = null_softirq_done_fn,
414}; 390};
415 391
416static struct blk_mq_reg null_mq_reg = { 392static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
609{ 585{
610 unsigned int i; 586 unsigned int i;
611 587
612#if !defined(CONFIG_SMP)
613 if (irqmode == NULL_IRQ_SOFTIRQ) {
614 pr_warn("null_blk: softirq completions not available.\n");
615 pr_warn("null_blk: using direct completions.\n");
616 irqmode = NULL_IRQ_NONE;
617 }
618#endif
619 if (bs > PAGE_SIZE) { 588 if (bs > PAGE_SIZE) {
620 pr_warn("null_blk: invalid block size\n"); 589 pr_warn("null_blk: invalid block size\n");
621 pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); 590 pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
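
Among the null_blk changes, the llist_reverse_order() call added to the timer completion path matters because llist_add() builds the lock-less list in LIFO order: without reversing after llist_del_all(), deferred completions would be ended in the opposite order from submission. A reduced version of that drain loop (mydrv_end_cmd() is a placeholder for the per-command completion):

#include <linux/llist.h>
#include <linux/kernel.h>

struct mydrv_cmd {
	struct llist_node ll_list;
	/* ... per-command state ... */
};

static void mydrv_drain_completions(struct llist_head *list)
{
	struct llist_node *entry;
	struct mydrv_cmd *cmd;

	while ((entry = llist_del_all(list)) != NULL) {
		/* llist_add() chains entries LIFO; restore submission order. */
		entry = llist_reverse_order(entry);
		while (entry) {
			cmd = container_of(entry, struct mydrv_cmd, ll_list);
			entry = entry->next;	/* advance before cmd may be freed */
			mydrv_end_cmd(cmd);
		}
	}
}
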
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
110 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); 110 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
111} 111}
112 112
113static inline void virtblk_request_done(struct virtblk_req *vbr) 113static inline void virtblk_request_done(struct request *req)
114{ 114{
115 struct request *req = vbr->req; 115 struct virtblk_req *vbr = req->special;
116 int error = virtblk_result(vbr); 116 int error = virtblk_result(vbr);
117 117
118 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { 118 if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
138 do { 138 do {
139 virtqueue_disable_cb(vq); 139 virtqueue_disable_cb(vq);
140 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { 140 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
141 virtblk_request_done(vbr); 141 blk_mq_complete_request(vbr->req);
142 req_done = true; 142 req_done = true;
143 } 143 }
144 if (unlikely(virtqueue_is_broken(vq))) 144 if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
479 .map_queue = blk_mq_map_queue, 479 .map_queue = blk_mq_map_queue,
480 .alloc_hctx = blk_mq_alloc_single_hw_queue, 480 .alloc_hctx = blk_mq_alloc_single_hw_queue,
481 .free_hctx = blk_mq_free_single_hw_queue, 481 .free_hctx = blk_mq_free_single_hw_queue,
482 .complete = virtblk_request_done,
482}; 483};
483 484
484static struct blk_mq_reg virtio_mq_reg = { 485static struct blk_mq_reg virtio_mq_reg = {
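
virtio_blk is converted here to the split completion model used throughout this series: the interrupt handler only calls blk_mq_complete_request(), and the per-request bookkeeping runs later from the driver's ->complete callback, typically in softirq context on the submitting CPU. Reduced to its moving parts, with mydrv_* names as placeholders for the driver's own code:

#include <linux/blk-mq.h>

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq);

/* Called from the IRQ handler for each finished request: defer the work. */
static void mydrv_isr_complete_one(struct request *req)
{
	blk_mq_complete_request(req);		/* schedules ->complete for req */
}

/* ->complete callback: end the request outside hard-IRQ context. */
static void mydrv_request_done(struct request *req)
{
	struct mydrv_req *vbr = req->special;	/* per-request driver data */

	blk_mq_end_io(req, mydrv_result(vbr));	/* hypothetical status decode */
}

static struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,	/* normal submission path */
	.map_queue	= blk_mq_map_queue,
	.complete	= mydrv_request_done,
};
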
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
299 BUG_ON(num != 0); 299 BUG_ON(num != 0);
300} 300}
301 301
302static void unmap_purged_grants(struct work_struct *work) 302void xen_blkbk_unmap_purged_grants(struct work_struct *work)
303{ 303{
304 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 304 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
305 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 305 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
375 375
376 pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); 376 pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
377 377
378 INIT_LIST_HEAD(&blkif->persistent_purge_list); 378 BUG_ON(!list_empty(&blkif->persistent_purge_list));
379 root = &blkif->persistent_gnts; 379 root = &blkif->persistent_gnts;
380purge_list: 380purge_list:
381 foreach_grant_safe(persistent_gnt, n, root, node) { 381 foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
420 blkif->vbd.overflow_max_grants = 0; 420 blkif->vbd.overflow_max_grants = 0;
421 421
422 /* We can defer this work */ 422 /* We can defer this work */
423 INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
424 schedule_work(&blkif->persistent_purge_work); 423 schedule_work(&blkif->persistent_purge_work);
425 pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); 424 pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
426 return; 425 return;
@@ -625,9 +624,23 @@ purge_gnt_list:
625 print_stats(blkif); 624 print_stats(blkif);
626 } 625 }
627 626
628 /* Since we are shutting down remove all pages from the buffer */ 627 /* Drain pending purge work */
629 shrink_free_pagepool(blkif, 0 /* All */); 628 flush_work(&blkif->persistent_purge_work);
630 629
630 if (log_stats)
631 print_stats(blkif);
632
633 blkif->xenblkd = NULL;
634 xen_blkif_put(blkif);
635
636 return 0;
637}
638
639/*
640 * Remove persistent grants and empty the pool of free pages
641 */
642void xen_blkbk_free_caches(struct xen_blkif *blkif)
643{
631 /* Free all persistent grant pages */ 644 /* Free all persistent grant pages */
632 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) 645 if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
633 free_persistent_gnts(blkif, &blkif->persistent_gnts, 646 free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
636 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); 649 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
637 blkif->persistent_gnt_c = 0; 650 blkif->persistent_gnt_c = 0;
638 651
639 if (log_stats) 652 /* Since we are shutting down remove all pages from the buffer */
640 print_stats(blkif); 653 shrink_free_pagepool(blkif, 0 /* All */);
641
642 blkif->xenblkd = NULL;
643 xen_blkif_put(blkif);
644
645 return 0;
646} 654}
647 655
648/* 656/*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
838 struct grant_page **pages = pending_req->indirect_pages; 846 struct grant_page **pages = pending_req->indirect_pages;
839 struct xen_blkif *blkif = pending_req->blkif; 847 struct xen_blkif *blkif = pending_req->blkif;
840 int indirect_grefs, rc, n, nseg, i; 848 int indirect_grefs, rc, n, nseg, i;
841 struct blkif_request_segment_aligned *segments = NULL; 849 struct blkif_request_segment *segments = NULL;
842 850
843 nseg = pending_req->nr_pages; 851 nseg = pending_req->nr_pages;
844 indirect_grefs = INDIRECT_PAGES(nseg); 852 indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
934{ 942{
935 atomic_set(&blkif->drain, 1); 943 atomic_set(&blkif->drain, 1);
936 do { 944 do {
937 /* The initial value is one, and one refcnt taken at the 945 if (atomic_read(&blkif->inflight) == 0)
938 * start of the xen_blkif_schedule thread. */
939 if (atomic_read(&blkif->refcnt) <= 2)
940 break; 946 break;
941 wait_for_completion_interruptible_timeout( 947 wait_for_completion_interruptible_timeout(
942 &blkif->drain_complete, HZ); 948 &blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
976 * the proper response on the ring. 982 * the proper response on the ring.
977 */ 983 */
978 if (atomic_dec_and_test(&pending_req->pendcnt)) { 984 if (atomic_dec_and_test(&pending_req->pendcnt)) {
979 xen_blkbk_unmap(pending_req->blkif, 985 struct xen_blkif *blkif = pending_req->blkif;
986
987 xen_blkbk_unmap(blkif,
980 pending_req->segments, 988 pending_req->segments,
981 pending_req->nr_pages); 989 pending_req->nr_pages);
982 make_response(pending_req->blkif, pending_req->id, 990 make_response(blkif, pending_req->id,
983 pending_req->operation, pending_req->status); 991 pending_req->operation, pending_req->status);
984 xen_blkif_put(pending_req->blkif); 992 free_req(blkif, pending_req);
985 if (atomic_read(&pending_req->blkif->refcnt) <= 2) { 993 /*
986 if (atomic_read(&pending_req->blkif->drain)) 994 * Make sure the request is freed before releasing blkif,
987 complete(&pending_req->blkif->drain_complete); 995 * or there could be a race between free_req and the
996 * cleanup done in xen_blkif_free during shutdown.
997 *
998 * NB: The fact that we might try to wake up pending_free_wq
999 * before drain_complete (in case there's a drain going on)
1000 * it's not a problem with our current implementation
1001 * because we can assure there's no thread waiting on
1002 * pending_free_wq if there's a drain going on, but it has
1003 * to be taken into account if the current model is changed.
1004 */
1005 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
1006 complete(&blkif->drain_complete);
988 } 1007 }
989 free_req(pending_req->blkif, pending_req); 1008 xen_blkif_put(blkif);
990 } 1009 }
991} 1010}
992 1011
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1240 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. 1259 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1241 */ 1260 */
1242 xen_blkif_get(blkif); 1261 xen_blkif_get(blkif);
1262 atomic_inc(&blkif->inflight);
1243 1263
1244 for (i = 0; i < nseg; i++) { 1264 for (i = 0; i < nseg; i++) {
1245 while ((bio == NULL) || 1265 while ((bio == NULL) ||
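
The xen-blkback rework stops overloading the blkif reference count as an in-flight I/O counter and gives the drain logic a dedicated atomic: submission increments it, the last completion decrements it and wakes the drainer. The skeleton of that pattern, independent of the blkback data structures:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct mydrv_dev {
	atomic_t		inflight;
	atomic_t		draining;
	struct completion	drain_complete;
};

static void mydrv_submit(struct mydrv_dev *d)
{
	atomic_inc(&d->inflight);		/* one more I/O outstanding */
	/* ... hand the request to the backend ... */
}

static void mydrv_complete(struct mydrv_dev *d)
{
	/* ... finish the request first, then account for it ... */
	if (atomic_dec_and_test(&d->inflight) && atomic_read(&d->draining))
		complete(&d->drain_complete);	/* last I/O gone: wake the drainer */
}

static void mydrv_drain(struct mydrv_dev *d)
{
	atomic_set(&d->draining, 1);
	while (atomic_read(&d->inflight) != 0)
		wait_for_completion_interruptible_timeout(&d->drain_complete, HZ);
	atomic_set(&d->draining, 0);
}
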
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
57#define MAX_INDIRECT_SEGMENTS 256 57#define MAX_INDIRECT_SEGMENTS 256
58 58
59#define SEGS_PER_INDIRECT_FRAME \ 59#define SEGS_PER_INDIRECT_FRAME \
60 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) 60 (PAGE_SIZE/sizeof(struct blkif_request_segment))
61#define MAX_INDIRECT_PAGES \ 61#define MAX_INDIRECT_PAGES \
62 ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 62 ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
63#define INDIRECT_PAGES(_segs) \ 63#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
278 /* for barrier (drain) requests */ 278 /* for barrier (drain) requests */
279 struct completion drain_complete; 279 struct completion drain_complete;
280 atomic_t drain; 280 atomic_t drain;
281 atomic_t inflight;
281 /* One thread per one blkif. */ 282 /* One thread per one blkif. */
282 struct task_struct *xenblkd; 283 struct task_struct *xenblkd;
283 unsigned int waiting_reqs; 284 unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
376irqreturn_t xen_blkif_be_int(int irq, void *dev_id); 377irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
377int xen_blkif_schedule(void *arg); 378int xen_blkif_schedule(void *arg);
378int xen_blkif_purge_persistent(void *arg); 379int xen_blkif_purge_persistent(void *arg);
380void xen_blkbk_free_caches(struct xen_blkif *blkif);
379 381
380int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, 382int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
381 struct backend_info *be, int state); 383 struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
383int xen_blkbk_barrier(struct xenbus_transaction xbt, 385int xen_blkbk_barrier(struct xenbus_transaction xbt,
384 struct backend_info *be, int state); 386 struct backend_info *be, int state);
385struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); 387struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
388void xen_blkbk_unmap_purged_grants(struct work_struct *work);
386 389
387static inline void blkif_get_x86_32_req(struct blkif_request *dst, 390static inline void blkif_get_x86_32_req(struct blkif_request *dst,
388 struct blkif_x86_32_request *src) 391 struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
125 blkif->persistent_gnts.rb_node = NULL; 125 blkif->persistent_gnts.rb_node = NULL;
126 spin_lock_init(&blkif->free_pages_lock); 126 spin_lock_init(&blkif->free_pages_lock);
127 INIT_LIST_HEAD(&blkif->free_pages); 127 INIT_LIST_HEAD(&blkif->free_pages);
128 INIT_LIST_HEAD(&blkif->persistent_purge_list);
128 blkif->free_pages_num = 0; 129 blkif->free_pages_num = 0;
129 atomic_set(&blkif->persistent_gnt_in_use, 0); 130 atomic_set(&blkif->persistent_gnt_in_use, 0);
131 atomic_set(&blkif->inflight, 0);
132 INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
130 133
131 INIT_LIST_HEAD(&blkif->pending_free); 134 INIT_LIST_HEAD(&blkif->pending_free);
132 135
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
259 if (!atomic_dec_and_test(&blkif->refcnt)) 262 if (!atomic_dec_and_test(&blkif->refcnt))
260 BUG(); 263 BUG();
261 264
265 /* Remove all persistent grants and the cache of ballooned pages. */
266 xen_blkbk_free_caches(blkif);
267
268 /* Make sure everything is drained before shutting down */
269 BUG_ON(blkif->persistent_gnt_c != 0);
270 BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
271 BUG_ON(blkif->free_pages_num != 0);
272 BUG_ON(!list_empty(&blkif->persistent_purge_list));
273 BUG_ON(!list_empty(&blkif->free_pages));
274 BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
275
262 /* Check that there is no request in use */ 276 /* Check that there is no request in use */
263 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { 277 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
264 list_del(&req->free_list); 278 list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
162#define DEV_NAME "xvd" /* name in /dev */ 162#define DEV_NAME "xvd" /* name in /dev */
163 163
164#define SEGS_PER_INDIRECT_FRAME \ 164#define SEGS_PER_INDIRECT_FRAME \
165 (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) 165 (PAGE_SIZE/sizeof(struct blkif_request_segment))
166#define INDIRECT_GREFS(_segs) \ 166#define INDIRECT_GREFS(_segs) \
167 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 167 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
168 168
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
393 unsigned long id; 393 unsigned long id;
394 unsigned int fsect, lsect; 394 unsigned int fsect, lsect;
395 int i, ref, n; 395 int i, ref, n;
396 struct blkif_request_segment_aligned *segments = NULL; 396 struct blkif_request_segment *segments = NULL;
397 397
398 /* 398 /*
399 * Used to store if we are able to queue the request by just using 399 * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
550 } else { 550 } else {
551 n = i % SEGS_PER_INDIRECT_FRAME; 551 n = i % SEGS_PER_INDIRECT_FRAME;
552 segments[n] = 552 segments[n] =
553 (struct blkif_request_segment_aligned) { 553 (struct blkif_request_segment) {
554 .gref = ref, 554 .gref = ref,
555 .first_sect = fsect, 555 .first_sect = fsect,
556 .last_sect = lsect }; 556 .last_sect = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
1904 case XenbusStateReconfiguring: 1904 case XenbusStateReconfiguring:
1905 case XenbusStateReconfigured: 1905 case XenbusStateReconfigured:
1906 case XenbusStateUnknown: 1906 case XenbusStateUnknown:
1907 case XenbusStateClosed:
1908 break; 1907 break;
1909 1908
1910 case XenbusStateConnected: 1909 case XenbusStateConnected:
1911 blkfront_connect(info); 1910 blkfront_connect(info);
1912 break; 1911 break;
1913 1912
1913 case XenbusStateClosed:
1914 if (dev->state == XenbusStateClosed)
1915 break;
1916 /* Missed the backend's Closing state -- fallthrough */
1914 case XenbusStateClosing: 1917 case XenbusStateClosing:
1915 blkfront_closing(info); 1918 blkfront_closing(info);
1916 break; 1919 break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
499config MAX_RAW_DEVS 499config MAX_RAW_DEVS
500 int "Maximum number of RAW devices to support (1-65536)" 500 int "Maximum number of RAW devices to support (1-65536)"
501 depends on RAW_DRIVER 501 depends on RAW_DRIVER
502 range 1 65536
502 default "256" 503 default "256"
503 help 504 help
504 The maximum number of RAW devices that are supported. 505 The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
190 struct raw_device_data *rawdev; 190 struct raw_device_data *rawdev;
191 struct block_device *bdev; 191 struct block_device *bdev;
192 192
193 if (number <= 0 || number >= MAX_RAW_MINORS) 193 if (number <= 0 || number >= max_raw_minors)
194 return -EINVAL; 194 return -EINVAL;
195 195
196 rawdev = &raw_devices[number]; 196 rawdev = &raw_devices[number];
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
890 } else { 890 } else {
891 /* Failback to copying a page */ 891 /* Failback to copying a page */
892 struct page *page = alloc_page(GFP_KERNEL); 892 struct page *page = alloc_page(GFP_KERNEL);
893 char *src = buf->ops->map(pipe, buf, 1); 893 char *src;
894 char *dst;
895 894
896 if (!page) 895 if (!page)
897 return -ENOMEM; 896 return -ENOMEM;
898 dst = kmap(page);
899 897
900 offset = sd->pos & ~PAGE_MASK; 898 offset = sd->pos & ~PAGE_MASK;
901 899
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
903 if (len + offset > PAGE_SIZE) 901 if (len + offset > PAGE_SIZE)
904 len = PAGE_SIZE - offset; 902 len = PAGE_SIZE - offset;
905 903
906 memcpy(dst + offset, src + buf->offset, len); 904 src = buf->ops->map(pipe, buf, 1);
907 905 memcpy(page_address(page) + offset, src + buf->offset, len);
908 kunmap(page);
909 buf->ops->unmap(pipe, buf, src); 906 buf->ops->unmap(pipe, buf, src);
910 907
911 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); 908 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
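
The pipe_to_sg() hunk reorders the fallback path so buf->ops->map() is only called once alloc_page() has succeeded; the old code mapped the pipe buffer first and leaked that mapping when returning -ENOMEM. Because the page comes from plain GFP_KERNEL it is always kernel-mapped, so page_address() replaces the kmap()/kunmap() pair. A sketch of the corrected ordering only, not the full function:

	struct page *page = alloc_page(GFP_KERNEL);
	char *src;

	if (!page)
		return -ENOMEM;			/* nothing mapped yet, nothing to undo */

	src = buf->ops->map(pipe, buf, 1);	/* map only after allocation succeeds */
	memcpy(page_address(page) + offset, src + buf->offset, len);
	buf->ops->unmap(pipe, buf, src);
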
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 974b2db2fe10..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
99 return; 99 return;
100} 100}
101 101
102static void __init kona_timers_init(struct device_node *node)
103{
104 u32 freq;
105 struct clk *external_clk;
106
107 external_clk = of_clk_get_by_name(node, NULL);
108
109 if (!IS_ERR(external_clk)) {
110 arch_timer_rate = clk_get_rate(external_clk);
111 clk_prepare_enable(external_clk);
112 } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
113 arch_timer_rate = freq;
114 } else {
115 panic("unable to determine clock-frequency");
116 }
117
118 /* Setup IRQ numbers */
119 timers.tmr_irq = irq_of_parse_and_map(node, 0);
120
121 /* Setup IO addresses */
122 timers.tmr_regs = of_iomap(node, 0);
123
124 kona_timer_disable_and_clear(timers.tmr_regs);
125}
126
127static int kona_timer_set_next_event(unsigned long clc, 102static int kona_timer_set_next_event(unsigned long clc,
128 struct clock_event_device *unused) 103 struct clock_event_device *unused)
129{ 104{
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = {
198 173
199static void __init kona_timer_init(struct device_node *node) 174static void __init kona_timer_init(struct device_node *node)
200{ 175{
201 kona_timers_init(node); 176 u32 freq;
177 struct clk *external_clk;
178
179 if (!of_device_is_available(node)) {
180 pr_info("Kona Timer v1 marked as disabled in device tree\n");
181 return;
182 }
183
184 external_clk = of_clk_get_by_name(node, NULL);
185
186 if (!IS_ERR(external_clk)) {
187 arch_timer_rate = clk_get_rate(external_clk);
188 clk_prepare_enable(external_clk);
189 } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
190 arch_timer_rate = freq;
191 } else {
192 pr_err("Kona Timer v1 unable to determine clock-frequency");
193 return;
194 }
195
196 /* Setup IRQ numbers */
197 timers.tmr_irq = irq_of_parse_and_map(node, 0);
198
199 /* Setup IO addresses */
200 timers.tmr_regs = of_iomap(node, 0);
201
202 kona_timer_disable_and_clear(timers.tmr_regs);
203
202 kona_timer_clockevents_init(); 204 kona_timer_clockevents_init();
203 setup_irq(timers.tmr_irq, &kona_timer_irq); 205 setup_irq(timers.tmr_irq, &kona_timer_irq);
204 kona_timer_set_next_event((arch_timer_rate / HZ), NULL); 206 kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 08ca8c9f41cd..cb003a6b72c8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1323,8 +1323,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1323 up_read(&policy->rwsem); 1323 up_read(&policy->rwsem);
1324 1324
1325 if (cpu != policy->cpu) { 1325 if (cpu != policy->cpu) {
1326 if (!frozen) 1326 sysfs_remove_link(&dev->kobj, "cpufreq");
1327 sysfs_remove_link(&dev->kobj, "cpufreq");
1328 } else if (cpus > 1) { 1327 } else if (cpus > 1) {
1329 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); 1328 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1330 if (new_cpu >= 0) { 1329 if (new_cpu >= 0) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 79606f473f48..e90816105921 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,8 +34,10 @@
34 34
35#define SAMPLE_COUNT 3 35#define SAMPLE_COUNT 3
36 36
37#define BYT_RATIOS 0x66a 37#define BYT_RATIOS 0x66a
38#define BYT_VIDS 0x66b 38#define BYT_VIDS 0x66b
39#define BYT_TURBO_RATIOS 0x66c
40
39 41
40#define FRAC_BITS 8 42#define FRAC_BITS 8
41#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 43#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -51,8 +53,6 @@ static inline int32_t div_fp(int32_t x, int32_t y)
51 return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); 53 return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
52} 54}
53 55
54static u64 energy_divisor;
55
56struct sample { 56struct sample {
57 int32_t core_pct_busy; 57 int32_t core_pct_busy;
58 u64 aperf; 58 u64 aperf;
@@ -359,7 +359,7 @@ static int byt_get_min_pstate(void)
359{ 359{
360 u64 value; 360 u64 value;
361 rdmsrl(BYT_RATIOS, value); 361 rdmsrl(BYT_RATIOS, value);
362 return value & 0xFF; 362 return (value >> 8) & 0xFF;
363} 363}
364 364
365static int byt_get_max_pstate(void) 365static int byt_get_max_pstate(void)
@@ -369,6 +369,13 @@ static int byt_get_max_pstate(void)
369 return (value >> 16) & 0xFF; 369 return (value >> 16) & 0xFF;
370} 370}
371 371
372static int byt_get_turbo_pstate(void)
373{
374 u64 value;
375 rdmsrl(BYT_TURBO_RATIOS, value);
376 return value & 0x3F;
377}
378
372static void byt_set_pstate(struct cpudata *cpudata, int pstate) 379static void byt_set_pstate(struct cpudata *cpudata, int pstate)
373{ 380{
374 u64 val; 381 u64 val;
@@ -471,7 +478,7 @@ static struct cpu_defaults byt_params = {
471 .funcs = { 478 .funcs = {
472 .get_max = byt_get_max_pstate, 479 .get_max = byt_get_max_pstate,
473 .get_min = byt_get_min_pstate, 480 .get_min = byt_get_min_pstate,
474 .get_turbo = byt_get_max_pstate, 481 .get_turbo = byt_get_turbo_pstate,
475 .set = byt_set_pstate, 482 .set = byt_set_pstate,
476 .get_vid = byt_get_vid, 483 .get_vid = byt_get_vid,
477 }, 484 },
@@ -630,12 +637,10 @@ static void intel_pstate_timer_func(unsigned long __data)
630{ 637{
631 struct cpudata *cpu = (struct cpudata *) __data; 638 struct cpudata *cpu = (struct cpudata *) __data;
632 struct sample *sample; 639 struct sample *sample;
633 u64 energy;
634 640
635 intel_pstate_sample(cpu); 641 intel_pstate_sample(cpu);
636 642
637 sample = &cpu->samples[cpu->sample_ptr]; 643 sample = &cpu->samples[cpu->sample_ptr];
638 rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
639 644
640 intel_pstate_adjust_busy_pstate(cpu); 645 intel_pstate_adjust_busy_pstate(cpu);
641 646
@@ -644,7 +649,6 @@ static void intel_pstate_timer_func(unsigned long __data)
644 cpu->pstate.current_pstate, 649 cpu->pstate.current_pstate,
645 sample->mperf, 650 sample->mperf,
646 sample->aperf, 651 sample->aperf,
647 div64_u64(energy, energy_divisor),
648 sample->freq); 652 sample->freq);
649 653
650 intel_pstate_set_sample_time(cpu); 654 intel_pstate_set_sample_time(cpu);
@@ -926,7 +930,6 @@ static int __init intel_pstate_init(void)
926 int cpu, rc = 0; 930 int cpu, rc = 0;
927 const struct x86_cpu_id *id; 931 const struct x86_cpu_id *id;
928 struct cpu_defaults *cpu_info; 932 struct cpu_defaults *cpu_info;
929 u64 units;
930 933
931 if (no_load) 934 if (no_load)
932 return -ENODEV; 935 return -ENODEV;
@@ -960,9 +963,6 @@ static int __init intel_pstate_init(void)
960 if (rc) 963 if (rc)
961 goto out; 964 goto out;
962 965
963 rdmsrl(MSR_RAPL_POWER_UNIT, units);
964 energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
965
966 intel_pstate_debug_expose_params(); 966 intel_pstate_debug_expose_params();
967 intel_pstate_sysfs_expose_params(); 967 intel_pstate_sysfs_expose_params();
968 968
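
The intel_pstate hunks above fix the Baytrail P-state enumeration: the minimum ratio lives in bits 15:8 of MSR 0x66a (BYT_RATIOS) rather than bits 7:0, the maximum stays in bits 23:16, and the turbo ratio comes from bits 5:0 of the newly used MSR 0x66c (BYT_TURBO_RATIOS); the RAPL energy read-out is dropped from the sampling path at the same time. The field extraction implied by the hunks, shown standalone for clarity (illustrative only):

	/* MSR numbers and bit positions taken from the hunks above */
	static int byt_min_from_ratios(u64 ratios)   { return (ratios >> 8) & 0xFF; }
	static int byt_max_from_ratios(u64 ratios)   { return (ratios >> 16) & 0xFF; }
	static int byt_turbo_from_msr(u64 turbo_msr) { return turbo_msr & 0x3F; }
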
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e10b646634d7..6684e0342792 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1076,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1076{ 1076{
1077 struct powernow_k8_data *data; 1077 struct powernow_k8_data *data;
1078 struct init_on_cpu init_on_cpu; 1078 struct init_on_cpu init_on_cpu;
1079 int rc; 1079 int rc, cpu;
1080 1080
1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
1082 if (rc) 1082 if (rc)
@@ -1140,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", 1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
1141 data->currfid, data->currvid); 1141 data->currfid, data->currvid);
1142 1142
1143 per_cpu(powernow_data, pol->cpu) = data; 1143 /* Point all the CPUs in this policy to the same data */
1144 for_each_cpu(cpu, pol->cpus)
1145 per_cpu(powernow_data, cpu) = data;
1144 1146
1145 return 0; 1147 return 0;
1146 1148
@@ -1155,6 +1157,7 @@ err_out:
1155static int powernowk8_cpu_exit(struct cpufreq_policy *pol) 1157static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
1156{ 1158{
1157 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1159 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1160 int cpu;
1158 1161
1159 if (!data) 1162 if (!data)
1160 return -EINVAL; 1163 return -EINVAL;
@@ -1165,7 +1168,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
1165 1168
1166 kfree(data->powernow_table); 1169 kfree(data->powernow_table);
1167 kfree(data); 1170 kfree(data);
1168 per_cpu(powernow_data, pol->cpu) = NULL; 1171 for_each_cpu(cpu, pol->cpus)
1172 per_cpu(powernow_data, cpu) = NULL;
1169 1173
1170 return 0; 1174 return 0;
1171} 1175}
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
158 return sl->entry_nr * sizeof(struct nx842_slentry); 158 return sl->entry_nr * sizeof(struct nx842_slentry);
159} 159}
160 160
161static inline unsigned long nx842_get_pa(void *addr)
162{
163 if (is_vmalloc_addr(addr))
164 return page_to_phys(vmalloc_to_page(addr))
165 + offset_in_page(addr);
166 else
167 return __pa(addr);
168}
169
161static int nx842_build_scatterlist(unsigned long buf, int len, 170static int nx842_build_scatterlist(unsigned long buf, int len,
162 struct nx842_scatterlist *sl) 171 struct nx842_scatterlist *sl)
163{ 172{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
168 177
169 entry = sl->entries; 178 entry = sl->entries;
170 while (len) { 179 while (len) {
171 entry->ptr = __pa(buf); 180 entry->ptr = nx842_get_pa((void *)buf);
172 nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); 181 nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
173 if (nextpage < buf + len) { 182 if (nextpage < buf + len) {
174 /* we aren't at the end yet */ 183 /* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
370 op.flags = NX842_OP_COMPRESS; 379 op.flags = NX842_OP_COMPRESS;
371 csbcpb = &workmem->csbcpb; 380 csbcpb = &workmem->csbcpb;
372 memset(csbcpb, 0, sizeof(*csbcpb)); 381 memset(csbcpb, 0, sizeof(*csbcpb));
373 op.csbcpb = __pa(csbcpb); 382 op.csbcpb = nx842_get_pa(csbcpb);
374 op.out = __pa(slout.entries); 383 op.out = nx842_get_pa(slout.entries);
375 384
376 for (i = 0; i < hdr->blocks_nr; i++) { 385 for (i = 0; i < hdr->blocks_nr; i++) {
377 /* 386 /*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
401 */ 410 */
402 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { 411 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
403 /* Create direct DDE */ 412 /* Create direct DDE */
404 op.in = __pa(inbuf); 413 op.in = nx842_get_pa((void *)inbuf);
405 op.inlen = max_sync_size; 414 op.inlen = max_sync_size;
406 415
407 } else { 416 } else {
408 /* Create indirect DDE (scatterlist) */ 417 /* Create indirect DDE (scatterlist) */
409 nx842_build_scatterlist(inbuf, max_sync_size, &slin); 418 nx842_build_scatterlist(inbuf, max_sync_size, &slin);
410 op.in = __pa(slin.entries); 419 op.in = nx842_get_pa(slin.entries);
411 op.inlen = -nx842_get_scatterlist_size(&slin); 420 op.inlen = -nx842_get_scatterlist_size(&slin);
412 } 421 }
413 422
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
565 op.flags = NX842_OP_DECOMPRESS; 574 op.flags = NX842_OP_DECOMPRESS;
566 csbcpb = &workmem->csbcpb; 575 csbcpb = &workmem->csbcpb;
567 memset(csbcpb, 0, sizeof(*csbcpb)); 576 memset(csbcpb, 0, sizeof(*csbcpb));
568 op.csbcpb = __pa(csbcpb); 577 op.csbcpb = nx842_get_pa(csbcpb);
569 578
570 /* 579 /*
571 * max_sync_size may have changed since compression, 580 * max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
597 if (likely((inbuf & NX842_HW_PAGE_MASK) == 606 if (likely((inbuf & NX842_HW_PAGE_MASK) ==
598 ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { 607 ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
599 /* Create direct DDE */ 608 /* Create direct DDE */
600 op.in = __pa(inbuf); 609 op.in = nx842_get_pa((void *)inbuf);
601 op.inlen = hdr->sizes[i]; 610 op.inlen = hdr->sizes[i];
602 } else { 611 } else {
603 /* Create indirect DDE (scatterlist) */ 612 /* Create indirect DDE (scatterlist) */
604 nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); 613 nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
605 op.in = __pa(slin.entries); 614 op.in = nx842_get_pa(slin.entries);
606 op.inlen = -nx842_get_scatterlist_size(&slin); 615 op.inlen = -nx842_get_scatterlist_size(&slin);
607 } 616 }
608 617
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
613 */ 622 */
614 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { 623 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
615 /* Create direct DDE */ 624 /* Create direct DDE */
616 op.out = __pa(outbuf); 625 op.out = nx842_get_pa((void *)outbuf);
617 op.outlen = max_sync_size; 626 op.outlen = max_sync_size;
618 } else { 627 } else {
619 /* Create indirect DDE (scatterlist) */ 628 /* Create indirect DDE (scatterlist) */
620 nx842_build_scatterlist(outbuf, max_sync_size, &slout); 629 nx842_build_scatterlist(outbuf, max_sync_size, &slout);
621 op.out = __pa(slout.entries); 630 op.out = nx842_get_pa(slout.entries);
622 op.outlen = -nx842_get_scatterlist_size(&slout); 631 op.outlen = -nx842_get_scatterlist_size(&slout);
623 } 632 }
624 633
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
346 tristate "MOXART DMA support" 346 tristate "MOXART DMA support"
347 depends on ARCH_MOXART 347 depends on ARCH_MOXART
348 select DMA_ENGINE 348 select DMA_ENGINE
349 select DMA_OF
349 select DMA_VIRTUAL_CHANNELS 350 select DMA_VIRTUAL_CHANNELS
350 help 351 help
351 Enable support for the MOXA ART SoC DMA controller. 352 Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efcc..4e3549a16132 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
77 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); 77 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
78 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { 78 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
79 chan = ioat_chan_by_index(instance, bit); 79 chan = ioat_chan_by_index(instance, bit);
80 tasklet_schedule(&chan->cleanup_task); 80 if (test_bit(IOAT_RUN, &chan->state))
81 tasklet_schedule(&chan->cleanup_task);
81 } 82 }
82 83
83 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); 84 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
93{ 94{
94 struct ioat_chan_common *chan = data; 95 struct ioat_chan_common *chan = data;
95 96
96 tasklet_schedule(&chan->cleanup_task); 97 if (test_bit(IOAT_RUN, &chan->state))
98 tasklet_schedule(&chan->cleanup_task);
97 99
98 return IRQ_HANDLED; 100 return IRQ_HANDLED;
99} 101}
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
116 chan->timer.function = device->timer_fn; 118 chan->timer.function = device->timer_fn;
117 chan->timer.data = data; 119 chan->timer.data = data;
118 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); 120 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
119 tasklet_disable(&chan->cleanup_task);
120} 121}
121 122
122/** 123/**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
354 writel(((u64) chan->completion_dma) >> 32, 355 writel(((u64) chan->completion_dma) >> 32,
355 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 356 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
356 357
357 tasklet_enable(&chan->cleanup_task); 358 set_bit(IOAT_RUN, &chan->state);
358 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ 359 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
359 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", 360 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
360 __func__, ioat->desccount); 361 __func__, ioat->desccount);
361 return ioat->desccount; 362 return ioat->desccount;
362} 363}
363 364
365void ioat_stop(struct ioat_chan_common *chan)
366{
367 struct ioatdma_device *device = chan->device;
368 struct pci_dev *pdev = device->pdev;
369 int chan_id = chan_num(chan);
370 struct msix_entry *msix;
371
372 /* 1/ stop irq from firing tasklets
373 * 2/ stop the tasklet from re-arming irqs
374 */
375 clear_bit(IOAT_RUN, &chan->state);
376
377 /* flush inflight interrupts */
378 switch (device->irq_mode) {
379 case IOAT_MSIX:
380 msix = &device->msix_entries[chan_id];
381 synchronize_irq(msix->vector);
382 break;
383 case IOAT_MSI:
384 case IOAT_INTX:
385 synchronize_irq(pdev->irq);
386 break;
387 default:
388 break;
389 }
390
391 /* flush inflight timers */
392 del_timer_sync(&chan->timer);
393
394 /* flush inflight tasklet runs */
395 tasklet_kill(&chan->cleanup_task);
396
397 /* final cleanup now that everything is quiesced and can't re-arm */
398 device->cleanup_fn((unsigned long) &chan->common);
399}
400
364/** 401/**
365 * ioat1_dma_free_chan_resources - release all the descriptors 402 * ioat1_dma_free_chan_resources - release all the descriptors
366 * @chan: the channel to be cleaned 403 * @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
379 if (ioat->desccount == 0) 416 if (ioat->desccount == 0)
380 return; 417 return;
381 418
382 tasklet_disable(&chan->cleanup_task); 419 ioat_stop(chan);
383 del_timer_sync(&chan->timer);
384 ioat1_cleanup(ioat);
385 420
386 /* Delay 100ms after reset to allow internal DMA logic to quiesce 421 /* Delay 100ms after reset to allow internal DMA logic to quiesce
387 * before removing DMA descriptor resources. 422 * before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
526static void ioat1_cleanup_event(unsigned long data) 561static void ioat1_cleanup_event(unsigned long data)
527{ 562{
528 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); 563 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
564 struct ioat_chan_common *chan = &ioat->base;
529 565
530 ioat1_cleanup(ioat); 566 ioat1_cleanup(ioat);
567 if (!test_bit(IOAT_RUN, &chan->state))
568 return;
531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 569 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
532} 570}
533 571
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca9..e982f00a9843 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
356void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); 356void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
357void ioat_kobject_del(struct ioatdma_device *device); 357void ioat_kobject_del(struct ioatdma_device *device);
358int ioat_dma_setup_interrupts(struct ioatdma_device *device); 358int ioat_dma_setup_interrupts(struct ioatdma_device *device);
359void ioat_stop(struct ioat_chan_common *chan);
359extern const struct sysfs_ops ioat_sysfs_ops; 360extern const struct sysfs_ops ioat_sysfs_ops;
360extern struct ioat_sysfs_entry ioat_version_attr; 361extern struct ioat_sysfs_entry ioat_version_attr;
361extern struct ioat_sysfs_entry ioat_cap_attr; 362extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e976..8d1058085eeb 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
190void ioat2_cleanup_event(unsigned long data) 190void ioat2_cleanup_event(unsigned long data)
191{ 191{
192 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 192 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
193 struct ioat_chan_common *chan = &ioat->base;
193 194
194 ioat2_cleanup(ioat); 195 ioat2_cleanup(ioat);
196 if (!test_bit(IOAT_RUN, &chan->state))
197 return;
195 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 198 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
196} 199}
197 200
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
553 ioat->issued = 0; 556 ioat->issued = 0;
554 ioat->tail = 0; 557 ioat->tail = 0;
555 ioat->alloc_order = order; 558 ioat->alloc_order = order;
559 set_bit(IOAT_RUN, &chan->state);
556 spin_unlock_bh(&ioat->prep_lock); 560 spin_unlock_bh(&ioat->prep_lock);
557 spin_unlock_bh(&chan->cleanup_lock); 561 spin_unlock_bh(&chan->cleanup_lock);
558 562
559 tasklet_enable(&chan->cleanup_task);
560 ioat2_start_null_desc(ioat); 563 ioat2_start_null_desc(ioat);
561 564
562 /* check that we got off the ground */ 565 /* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
566 } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); 569 } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
567 570
568 if (is_ioat_active(status) || is_ioat_idle(status)) { 571 if (is_ioat_active(status) || is_ioat_idle(status)) {
569 set_bit(IOAT_RUN, &chan->state);
570 return 1 << ioat->alloc_order; 572 return 1 << ioat->alloc_order;
571 } else { 573 } else {
572 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 574 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
809 if (!ioat->ring) 811 if (!ioat->ring)
810 return; 812 return;
811 813
812 tasklet_disable(&chan->cleanup_task); 814 ioat_stop(chan);
813 del_timer_sync(&chan->timer);
814 device->cleanup_fn((unsigned long) c);
815 device->reset_hw(chan); 815 device->reset_hw(chan);
816 clear_bit(IOAT_RUN, &chan->state);
817 816
818 spin_lock_bh(&chan->cleanup_lock); 817 spin_lock_bh(&chan->cleanup_lock);
819 spin_lock_bh(&ioat->prep_lock); 818 spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e62..b9b38a1cf92f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
464static void ioat3_cleanup_event(unsigned long data) 464static void ioat3_cleanup_event(unsigned long data)
465{ 465{
466 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 466 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
467 struct ioat_chan_common *chan = &ioat->base;
467 468
468 ioat3_cleanup(ioat); 469 ioat3_cleanup(ioat);
470 if (!test_bit(IOAT_RUN, &chan->state))
471 return;
469 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 472 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
470} 473}
471 474
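
The ioat changes replace the tasklet_disable()/tasklet_enable() pairs with an IOAT_RUN flag and a common ioat_stop() teardown: clear the flag so neither the IRQ handlers nor the tasklet re-arm anything, synchronize the interrupt, flush the timer and tasklet, then run the cleanup callback once the channel is quiescent. The cleanup handlers for all three hardware generations gain the same guard, sketched here (illustrative; to_chan() stands in for the per-generation accessor):

	static void cleanup_event(unsigned long data)
	{
		struct ioat_chan_common *chan = to_chan(data);	/* hypothetical accessor */

		do_cleanup(chan);				/* per-generation cleanup */
		if (!test_bit(IOAT_RUN, &chan->state))
			return;		/* channel is being torn down, do not re-arm */
		writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
	}
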
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
497 if (!mv_can_chain(grp_start)) 497 if (!mv_can_chain(grp_start))
498 goto submit_done; 498 goto submit_done;
499 499
500 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", 500 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
501 old_chain_tail->async_tx.phys); 501 &old_chain_tail->async_tx.phys);
502 502
503 /* fix up the hardware chain */ 503 /* fix up the hardware chain */
504 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); 504 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
527/* returns the number of allocated descriptors */ 527/* returns the number of allocated descriptors */
528static int mv_xor_alloc_chan_resources(struct dma_chan *chan) 528static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
529{ 529{
530 char *hw_desc; 530 void *virt_desc;
531 dma_addr_t dma_desc;
531 int idx; 532 int idx;
532 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 533 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
533 struct mv_xor_desc_slot *slot = NULL; 534 struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
542 " %d descriptor slots", idx); 543 " %d descriptor slots", idx);
543 break; 544 break;
544 } 545 }
545 hw_desc = (char *) mv_chan->dma_desc_pool_virt; 546 virt_desc = mv_chan->dma_desc_pool_virt;
546 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 547 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
547 548
548 dma_async_tx_descriptor_init(&slot->async_tx, chan); 549 dma_async_tx_descriptor_init(&slot->async_tx, chan);
549 slot->async_tx.tx_submit = mv_xor_tx_submit; 550 slot->async_tx.tx_submit = mv_xor_tx_submit;
550 INIT_LIST_HEAD(&slot->chain_node); 551 INIT_LIST_HEAD(&slot->chain_node);
551 INIT_LIST_HEAD(&slot->slot_node); 552 INIT_LIST_HEAD(&slot->slot_node);
552 INIT_LIST_HEAD(&slot->tx_list); 553 INIT_LIST_HEAD(&slot->tx_list);
553 hw_desc = (char *) mv_chan->dma_desc_pool; 554 dma_desc = mv_chan->dma_desc_pool;
554 slot->async_tx.phys = 555 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
555 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
556 slot->idx = idx++; 556 slot->idx = idx++;
557 557
558 spin_lock_bh(&mv_chan->lock); 558 spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
582 int slot_cnt; 582 int slot_cnt;
583 583
584 dev_dbg(mv_chan_to_devp(mv_chan), 584 dev_dbg(mv_chan_to_devp(mv_chan),
585 "%s dest: %x src %x len: %u flags: %ld\n", 585 "%s dest: %pad src %pad len: %u flags: %ld\n",
586 __func__, dest, src, len, flags); 586 __func__, &dest, &src, len, flags);
587 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 587 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
588 return NULL; 588 return NULL;
589 589
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
626 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 626 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
627 627
628 dev_dbg(mv_chan_to_devp(mv_chan), 628 dev_dbg(mv_chan_to_devp(mv_chan),
629 "%s src_cnt: %d len: dest %x %u flags: %ld\n", 629 "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
630 __func__, src_cnt, len, dest, flags); 630 __func__, src_cnt, len, &dest, flags);
631 631
632 spin_lock_bh(&mv_chan->lock); 632 spin_lock_bh(&mv_chan->lock);
633 slot_cnt = mv_chan_xor_slot_count(len, src_cnt); 633 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
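
The mv_xor hunks switch the debug prints from casting dma_addr_t into %x to the %pad/%pa printk extensions, which take a pointer to the address and print it at the correct width whether dma_addr_t is 32 or 64 bits wide, and they stop round-tripping the descriptor pool's DMA address through a char pointer by keeping it in a dma_addr_t. Usage of the format extension, as an illustrative fragment:

	/* %pad dereferences a dma_addr_t pointer, so the format string
	 * no longer has to guess the width of dma_addr_t.
	 */
	dma_addr_t dest = slot->async_tx.phys;

	dev_dbg(dev, "dest: %pad len: %u\n", &dest, len);
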
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
559 * 559 *
560 * called with the mem_ctls_mutex held 560 * called with the mem_ctls_mutex held
561 */ 561 */
562static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) 562static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
563 bool init)
563{ 564{
564 edac_dbg(0, "\n"); 565 edac_dbg(0, "\n");
565 566
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
567 if (mci->op_state != OP_RUNNING_POLL) 568 if (mci->op_state != OP_RUNNING_POLL)
568 return; 569 return;
569 570
570 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); 571 if (init)
572 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
573
571 mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 574 mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
572} 575}
573 576
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
601 * user space has updated our poll period value, need to 604 * user space has updated our poll period value, need to
602 * reset our workq delays 605 * reset our workq delays
603 */ 606 */
604void edac_mc_reset_delay_period(int value) 607void edac_mc_reset_delay_period(unsigned long value)
605{ 608{
606 struct mem_ctl_info *mci; 609 struct mem_ctl_info *mci;
607 struct list_head *item; 610 struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
611 list_for_each(item, &mc_devices) { 614 list_for_each(item, &mc_devices) {
612 mci = list_entry(item, struct mem_ctl_info, link); 615 mci = list_entry(item, struct mem_ctl_info, link);
613 616
614 edac_mc_workq_setup(mci, (unsigned long) value); 617 edac_mc_workq_setup(mci, value, false);
615 } 618 }
616 619
617 mutex_unlock(&mem_ctls_mutex); 620 mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
782 /* This instance is NOW RUNNING */ 785 /* This instance is NOW RUNNING */
783 mci->op_state = OP_RUNNING_POLL; 786 mci->op_state = OP_RUNNING_POLL;
784 787
785 edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); 788 edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
786 } else { 789 } else {
787 mci->op_state = OP_RUNNING_INTERRUPT; 790 mci->op_state = OP_RUNNING_INTERRUPT;
788 } 791 }
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362acf5c..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
52 52
53static int edac_set_poll_msec(const char *val, struct kernel_param *kp) 53static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
54{ 54{
55 long l; 55 unsigned long l;
56 int ret; 56 int ret;
57 57
58 if (!val) 58 if (!val)
59 return -EINVAL; 59 return -EINVAL;
60 60
61 ret = kstrtol(val, 0, &l); 61 ret = kstrtoul(val, 0, &l);
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 if ((int)l != l) 64
65 if (l < 1000)
65 return -EINVAL; 66 return -EINVAL;
66 *((int *)kp->arg) = l; 67
68 *((unsigned long *)kp->arg) = l;
67 69
68 /* notify edac_mc engine to reset the poll period */ 70 /* notify edac_mc engine to reset the poll period */
69 edac_mc_reset_delay_period(l); 71 edac_mc_reset_delay_period(l);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
52extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); 52extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
53extern void edac_device_reset_delay_period(struct edac_device_ctl_info 53extern void edac_device_reset_delay_period(struct edac_device_ctl_info
54 *edac_dev, unsigned long value); 54 *edac_dev, unsigned long value);
55extern void edac_mc_reset_delay_period(int value); 55extern void edac_mc_reset_delay_period(unsigned long value);
56 56
57extern void *edac_align_ptr(void **p, unsigned size, int n_elems); 57extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
58 58
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index ee5b47904130..9bb2cbd5c9f2 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -27,7 +27,7 @@ FMC_PARAM_BUSID(fwe_drv);
27/* The "file=" is like the generic "gateware=" used elsewhere */ 27/* The "file=" is like the generic "gateware=" used elsewhere */
28static char *fwe_file[FMC_MAX_CARDS]; 28static char *fwe_file[FMC_MAX_CARDS];
29static int fwe_file_n; 29static int fwe_file_n;
30module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444); 30module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
31 31
32static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw, 32static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
33 int write) 33 int write)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
403 403
404config GPIO_TB10X 404config GPIO_TB10X
405 bool 405 bool
406 select GENERIC_IRQ_CHIP
406 select OF_GPIO 407 select OF_GPIO
407 408
408comment "I2C GPIO expanders:" 409comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2012-2013 Broadcom Corporation 2 * Copyright (C) 2012-2014 Broadcom Corporation
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as 5 * modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
657 657
658module_platform_driver(bcm_kona_gpio_driver); 658module_platform_driver(bcm_kona_gpio_driver);
659 659
660MODULE_AUTHOR("Broadcom"); 660MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
661MODULE_DESCRIPTION("Broadcom Kona GPIO Driver"); 661MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
662MODULE_LICENSE("GPL v2"); 662MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
97MODULE_LICENSE("GPL"); 97MODULE_LICENSE("GPL");
98MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); 98MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
99MODULE_DESCRIPTION("CLPS711X GPIO driver"); 99MODULE_DESCRIPTION("CLPS711X GPIO driver");
100MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
394 394
395static int intel_gpio_runtime_idle(struct device *dev) 395static int intel_gpio_runtime_idle(struct device *dev)
396{ 396{
397 pm_schedule_suspend(dev, 500); 397 int err = pm_schedule_suspend(dev, 500);
398 return -EBUSY; 398 return err ?: -EBUSY;
399} 399}
400 400
401static const struct dev_pm_ops intel_gpio_pm_ops = { 401static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
40#error GPIO32 option is not enabled for your xtensa core variant 40#error GPIO32 option is not enabled for your xtensa core variant
41#endif 41#endif
42 42
43#if XCHAL_HAVE_CP
44
43static inline unsigned long enable_cp(unsigned long *cpenable) 45static inline unsigned long enable_cp(unsigned long *cpenable)
44{ 46{
45 unsigned long flags; 47 unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
57 local_irq_restore(flags); 59 local_irq_restore(flags);
58} 60}
59 61
62#else
63
64static inline unsigned long enable_cp(unsigned long *cpenable)
65{
66 *cpenable = 0; /* avoid uninitialized value warning */
67 return 0;
68}
69
70static inline void disable_cp(unsigned long flags, unsigned long cpenable)
71{
72}
73
74#endif /* XCHAL_HAVE_CP */
75
60static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset) 76static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
61{ 77{
62 return 1; /* input only */ 78 return 1; /* input only */
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
296 case DRM_CAP_ASYNC_PAGE_FLIP: 296 case DRM_CAP_ASYNC_PAGE_FLIP:
297 req->value = dev->mode_config.async_page_flip; 297 req->value = dev->mode_config.async_page_flip;
298 break; 298 break;
299 case DRM_CAP_CURSOR_WIDTH:
300 if (dev->mode_config.cursor_width)
301 req->value = dev->mode_config.cursor_width;
302 else
303 req->value = 64;
304 break;
305 case DRM_CAP_CURSOR_HEIGHT:
306 if (dev->mode_config.cursor_height)
307 req->value = dev->mode_config.cursor_height;
308 else
309 req->value = 64;
310 break;
299 default: 311 default:
300 return -EINVAL; 312 return -EINVAL;
301 } 313 }
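
The drm_getcap() hunk adds two queryable capabilities, DRM_CAP_CURSOR_WIDTH and DRM_CAP_CURSOR_HEIGHT, falling back to 64 when the driver leaves mode_config.cursor_width/height unset. From userspace the values would be probed roughly as below; this is a sketch assuming libdrm's drmGetCap(), not code from the patch:

	uint64_t w = 64, h = 64;	/* defaults match the kernel fallback */

	drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w);
	drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h);
	/* size the cursor buffer as w x h instead of hardcoding 64x64 */
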
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
51 51
52config DRM_EXYNOS_IPP 52config DRM_EXYNOS_IPP
53 bool "Exynos DRM IPP" 53 bool "Exynos DRM IPP"
54 depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM 54 depends on DRM_EXYNOS
55 help 55 help
56 Choose this option if you want to use IPP feature for DRM. 56 Choose this option if you want to use IPP feature for DRM.
57 57
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
69 69
70config DRM_EXYNOS_GSC 70config DRM_EXYNOS_GSC
71 bool "Exynos DRM GSC" 71 bool "Exynos DRM GSC"
72 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 72 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
73 help 73 help
74 Choose this option if you want to use Exynos GSC for DRM. 74 Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
171 file->driver_priv = file_priv; 171 file->driver_priv = file_priv;
172 172
173 ret = exynos_drm_subdrv_open(dev, file); 173 ret = exynos_drm_subdrv_open(dev, file);
174 if (ret) { 174 if (ret)
175 kfree(file_priv); 175 goto out;
176 file->driver_priv = NULL;
177 }
178 176
179 anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, 177 anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
180 NULL, 0); 178 NULL, 0);
181 if (IS_ERR(anon_filp)) { 179 if (IS_ERR(anon_filp)) {
182 kfree(file_priv); 180 ret = PTR_ERR(anon_filp);
183 return PTR_ERR(anon_filp); 181 goto out;
184 } 182 }
185 183
186 anon_filp->f_mode = FMODE_READ | FMODE_WRITE; 184 anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
187 file_priv->anon_filp = anon_filp; 185 file_priv->anon_filp = anon_filp;
188 186
189 return ret; 187 return ret;
188out:
189 kfree(file_priv);
190 file->driver_priv = NULL;
191 return ret;
190} 192}
191 193
192static void exynos_drm_preclose(struct drm_device *dev, 194static void exynos_drm_preclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
607 reg_type = REG_TYPE_NONE; 607 reg_type = REG_TYPE_NONE;
608 DRM_ERROR("Unknown register offset![%d]\n", reg_offset); 608 DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
609 break; 609 break;
610 }; 610 }
611 611
612 return reg_type; 612 return reg_type;
613} 613}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/pm_runtime.h> 18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20 19
21#include <drm/drmP.h> 20#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 21#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
826 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 825 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
827 826
828 /* 827 /*
829 * quf == NULL condition means all event deletion. 828 * qbuf == NULL condition means all event deletion.
830 * stop operations want to delete all event list. 829 * stop operations want to delete all event list.
831 * another case delete only same buf id. 830 * another case delete only same buf id.
832 */ 831 */
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/of.h> 35#include <linux/of.h>
36#include <linux/of_gpio.h> 36#include <linux/of_gpio.h>
37#include <linux/hdmi.h>
37 38
38#include <drm/exynos_drm.h> 39#include <drm/exynos_drm.h>
39 40
@@ -59,19 +60,6 @@
59#define HDMI_AUI_VERSION 0x01 60#define HDMI_AUI_VERSION 0x01
60#define HDMI_AUI_LENGTH 0x0A 61#define HDMI_AUI_LENGTH 0x0A
61 62
62/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
63enum HDMI_PACKET_TYPE {
64 /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
65 /* InfoFrame packet type */
66 HDMI_PACKET_TYPE_INFOFRAME = 0x80,
67 /* Vendor-Specific InfoFrame */
68 HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
69 /* Auxiliary Video information InfoFrame */
70 HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
71 /* Audio information InfoFrame */
72 HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
73};
74
75enum hdmi_type { 63enum hdmi_type {
76 HDMI_TYPE13, 64 HDMI_TYPE13,
77 HDMI_TYPE14, 65 HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
379 }, 367 },
380}; 368};
381 369
382struct hdmi_infoframe {
383 enum HDMI_PACKET_TYPE type;
384 u8 ver;
385 u8 len;
386};
387
388static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 370static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
389{ 371{
390 return readl(hdata->regs + reg_id); 372 return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
682} 664}
683 665
684static void hdmi_reg_infoframe(struct hdmi_context *hdata, 666static void hdmi_reg_infoframe(struct hdmi_context *hdata,
685 struct hdmi_infoframe *infoframe) 667 union hdmi_infoframe *infoframe)
686{ 668{
687 u32 hdr_sum; 669 u32 hdr_sum;
688 u8 chksum; 670 u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
700 return; 682 return;
701 } 683 }
702 684
703 switch (infoframe->type) { 685 switch (infoframe->any.type) {
704 case HDMI_PACKET_TYPE_AVI: 686 case HDMI_INFOFRAME_TYPE_AVI:
705 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); 687 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
706 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); 688 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
707 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); 689 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
708 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); 690 infoframe->any.version);
709 hdr_sum = infoframe->type + infoframe->ver + infoframe->len; 691 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
692 hdr_sum = infoframe->any.type + infoframe->any.version +
693 infoframe->any.length;
710 694
711 /* Output format zero hardcoded ,RGB YBCR selection */ 695 /* Output format zero hardcoded ,RGB YBCR selection */
712 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | 696 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
722 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); 706 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
723 707
724 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), 708 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
725 infoframe->len, hdr_sum); 709 infoframe->any.length, hdr_sum);
726 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); 710 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
727 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); 711 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
728 break; 712 break;
729 case HDMI_PACKET_TYPE_AUI: 713 case HDMI_INFOFRAME_TYPE_AUDIO:
730 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); 714 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
731 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); 715 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
732 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); 716 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
733 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len); 717 infoframe->any.version);
734 hdr_sum = infoframe->type + infoframe->ver + infoframe->len; 718 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
719 hdr_sum = infoframe->any.type + infoframe->any.version +
720 infoframe->any.length;
735 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), 721 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
736 infoframe->len, hdr_sum); 722 infoframe->any.length, hdr_sum);
737 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); 723 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
738 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); 724 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
739 break; 725 break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
985 971
986static void hdmi_conf_init(struct hdmi_context *hdata) 972static void hdmi_conf_init(struct hdmi_context *hdata)
987{ 973{
988 struct hdmi_infoframe infoframe; 974 union hdmi_infoframe infoframe;
989 975
990 /* disable HPD interrupts from HDMI IP block, use GPIO instead */ 976 /* disable HPD interrupts from HDMI IP block, use GPIO instead */
991 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 977 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1021 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); 1007 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
1022 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); 1008 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
1023 } else { 1009 } else {
1024 infoframe.type = HDMI_PACKET_TYPE_AVI; 1010 infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
1025 infoframe.ver = HDMI_AVI_VERSION; 1011 infoframe.any.version = HDMI_AVI_VERSION;
1026 infoframe.len = HDMI_AVI_LENGTH; 1012 infoframe.any.length = HDMI_AVI_LENGTH;
1027 hdmi_reg_infoframe(hdata, &infoframe); 1013 hdmi_reg_infoframe(hdata, &infoframe);
1028 1014
1029 infoframe.type = HDMI_PACKET_TYPE_AUI; 1015 infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
1030 infoframe.ver = HDMI_AUI_VERSION; 1016 infoframe.any.version = HDMI_AUI_VERSION;
1031 infoframe.len = HDMI_AUI_LENGTH; 1017 infoframe.any.length = HDMI_AUI_LENGTH;
1032 hdmi_reg_infoframe(hdata, &infoframe); 1018 hdmi_reg_infoframe(hdata, &infoframe);
1033 1019
1034 /* enable AVI packet every vsync, fixes purple line problem */ 1020 /* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
208# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) 208# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
209# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) 209# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
210#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ 210#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
211# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0) 211# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
212# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) 212# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
213#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ 213#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
214# define PLL_SERIAL_3_SRL_CCIR (1 << 0) 214# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
528{ 528{
529 uint8_t buf[PB(5) + 1]; 529 uint8_t buf[PB(5) + 1];
530 530
531 memset(buf, 0, sizeof(buf));
531 buf[HB(0)] = 0x84; 532 buf[HB(0)] = 0x84;
532 buf[HB(1)] = 0x01; 533 buf[HB(1)] = 0x01;
533 buf[HB(2)] = 10; 534 buf[HB(2)] = 10;
534 buf[PB(0)] = 0;
535 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ 535 buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
536 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ 536 buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
537 buf[PB(4)] = p->audio_frame[4]; 537 buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
824 } 824 }
825 825
826 div = 148500 / mode->clock; 826 div = 148500 / mode->clock;
827 if (div != 0) {
828 div--;
829 if (div > 3)
830 div = 3;
831 }
827 832
828 /* mute the audio FIFO: */ 833 /* mute the audio FIFO: */
829 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); 834 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
913 918
914 if (priv->rev == TDA19988) { 919 if (priv->rev == TDA19988) {
915 /* let incoming pixels fill the active space (if any) */ 920 /* let incoming pixels fill the active space (if any) */
916 reg_write(encoder, REG_ENABLE_SPACE, 0x01); 921 reg_write(encoder, REG_ENABLE_SPACE, 0x00);
917 } 922 }
918 923
919 /* must be last register set: */ 924 /* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
1094{ 1099{
1095 struct tda998x_priv *priv = to_tda998x_priv(encoder); 1100 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1096 drm_i2c_encoder_destroy(encoder); 1101 drm_i2c_encoder_destroy(encoder);
1102 if (priv->cec)
1103 i2c_unregister_device(priv->cec);
1097 kfree(priv); 1104 kfree(priv);
1098} 1105}
1099 1106
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
1142 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); 1149 priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
1143 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); 1150 priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
1144 1151
1145 priv->current_page = 0; 1152 priv->current_page = 0xff;
1146 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1153 priv->cec = i2c_new_dummy(client->adapter, 0x34);
1154 if (!priv->cec) {
1155 kfree(priv);
1156 return -ENODEV;
1157 }
1147 priv->dpms = DRM_MODE_DPMS_OFF; 1158 priv->dpms = DRM_MODE_DPMS_OFF;
1148 1159
1149 encoder_slave->slave_priv = priv; 1160 encoder_slave->slave_priv = priv;
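
Among the tda998x fixes above, the mode_set hunk stops feeding the raw 148500/mode->clock quotient straight into PLL_SERIAL_2_SRL_NOSC(); the macro loses its & 3 mask and the caller now clamps the divider to the 0..3 range the register field can hold. Worked through: a 148.5 MHz mode gives div = 1 then 0 after the decrement, 74.25 MHz gives 2 then 1, and very low pixel clocks saturate at 3. Illustrative restatement of the clamp:

	div = 148500 / mode->clock;	/* mode->clock is in kHz */
	if (div != 0) {
		div--;
		if (div > 3)
			div = 3;	/* field is only two bits wide */
	}
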
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
1831 1831
1832/* Early gen2 have a totally busted CS tlb and require pinned batches. */ 1832/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1833#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 1833#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1834/*
1835 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
1836 * even when in MSI mode. This results in spurious interrupt warnings if the
1837 * legacy irq no. is shared with another device. The kernel then disables that
1838 * interrupt source and so prevents the other device from working properly.
1839 */
1840#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
1841#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
1834 1842
1835/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1843/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1836 * rows, which changed the alignment requirements and fence programming. 1844 * rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
146 va_list tmp; 146 va_list tmp;
147 147
148 va_copy(tmp, args); 148 va_copy(tmp, args);
149 if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) 149 len = vsnprintf(NULL, 0, f, tmp);
150 va_end(tmp);
151
152 if (!__i915_error_seek(e, len))
150 return; 153 return;
151 } 154 }
152 155
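
The i915_error_vprintf() hunk fixes a missing va_end() on the copied va_list: the length-measuring vsnprintf(NULL, 0, ...) call now stores its result and the copy is ended before the original list is used again. The general pattern, in plain C and purely illustrative:

	va_list tmp;
	int len;

	va_copy(tmp, args);
	len = vsnprintf(NULL, 0, f, tmp);	/* measure only, writes nothing */
	va_end(tmp);				/* every va_copy() needs a matching va_end() */

	if (!__i915_error_seek(e, len))
		return;
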
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
567 567
568 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; 568 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
569 } else { 569 } else {
570 enum transcoder cpu_transcoder = 570 enum transcoder cpu_transcoder = (enum transcoder) pipe;
571 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
572 u32 htotal; 571 u32 htotal;
573 572
574 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 573 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa24347963a..4c1672809493 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
8586 if (ring->id == RCS) 8586 if (ring->id == RCS)
8587 len += 6; 8587 len += 6;
8588 8588
8589 /*
8590 * BSpec MI_DISPLAY_FLIP for IVB:
8591 * "The full packet must be contained within the same cache line."
8592 *
8593 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
8594 * cacheline, if we ever start emitting more commands before
8595 * the MI_DISPLAY_FLIP we may need to first emit everything else,
8596 * then do the cacheline alignment, and finally emit the
8597 * MI_DISPLAY_FLIP.
8598 */
8599 ret = intel_ring_cacheline_align(ring);
8600 if (ret)
8601 goto err_unpin;
8602
8589 ret = intel_ring_begin(ring, len); 8603 ret = intel_ring_begin(ring, len);
8590 if (ret) 8604 if (ret)
8591 goto err_unpin; 8605 goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..57552eb386b0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
404 int i, ret, recv_bytes; 404 int i, ret, recv_bytes;
405 uint32_t status; 405 uint32_t status;
406 int try, precharge, clock = 0; 406 int try, precharge, clock = 0;
407 bool has_aux_irq = true; 407 bool has_aux_irq = HAS_AUX_IRQ(dev);
408 uint32_t timeout; 408 uint32_t timeout;
409 409
410 /* dp aux is extremely sensitive to irq latency, hence request the 410 /* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
537 uint8_t msg[20]; 537 uint8_t msg[20];
538 int msg_bytes; 538 int msg_bytes;
539 uint8_t ack; 539 uint8_t ack;
540 int retry;
540 541
541 if (WARN_ON(send_bytes > 16)) 542 if (WARN_ON(send_bytes > 16))
542 return -E2BIG; 543 return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
548 msg[3] = send_bytes - 1; 549 msg[3] = send_bytes - 1;
549 memcpy(&msg[4], send, send_bytes); 550 memcpy(&msg[4], send, send_bytes);
550 msg_bytes = send_bytes + 4; 551 msg_bytes = send_bytes + 4;
551 for (;;) { 552 for (retry = 0; retry < 7; retry++) {
552 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 553 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
553 if (ret < 0) 554 if (ret < 0)
554 return ret; 555 return ret;
555 ack >>= 4; 556 ack >>= 4;
556 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) 557 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
557 break; 558 return send_bytes;
558 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 559 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
559 udelay(100); 560 usleep_range(400, 500);
560 else 561 else
561 return -EIO; 562 return -EIO;
562 } 563 }
563 return send_bytes; 564
565 DRM_ERROR("too many retries, giving up\n");
566 return -EIO;
564} 567}
565 568
566/* Write a single byte to the aux channel in native mode */ 569/* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
582 int reply_bytes; 585 int reply_bytes;
583 uint8_t ack; 586 uint8_t ack;
584 int ret; 587 int ret;
588 int retry;
585 589
586 if (WARN_ON(recv_bytes > 19)) 590 if (WARN_ON(recv_bytes > 19))
587 return -E2BIG; 591 return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
595 msg_bytes = 4; 599 msg_bytes = 4;
596 reply_bytes = recv_bytes + 1; 600 reply_bytes = recv_bytes + 1;
597 601
598 for (;;) { 602 for (retry = 0; retry < 7; retry++) {
599 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 603 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
600 reply, reply_bytes); 604 reply, reply_bytes);
601 if (ret == 0) 605 if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
608 return ret - 1; 612 return ret - 1;
609 } 613 }
610 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 614 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
611 udelay(100); 615 usleep_range(400, 500);
612 else 616 else
613 return -EIO; 617 return -EIO;
614 } 618 }
619
620 DRM_ERROR("too many retries, giving up\n");
621 return -EIO;
615} 622}
616 623
617static int 624static int
@@ -1869,10 +1876,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
1869 1876
1870 mutex_unlock(&dev_priv->dpio_lock); 1877 mutex_unlock(&dev_priv->dpio_lock);
1871 1878
1872 /* init power sequencer on this pipe and port */ 1879 if (is_edp(intel_dp)) {
1873 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 1880 /* init power sequencer on this pipe and port */
1874 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 1881 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
1875 &power_seq); 1882 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
1883 &power_seq);
1884 }
1876 1885
1877 intel_enable_dp(encoder); 1886 intel_enable_dp(encoder);
1878 1887
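
A small standalone C sketch of the bounded-retry loop the intel_dp.c hunks above introduce: at most 7 attempts, a few hundred microseconds of back-off on a DEFER reply, and an error once the budget is exhausted. All names here (fake_xfer, xfer_with_retries, the reply enum) are illustrative stand-ins, not the driver's API.

#include <stdio.h>
#include <unistd.h>

enum reply { REPLY_ACK, REPLY_DEFER, REPLY_ERROR };

/* Stand-in for a single AUX transaction; defers twice, then acks. */
static enum reply fake_xfer(void *ctx)
{
	int *calls = ctx;
	return (++*calls < 3) ? REPLY_DEFER : REPLY_ACK;
}

/* Bounded retry: at most 7 attempts instead of looping forever, with a
 * short back-off when the other side asks us to defer. */
static int xfer_with_retries(enum reply (*xfer)(void *), void *ctx)
{
	int retry;

	for (retry = 0; retry < 7; retry++) {
		switch (xfer(ctx)) {
		case REPLY_ACK:
			return 0;
		case REPLY_DEFER:
			usleep(400);	/* stand-in for usleep_range(400, 500) */
			break;
		default:
			return -1;	/* hard error, retrying will not help */
		}
	}

	fprintf(stderr, "too many retries, giving up\n");
	return -1;
}

int main(void)
{
	int calls = 0;
	printf("result: %d after %d attempts\n",
	       xfer_with_retries(fake_xfer, &calls), calls);
	return 0;
}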
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
258 algo->data = bus; 258 algo->data = bus;
259} 259}
260 260
261/*
262 * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
263 * mode. This results in spurious interrupt warnings if the legacy irq no. is
264 * shared with another device. The kernel then disables that interrupt source
265 * and so prevents the other device from working properly.
266 */
267#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
268static int 261static int
269gmbus_wait_hw_status(struct drm_i915_private *dev_priv, 262gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
270 u32 gmbus2_status, 263 u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
226#define ACPI_DIGITAL_OUTPUT (3<<8) 226#define ACPI_DIGITAL_OUTPUT (3<<8)
227#define ACPI_LVDS_OUTPUT (4<<8) 227#define ACPI_LVDS_OUTPUT (4<<8)
228 228
229#define MAX_DSLP 1500
230
229#ifdef CONFIG_ACPI 231#ifdef CONFIG_ACPI
230static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) 232static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
231{ 233{
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
260 /* The spec says 2ms should be the default, but it's too small 262 /* The spec says 2ms should be the default, but it's too small
261 * for some machines. */ 263 * for some machines. */
262 dslp = 50; 264 dslp = 50;
263 } else if (dslp > 500) { 265 } else if (dslp > MAX_DSLP) {
264 /* Hey bios, trust must be earned. */ 266 /* Hey bios, trust must be earned. */
265 WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); 267 DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
266 dslp = 500; 268 "using %u ms instead\n", dslp, MAX_DSLP);
269 dslp = MAX_DSLP;
267 } 270 }
268 271
269 /* The spec tells us to do this, but we are the only user... */ 272 /* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b7f1742caf87..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1653 return 0; 1653 return 0;
1654} 1654}
1655 1655
1656/* Align the ring tail to a cacheline boundary */
1657int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
1658{
1659 int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
1660 int ret;
1661
1662 if (num_dwords == 0)
1663 return 0;
1664
1665 ret = intel_ring_begin(ring, num_dwords);
1666 if (ret)
1667 return ret;
1668
1669 while (num_dwords--)
1670 intel_ring_emit(ring, MI_NOOP);
1671
1672 intel_ring_advance(ring);
1673
1674 return 0;
1675}
1676
1656void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1677void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1657{ 1678{
1658 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1679 struct drm_i915_private *dev_priv = ring->dev->dev_private;
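
A compilable sketch of the padding arithmetic used by intel_ring_cacheline_align() above: how many 4-byte NOP dwords bring a tail offset up to the next 64-byte cacheline. Note that, as in the helper above, an already-aligned tail yields a full cacheline of 16 NOOPs with this formula. The names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

/* Number of 4-byte MI_NOOP dwords needed to bring 'tail' to the next
 * 64-byte cacheline boundary (mirrors the arithmetic in the helper above). */
static int noops_to_align(unsigned int tail)
{
	return (int)((CACHELINE_BYTES - (tail & (CACHELINE_BYTES - 1))) /
		     sizeof(uint32_t));
}

int main(void)
{
	unsigned int tails[] = { 0x100, 0x104, 0x13c };
	unsigned int i;

	for (i = 0; i < sizeof(tails) / sizeof(tails[0]); i++)
		printf("tail=0x%03x -> %d MI_NOOPs\n",
		       tails[i], noops_to_align(tails[i]));
	return 0;
}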
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4fe252..0b243ce33714 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
233void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 233void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
234 234
235int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 235int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
236int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
236static inline void intel_ring_emit(struct intel_ring_buffer *ring, 237static inline void intel_ring_emit(struct intel_ring_buffer *ring,
237 u32 data) 238 u32 data)
238{ 239{
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
39 spinlock_t lock; 39 spinlock_t lock;
40 bool stale; 40 bool stale;
41 uint32_t width, height; 41 uint32_t width, height;
42 uint32_t x, y;
42 43
43 /* next cursor to scan-out: */ 44 /* next cursor to scan-out: */
44 uint32_t next_iova; 45 uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
57#define PENDING_FLIP 0x2 58#define PENDING_FLIP 0x2
58 atomic_t pending; 59 atomic_t pending;
59 60
60 /* the fb that we currently hold a scanout ref to: */ 61 /* the fb that we logically (from PoV of KMS API) hold a ref
62 * to. Which we may not yet be scanning out (we may still
63 * be scanning out previous in case of page_flip while waiting
64 * for gpu rendering to complete:
65 */
61 struct drm_framebuffer *fb; 66 struct drm_framebuffer *fb;
62 67
68 /* the fb that we currently hold a scanout ref to: */
69 struct drm_framebuffer *scanout_fb;
70
63 /* for unref'ing framebuffers after scanout completes: */ 71 /* for unref'ing framebuffers after scanout completes: */
64 struct drm_flip_work unref_fb_work; 72 struct drm_flip_work unref_fb_work;
65 73
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
77 return to_mdp4_kms(to_mdp_kms(priv->kms)); 85 return to_mdp4_kms(to_mdp_kms(priv->kms));
78} 86}
79 87
80static void update_fb(struct drm_crtc *crtc, bool async, 88static void request_pending(struct drm_crtc *crtc, uint32_t pending)
81 struct drm_framebuffer *new_fb)
82{ 89{
83 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 90 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
84 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
85 91
86 if (old_fb) 92 atomic_or(pending, &mdp4_crtc->pending);
87 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); 93 mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
94}
95
96static void crtc_flush(struct drm_crtc *crtc)
97{
98 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
99 struct mdp4_kms *mdp4_kms = get_kms(crtc);
100 uint32_t i, flush = 0;
101
102 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
103 struct drm_plane *plane = mdp4_crtc->planes[i];
104 if (plane) {
105 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
106 flush |= pipe2flush(pipe_id);
107 }
108 }
109 flush |= ovlp2flush(mdp4_crtc->ovlp);
110
111 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
112
113 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
114}
115
116static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
117{
118 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
119 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
88 120
89 /* grab reference to incoming scanout fb: */ 121 /* grab reference to incoming scanout fb: */
90 drm_framebuffer_reference(new_fb); 122 drm_framebuffer_reference(new_fb);
91 mdp4_crtc->base.fb = new_fb; 123 mdp4_crtc->base.fb = new_fb;
92 mdp4_crtc->fb = new_fb; 124 mdp4_crtc->fb = new_fb;
93 125
94 if (!async) { 126 if (old_fb)
95 /* enable vblank to pick up the old_fb */ 127 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
96 mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); 128}
97 } 129
130/* unlike update_fb(), take a ref to the new scanout fb *before* updating
131 * plane, then call this. Needed to ensure we don't unref the buffer that
132 * is actually still being scanned out.
133 *
134 * Note that this whole thing goes away with atomic.. since we can defer
135 * calling into driver until rendering is done.
136 */
137static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
138{
139 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
140
141 /* flush updates, to make sure hw is updated to new scanout fb,
142 * so that we can safely queue unref to current fb (ie. next
143 * vblank we know hw is done w/ previous scanout_fb).
144 */
145 crtc_flush(crtc);
146
147 if (mdp4_crtc->scanout_fb)
148 drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
149 mdp4_crtc->scanout_fb);
150
151 mdp4_crtc->scanout_fb = fb;
152
153 /* enable vblank to complete flip: */
154 request_pending(crtc, PENDING_FLIP);
98} 155}
99 156
100/* if file!=NULL, this is preclose potential cancel-flip path */ 157/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
120 spin_unlock_irqrestore(&dev->event_lock, flags); 177 spin_unlock_irqrestore(&dev->event_lock, flags);
121} 178}
122 179
123static void crtc_flush(struct drm_crtc *crtc)
124{
125 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
126 struct mdp4_kms *mdp4_kms = get_kms(crtc);
127 uint32_t i, flush = 0;
128
129 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
130 struct drm_plane *plane = mdp4_crtc->planes[i];
131 if (plane) {
132 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
133 flush |= pipe2flush(pipe_id);
134 }
135 }
136 flush |= ovlp2flush(mdp4_crtc->ovlp);
137
138 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
139
140 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
141}
142
143static void request_pending(struct drm_crtc *crtc, uint32_t pending)
144{
145 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
146
147 atomic_or(pending, &mdp4_crtc->pending);
148 mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
149}
150
151static void pageflip_cb(struct msm_fence_cb *cb) 180static void pageflip_cb(struct msm_fence_cb *cb)
152{ 181{
153 struct mdp4_crtc *mdp4_crtc = 182 struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
158 if (!fb) 187 if (!fb)
159 return; 188 return;
160 189
190 drm_framebuffer_reference(fb);
161 mdp4_plane_set_scanout(mdp4_crtc->plane, fb); 191 mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
162 crtc_flush(crtc); 192 update_scanout(crtc, fb);
163
164 /* enable vblank to complete flip: */
165 request_pending(crtc, PENDING_FLIP);
166} 193}
167 194
168static void unref_fb_worker(struct drm_flip_work *work, void *val) 195static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
320 mode->vsync_end, mode->vtotal, 347 mode->vsync_end, mode->vtotal,
321 mode->type, mode->flags); 348 mode->type, mode->flags);
322 349
350 /* grab extra ref for update_scanout() */
351 drm_framebuffer_reference(crtc->fb);
352
353 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
354 0, 0, mode->hdisplay, mode->vdisplay,
355 x << 16, y << 16,
356 mode->hdisplay << 16, mode->vdisplay << 16);
357 if (ret) {
358 drm_framebuffer_unreference(crtc->fb);
359 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
360 mdp4_crtc->name, ret);
361 return ret;
362 }
363
323 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), 364 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
324 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | 365 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
325 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); 366 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
341 382
342 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); 383 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
343 384
344 update_fb(crtc, false, crtc->fb);
345
346 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
347 0, 0, mode->hdisplay, mode->vdisplay,
348 x << 16, y << 16,
349 mode->hdisplay << 16, mode->vdisplay << 16);
350 if (ret) {
351 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
352 mdp4_crtc->name, ret);
353 return ret;
354 }
355
356 if (dma == DMA_E) { 385 if (dma == DMA_E) {
357 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); 386 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
358 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); 387 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
359 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); 388 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
360 } 389 }
361 390
391 update_fb(crtc, crtc->fb);
392 update_scanout(crtc, crtc->fb);
393
362 return 0; 394 return 0;
363} 395}
364 396
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
385 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 417 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
386 struct drm_plane *plane = mdp4_crtc->plane; 418 struct drm_plane *plane = mdp4_crtc->plane;
387 struct drm_display_mode *mode = &crtc->mode; 419 struct drm_display_mode *mode = &crtc->mode;
420 int ret;
388 421
389 update_fb(crtc, false, crtc->fb); 422 /* grab extra ref for update_scanout() */
423 drm_framebuffer_reference(crtc->fb);
390 424
391 return mdp4_plane_mode_set(plane, crtc, crtc->fb, 425 ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
392 0, 0, mode->hdisplay, mode->vdisplay, 426 0, 0, mode->hdisplay, mode->vdisplay,
393 x << 16, y << 16, 427 x << 16, y << 16,
394 mode->hdisplay << 16, mode->vdisplay << 16); 428 mode->hdisplay << 16, mode->vdisplay << 16);
429 if (ret) {
430 drm_framebuffer_unreference(crtc->fb);
431 return ret;
432 }
433
434 update_fb(crtc, crtc->fb);
435 update_scanout(crtc, crtc->fb);
436
437 return 0;
395} 438}
396 439
397static void mdp4_crtc_load_lut(struct drm_crtc *crtc) 440static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
419 mdp4_crtc->event = event; 462 mdp4_crtc->event = event;
420 spin_unlock_irqrestore(&dev->event_lock, flags); 463 spin_unlock_irqrestore(&dev->event_lock, flags);
421 464
422 update_fb(crtc, true, new_fb); 465 update_fb(crtc, new_fb);
423 466
424 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); 467 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
425} 468}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
442static void update_cursor(struct drm_crtc *crtc) 485static void update_cursor(struct drm_crtc *crtc)
443{ 486{
444 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 487 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
488 struct mdp4_kms *mdp4_kms = get_kms(crtc);
445 enum mdp4_dma dma = mdp4_crtc->dma; 489 enum mdp4_dma dma = mdp4_crtc->dma;
446 unsigned long flags; 490 unsigned long flags;
447 491
448 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); 492 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
449 if (mdp4_crtc->cursor.stale) { 493 if (mdp4_crtc->cursor.stale) {
450 struct mdp4_kms *mdp4_kms = get_kms(crtc);
451 struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; 494 struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
452 struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; 495 struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
453 uint32_t iova = mdp4_crtc->cursor.next_iova; 496 uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
479 mdp4_crtc->cursor.scanout_bo = next_bo; 522 mdp4_crtc->cursor.scanout_bo = next_bo;
480 mdp4_crtc->cursor.stale = false; 523 mdp4_crtc->cursor.stale = false;
481 } 524 }
525
526 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
527 MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
528 MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
529
482 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); 530 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
483} 531}
484 532
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
530 drm_gem_object_unreference_unlocked(old_bo); 578 drm_gem_object_unreference_unlocked(old_bo);
531 } 579 }
532 580
581 crtc_flush(crtc);
533 request_pending(crtc, PENDING_CURSOR); 582 request_pending(crtc, PENDING_CURSOR);
534 583
535 return 0; 584 return 0;
@@ -542,12 +591,15 @@ fail:
542static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 591static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
543{ 592{
544 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 593 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
545 struct mdp4_kms *mdp4_kms = get_kms(crtc); 594 unsigned long flags;
546 enum mdp4_dma dma = mdp4_crtc->dma;
547 595
548 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), 596 spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
549 MDP4_DMA_CURSOR_POS_X(x) | 597 mdp4_crtc->cursor.x = x;
550 MDP4_DMA_CURSOR_POS_Y(y)); 598 mdp4_crtc->cursor.y = y;
599 spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
600
601 crtc_flush(crtc);
602 request_pending(crtc, PENDING_CURSOR);
551 603
552 return 0; 604 return 0;
553} 605}
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
713 crtc = &mdp4_crtc->base; 765 crtc = &mdp4_crtc->base;
714 766
715 mdp4_crtc->plane = plane; 767 mdp4_crtc->plane = plane;
768 mdp4_crtc->id = id;
716 769
717 mdp4_crtc->ovlp = ovlp_id; 770 mdp4_crtc->ovlp = ovlp_id;
718 mdp4_crtc->dma = dma_id; 771 mdp4_crtc->dma = dma_id;
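
A toy C sketch of the ordering the mdp4_crtc.c rework above relies on: take a reference on the incoming scanout buffer before pointing the hardware at it, flush, and only then drop the reference on the buffer that was being scanned out. The deferred unref via the vblank flip-work in the real driver is omitted here; the struct and helper names are hypothetical.

#include <stdio.h>

struct fb {
	const char *name;
	int refs;
};

static void fb_ref(struct fb *fb)   { if (fb) fb->refs++; }
static void fb_unref(struct fb *fb) { if (fb) fb->refs--; }

/* Reference the new scanout buffer before the hardware latches it, then
 * release the old one once it is no longer scanned out. */
static void set_scanout(struct fb **scanout, struct fb *new_fb)
{
	struct fb *old = *scanout;

	fb_ref(new_fb);		/* keep new_fb alive while hw switches to it */
	/* ... program plane registers, then flush ... */
	*scanout = new_fb;
	fb_unref(old);		/* old buffer no longer scanned out */
}

int main(void)
{
	struct fb a = { "fb-a", 1 }, b = { "fb-b", 1 };
	struct fb *scanout = &a;

	set_scanout(&scanout, &b);
	printf("%s refs=%d, %s refs=%d\n", a.name, a.refs, b.name, b.refs);
	return 0;
}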
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
170 MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); 170 MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
171 171
172 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), 172 mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
173 MDP4_PIPE_SRC_XY_X(crtc_x) | 173 MDP4_PIPE_DST_XY_X(crtc_x) |
174 MDP4_PIPE_SRC_XY_Y(crtc_y)); 174 MDP4_PIPE_DST_XY_Y(crtc_y));
175 175
176 mdp4_plane_set_scanout(plane, fb); 176 mdp4_plane_set_scanout(plane, fb);
177 177
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
296 x << 16, y << 16, 296 x << 16, y << 16,
297 mode->hdisplay << 16, mode->vdisplay << 16); 297 mode->hdisplay << 16, mode->vdisplay << 16);
298 if (ret) { 298 if (ret) {
299 drm_framebuffer_unreference(crtc->fb);
299 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", 300 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
300 mdp5_crtc->name, ret); 301 mdp5_crtc->name, ret);
301 return ret; 302 return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
343 0, 0, mode->hdisplay, mode->vdisplay, 344 0, 0, mode->hdisplay, mode->vdisplay,
344 x << 16, y << 16, 345 x << 16, y << 16,
345 mode->hdisplay << 16, mode->vdisplay << 16); 346 mode->hdisplay << 16, mode->vdisplay << 16);
347 if (ret) {
348 drm_framebuffer_unreference(crtc->fb);
349 return ret;
350 }
346 351
347 update_fb(crtc, crtc->fb); 352 update_fb(crtc, crtc->fb);
348 update_scanout(crtc, crtc->fb); 353 update_scanout(crtc, crtc->fb);
349 354
350 return ret; 355 return 0;
351} 356}
352 357
353static void mdp5_crtc_load_lut(struct drm_crtc *crtc) 358static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
644 644
645fail: 645fail:
646 if (obj) 646 if (obj)
647 drm_gem_object_unreference_unlocked(obj); 647 drm_gem_object_unreference(obj);
648 648
649 return ERR_PTR(ret); 649 return ERR_PTR(ret);
650} 650}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
163 163
164 164
165 /* if locking succeeded, pin bo: */ 165 /* if locking succeeded, pin bo: */
166 ret = msm_gem_get_iova(&msm_obj->base, 166 ret = msm_gem_get_iova_locked(&msm_obj->base,
167 submit->gpu->id, &iova); 167 submit->gpu->id, &iova);
168 168
169 /* this would break the logic in the fail path.. there is no 169 /* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
247 /* For now, just map the entire thing. Eventually we probably 247 /* For now, just map the entire thing. Eventually we probably
248 * to do it page-by-page, w/ kmap() if not vmap()d.. 248 * to do it page-by-page, w/ kmap() if not vmap()d..
249 */ 249 */
250 ptr = msm_gem_vaddr(&obj->base); 250 ptr = msm_gem_vaddr_locked(&obj->base);
251 251
252 if (IS_ERR(ptr)) { 252 if (IS_ERR(ptr)) {
253 ret = PTR_ERR(ptr); 253 ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
307{ 307{
308 unsigned i; 308 unsigned i;
309 309
310 mutex_lock(&submit->dev->struct_mutex);
311 for (i = 0; i < submit->nr_bos; i++) { 310 for (i = 0; i < submit->nr_bos; i++) {
312 struct msm_gem_object *msm_obj = submit->bos[i].obj; 311 struct msm_gem_object *msm_obj = submit->bos[i].obj;
313 submit_unlock_unpin_bo(submit, i); 312 submit_unlock_unpin_bo(submit, i);
314 list_del_init(&msm_obj->submit_entry); 313 list_del_init(&msm_obj->submit_entry);
315 drm_gem_object_unreference(&msm_obj->base); 314 drm_gem_object_unreference(&msm_obj->base);
316 } 315 }
317 mutex_unlock(&submit->dev->struct_mutex);
318 316
319 ww_acquire_fini(&submit->ticket); 317 ww_acquire_fini(&submit->ticket);
320 kfree(submit); 318 kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
342 if (args->nr_cmds > MAX_CMDS) 340 if (args->nr_cmds > MAX_CMDS)
343 return -EINVAL; 341 return -EINVAL;
344 342
343 mutex_lock(&dev->struct_mutex);
344
345 submit = submit_create(dev, gpu, args->nr_bos); 345 submit = submit_create(dev, gpu, args->nr_bos);
346 if (!submit) { 346 if (!submit) {
347 ret = -ENOMEM; 347 ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
410out: 410out:
411 if (submit) 411 if (submit)
412 submit_cleanup(submit, !!ret); 412 submit_cleanup(submit, !!ret);
413 mutex_unlock(&dev->struct_mutex);
413 return ret; 414 return ret;
414} 415}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
298 struct msm_drm_private *priv = dev->dev_private; 298 struct msm_drm_private *priv = dev->dev_private;
299 int i, ret; 299 int i, ret;
300 300
301 mutex_lock(&dev->struct_mutex);
302
303 submit->fence = ++priv->next_fence; 301 submit->fence = ++priv->next_fence;
304 302
305 gpu->submitted_fence = submit->fence; 303 gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
331 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); 329 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
332 } 330 }
333 hangcheck_timer_reset(gpu); 331 hangcheck_timer_reset(gpu);
334 mutex_unlock(&dev->struct_mutex);
335 332
336 return ret; 333 return ret;
337} 334}
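
A brief C sketch of the locking shape the msm_gem_submit.c and msm_gpu.c hunks above move to: the helpers no longer take the device lock themselves, and one outer critical section in the ioctl path covers pinning, submission and cleanup. The pthread mutex and function names here are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void pin_buffers(void) { printf("pin (caller holds dev_lock)\n"); }
static void submit_job(void)  { printf("submit (caller holds dev_lock)\n"); }
static void cleanup(void)     { printf("cleanup (caller holds dev_lock)\n"); }

/* One outer critical section instead of each helper locking on its own. */
static int do_submit_ioctl(void)
{
	pthread_mutex_lock(&dev_lock);
	pin_buffers();
	submit_job();
	cleanup();
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	return do_submit_ioctl();
}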
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e88145ba1bf5..d310c195bdfe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
141nouveau-y += core/subdev/mc/nv04.o 141nouveau-y += core/subdev/mc/nv04.o
142nouveau-y += core/subdev/mc/nv40.o 142nouveau-y += core/subdev/mc/nv40.o
143nouveau-y += core/subdev/mc/nv44.o 143nouveau-y += core/subdev/mc/nv44.o
144nouveau-y += core/subdev/mc/nv4c.o
144nouveau-y += core/subdev/mc/nv50.o 145nouveau-y += core/subdev/mc/nv50.o
145nouveau-y += core/subdev/mc/nv94.o 146nouveau-y += core/subdev/mc/nv94.o
146nouveau-y += core/subdev/mc/nv98.o 147nouveau-y += core/subdev/mc/nv98.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1b653dd74a70..08b88591ed60 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device)
311 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 311 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
313 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 313 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
314 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 314 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; 317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device)
334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 337 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; 340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
@@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device)
357 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 357 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
359 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 359 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
360 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 360 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; 363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device)
380 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 380 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
382 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 382 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
383 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 383 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
386 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; 386 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device)
403 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 403 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
405 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 405 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
406 device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 406 device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; 409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 940eaa5d8b9a..9ad722e4e087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1142 if (conf != ~0) { 1142 if (conf != ~0) {
1143 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { 1143 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
1144 u32 soff = (ffs(outp.or) - 1) * 0x08; 1144 u32 soff = (ffs(outp.or) - 1) * 0x08;
1145 u32 ctrl = nv_rd32(priv, 0x610798 + soff); 1145 u32 ctrl = nv_rd32(priv, 0x610794 + soff);
1146 u32 datarate; 1146 u32 datarate;
1147 1147
1148 switch ((ctrl & 0x000f0000) >> 16) { 1148 switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 9a850fe19515..54c1b5b471cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
112 112
113 nv_wr32(priv, 0x002270, cur->addr >> 12); 113 nv_wr32(priv, 0x002270, cur->addr >> 12);
114 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); 114 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
115 if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) 115 if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
116 nv_error(priv, "runlist %d update timeout\n", engine); 116 nv_error(priv, "runlist %d update timeout\n", engine);
117 mutex_unlock(&nv_subdev(priv)->mutex); 117 mutex_unlock(&nv_subdev(priv)->mutex);
118} 118}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 30ed19c52e05..7a367c402978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
539 ustatus &= ~0x04030000; 539 ustatus &= ~0x04030000;
540 } 540 }
541 if (ustatus && display) { 541 if (ustatus && display) {
542 nv_error("%s - TP%d:", name, i); 542 nv_error(priv, "%s - TP%d:", name, i);
543 nouveau_bitfield_print(nv50_mpc_traps, ustatus); 543 nouveau_bitfield_print(nv50_mpc_traps, ustatus);
544 pr_cont("\n"); 544 pr_cont("\n");
545 ustatus = 0; 545 ustatus = 0;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b73d911..3c6738edd127 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
47extern struct nouveau_oclass *nv04_mc_oclass; 47extern struct nouveau_oclass *nv04_mc_oclass;
48extern struct nouveau_oclass *nv40_mc_oclass; 48extern struct nouveau_oclass *nv40_mc_oclass;
49extern struct nouveau_oclass *nv44_mc_oclass; 49extern struct nouveau_oclass *nv44_mc_oclass;
50extern struct nouveau_oclass *nv4c_mc_oclass;
50extern struct nouveau_oclass *nv50_mc_oclass; 51extern struct nouveau_oclass *nv50_mc_oclass;
51extern struct nouveau_oclass *nv94_mc_oclass; 52extern struct nouveau_oclass *nv94_mc_oclass;
52extern struct nouveau_oclass *nv98_mc_oclass; 53extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbec7f08..ef0c9c4a8cc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
130 u16 pcir; 130 u16 pcir;
131 int i; 131 int i;
132 132
133 /* there is no prom on nv4x IGP's */
134 if (device->card_type == NV_40 && device->chipset >= 0x4c)
135 return;
136
133 /* enable access to rom */ 137 /* enable access to rom */
134 if (device->card_type >= NV_50) 138 if (device->card_type >= NV_50)
135 pcireg = 0x088050; 139 pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5ccee93..265d1253624a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
36 .fini = _nouveau_fb_fini, 36 .fini = _nouveau_fb_fini,
37 }, 37 },
38 .base.memtype = nv04_fb_memtype_valid, 38 .base.memtype = nv04_fb_memtype_valid,
39 .base.ram = &nv10_ram_oclass, 39 .base.ram = &nv1a_ram_oclass,
40 .tile.regions = 8, 40 .tile.regions = 8,
41 .tile.init = nv10_fb_tile_init, 41 .tile.init = nv10_fb_tile_init,
42 .tile.fini = nv10_fb_tile_fini, 42 .tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31606c1..81a408e7d034 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
14extern const struct nouveau_mc_intr nv04_mc_intr[]; 14extern const struct nouveau_mc_intr nv04_mc_intr[];
15int nv04_mc_init(struct nouveau_object *); 15int nv04_mc_init(struct nouveau_object *);
16void nv40_mc_msi_rearm(struct nouveau_mc *); 16void nv40_mc_msi_rearm(struct nouveau_mc *);
17int nv44_mc_init(struct nouveau_object *object);
17int nv50_mc_init(struct nouveau_object *); 18int nv50_mc_init(struct nouveau_object *);
18extern const struct nouveau_mc_intr nv50_mc_intr[]; 19extern const struct nouveau_mc_intr nv50_mc_intr[];
19extern const struct nouveau_mc_intr nvc0_mc_intr[]; 20extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c6c4f2..cc4d0d2d886e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
24 24
25#include "nv04.h" 25#include "nv04.h"
26 26
27static int 27int
28nv44_mc_init(struct nouveau_object *object) 28nv44_mc_init(struct nouveau_object *object)
29{ 29{
30 struct nv04_mc_priv *priv = (void *)object; 30 struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 000000000000..a75c35ccf25c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2014 Ilia Mirkin
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ilia Mirkin
23 */
24
25#include "nv04.h"
26
27static void
28nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
29{
30 struct nv04_mc_priv *priv = (void *)pmc;
31 nv_wr08(priv, 0x088050, 0xff);
32}
33
34struct nouveau_oclass *
35nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
36 .base.handle = NV_SUBDEV(MC, 0x4c),
37 .base.ofuncs = &(struct nouveau_ofuncs) {
38 .ctor = nv04_mc_ctor,
39 .dtor = _nouveau_mc_dtor,
40 .init = nv44_mc_init,
41 .fini = _nouveau_mc_fini,
42 },
43 .intr = nv04_mc_intr,
44 .msi_rearm = nv4c_mc_msi_rearm,
45}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 4ef83df2b246..83face3f608f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
106 return 0; 106 return 0;
107} 107}
108 108
109/*
110 * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
111 * requirements on the fourth parameter, so a private implementation
112 * instead of using acpi_check_dsm().
113 */
114static int nouveau_check_optimus_dsm(acpi_handle handle)
115{
116 int result;
117
118 /*
119 * Function 0 returns a Buffer containing available functions.
120 * The args parameter is ignored for function 0, so just put 0 in it
121 */
122 if (nouveau_optimus_dsm(handle, 0, 0, &result))
123 return 0;
124
125 /*
126 * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
127 * If the n-th bit is enabled, function n is supported
128 */
129 return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
130}
131
109static int nouveau_dsm(acpi_handle handle, int func, int arg) 132static int nouveau_dsm(acpi_handle handle, int func, int arg)
110{ 133{
111 int ret = 0; 134 int ret = 0;
@@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
207 1 << NOUVEAU_DSM_POWER)) 230 1 << NOUVEAU_DSM_POWER))
208 retval |= NOUVEAU_DSM_HAS_MUX; 231 retval |= NOUVEAU_DSM_HAS_MUX;
209 232
210 if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100, 233 if (nouveau_check_optimus_dsm(dhandle))
211 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
212 retval |= NOUVEAU_DSM_HAS_OPT; 234 retval |= NOUVEAU_DSM_HAS_OPT;
213 235
214 if (retval & NOUVEAU_DSM_HAS_OPT) { 236 if (retval & NOUVEAU_DSM_HAS_OPT) {
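
A small standalone C sketch of the _DSM capability check described in the nouveau_acpi.c hunk above: function 0 returns a bitmask in which bit 0 must be set for the mask to be valid and bit N means function N is supported. The function number and mask value below are made up for illustration.

#include <stdio.h>

#define DSM_FUNC_CAPS 0x1a	/* hypothetical function number, illustration only */

/* Bit 0 validates the mask; bit N reports support for function N. */
static int dsm_supports(unsigned int caps_mask, unsigned int func)
{
	return (caps_mask & 1) && (caps_mask & (1u << func));
}

int main(void)
{
	unsigned int mask = 0x04000001;	/* made-up reply: bits 0 and 26 set */

	printf("func 0x%x supported: %d\n",
	       DSM_FUNC_CAPS, dsm_supports(mask, DSM_FUNC_CAPS));
	return 0;
}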
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 488686d490c0..4aed1714b9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1249 mem->bus.is_iomem = !dev->agp->cant_use_aperture; 1249 mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1250 } 1250 }
1251#endif 1251#endif
1252 if (!node->memtype) 1252 if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
1253 /* untiled */ 1253 /* untiled */
1254 break; 1254 break;
1255 /* fallthrough, tiled memory */ 1255 /* fallthrough, tiled memory */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 78c8e7146d56..89c484d8ac26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
376 if (ret) 376 if (ret)
377 goto fail_device; 377 goto fail_device;
378 378
379 dev->irq_enabled = true;
380
379 /* workaround an odd issue on nvc1 by disabling the device's 381 /* workaround an odd issue on nvc1 by disabling the device's
380 * nosnoop capability. hopefully won't cause issues until a 382 * nosnoop capability. hopefully won't cause issues until a
381 * better fix is found - assuming there is one... 383 * better fix is found - assuming there is one...
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
475 struct nouveau_drm *drm = nouveau_drm(dev); 477 struct nouveau_drm *drm = nouveau_drm(dev);
476 struct nouveau_object *device; 478 struct nouveau_object *device;
477 479
480 dev->irq_enabled = false;
478 device = drm->client.base.device; 481 device = drm->client.base.device;
479 drm_put_dev(dev); 482 drm_put_dev(dev);
480 483
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7f2eff..471347edc27e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
14{ 14{
15 struct nouveau_device *device = nouveau_dev(priv); 15 struct nouveau_device *device = nouveau_dev(priv);
16 16
17 if (device->chipset >= 0x40) 17 if (device->card_type == NV_40 && device->chipset >= 0x4c)
18 nv_wr32(device, 0x088060, state);
19 else if (device->chipset >= 0x40)
18 nv_wr32(device, 0x088054, state); 20 nv_wr32(device, 0x088054, state);
19 else 21 else
20 nv_wr32(device, 0x001854, state); 22 nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a9338c85630f..0d19f4f94d5a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
559 u32 adjusted_clock = mode->clock; 559 u32 adjusted_clock = mode->clock;
560 int encoder_mode = atombios_get_encoder_mode(encoder); 560 int encoder_mode = atombios_get_encoder_mode(encoder);
561 u32 dp_clock = mode->clock; 561 u32 dp_clock = mode->clock;
562 int bpc = radeon_get_monitor_bpc(connector); 562 int bpc = radeon_crtc->bpc;
563 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); 563 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
564 564
565 /* reset the pll flags */ 565 /* reset the pll flags */
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1176 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); 1176 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1177 1177
1178 /* Set NUM_BANKS. */ 1178 /* Set NUM_BANKS. */
1179 if (rdev->family >= CHIP_BONAIRE) { 1179 if (rdev->family >= CHIP_TAHITI) {
1180 unsigned tileb, index, num_banks, tile_split_bytes; 1180 unsigned tileb, index, num_banks, tile_split_bytes;
1181 1181
1182 /* Calculate the macrotile mode index. */ 1182 /* Calculate the macrotile mode index. */
@@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1194 return -EINVAL; 1194 return -EINVAL;
1195 } 1195 }
1196 1196
1197 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1197 if (rdev->family >= CHIP_BONAIRE)
1198 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1199 else
1200 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
1198 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1201 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1199 } else { 1202 } else {
1200 /* SI and older. */ 1203 /* NI and older. */
1201 if (rdev->family >= CHIP_TAHITI) 1204 if (rdev->family >= CHIP_CAYMAN)
1202 tmp = rdev->config.si.tile_config;
1203 else if (rdev->family >= CHIP_CAYMAN)
1204 tmp = rdev->config.cayman.tile_config; 1205 tmp = rdev->config.cayman.tile_config;
1205 else 1206 else
1206 tmp = rdev->config.evergreen.tile_config; 1207 tmp = rdev->config.evergreen.tile_config;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d61571f49..2cec2ab02f80 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
464 464
465static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) 465static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
466{ 466{
467 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
468 int bpc = 8; 467 int bpc = 8;
469 468
470 if (connector) 469 if (encoder->crtc) {
471 bpc = radeon_get_monitor_bpc(connector); 470 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
471 bpc = radeon_crtc->bpc;
472 }
472 473
473 switch (bpc) { 474 switch (bpc) {
474 case 0: 475 case 0:
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
29#include "cypress_dpm.h" 29#include "cypress_dpm.h"
30#include "btc_dpm.h" 30#include "btc_dpm.h"
31#include "atom.h" 31#include "atom.h"
32#include <linux/seq_file.h>
32 33
33#define MC_CG_ARB_FREQ_F0 0x0a 34#define MC_CG_ARB_FREQ_F0 0x0a
34#define MC_CG_ARB_FREQ_F1 0x0b 35#define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
2756 r600_free_extended_power_table(rdev); 2757 r600_free_extended_power_table(rdev);
2757} 2758}
2758 2759
2760void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2761 struct seq_file *m)
2762{
2763 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2764 struct radeon_ps *rps = &eg_pi->current_rps;
2765 struct rv7xx_ps *ps = rv770_get_ps(rps);
2766 struct rv7xx_pl *pl;
2767 u32 current_index =
2768 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2769 CURRENT_PROFILE_INDEX_SHIFT;
2770
2771 if (current_index > 2) {
2772 seq_printf(m, "invalid dpm profile %d\n", current_index);
2773 } else {
2774 if (current_index == 0)
2775 pl = &ps->low;
2776 else if (current_index == 1)
2777 pl = &ps->medium;
2778 else /* current_index == 2 */
2779 pl = &ps->high;
2780 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2781 if (rdev->family >= CHIP_CEDAR) {
2782 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
2783 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2784 } else {
2785 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
2786 current_index, pl->sclk, pl->mclk, pl->vddc);
2787 }
2788 }
2789}
2790
2759u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) 2791u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
2760{ 2792{
2761 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 2793 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
44# define DYN_SPREAD_SPECTRUM_EN (1 << 23) 44# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
45# define AC_DC_SW (1 << 24) 45# define AC_DC_SW (1 << 24)
46 46
47#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
48# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
49# define CURRENT_PROFILE_INDEX_SHIFT 4
50
47#define CG_BIF_REQ_AND_RSP 0x7f4 51#define CG_BIF_REQ_AND_RSP 0x7f4
48#define CG_CLIENT_REQ(x) ((x) << 0) 52#define CG_CLIENT_REQ(x) ((x) << 0)
49#define CG_CLIENT_REQ_MASK (0xff << 0) 53#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f2b9e21ce4da..5623e7542d99 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
1680 case RADEON_HPD_6: 1680 case RADEON_HPD_6:
1681 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 1681 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
1682 connected = true; 1682 connected = true;
1683 break; 1683 break;
1684 default: 1684 default:
1685 break; 1685 break;
1686 } 1686 }
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
1223 1223
1224int kv_dpm_late_enable(struct radeon_device *rdev) 1224int kv_dpm_late_enable(struct radeon_device *rdev)
1225{ 1225{
1226 int ret; 1226 int ret = 0;
1227 1227
1228 if (rdev->irq.installed && 1228 if (rdev->irq.installed &&
1229 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1229 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
2588 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2588 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2589 enable_sq_ramping = false; 2589 enable_sq_ramping = false;
2590 2590
2591 if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2591 if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2592 enable_sq_ramping = false; 2592 enable_sq_ramping = false;
2593 2593
2594 for (i = 0; i < state->performance_level_count; i++) { 2594 for (i = 0; i < state->performance_level_count; i++) {
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
3945 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 3945 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3946 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 3946 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3947 struct ni_ps *ps = ni_get_ps(rps); 3947 struct ni_ps *ps = ni_get_ps(rps);
3948 u16 vddc;
3949 struct rv7xx_pl *pl = &ps->performance_levels[index]; 3948 struct rv7xx_pl *pl = &ps->performance_levels[index];
3950 3949
3951 ps->performance_level_count = index + 1; 3950 ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
3961 3960
3962 /* patch up vddc if necessary */ 3961 /* patch up vddc if necessary */
3963 if (pl->vddc == 0xff01) { 3962 if (pl->vddc == 0xff01) {
3964 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) 3963 if (pi->max_vddc)
3965 pl->vddc = vddc; 3964 pl->vddc = pi->max_vddc;
3966 } 3965 }
3967 3966
3968 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 3967 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
4322void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 4321void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
4323 struct seq_file *m) 4322 struct seq_file *m)
4324{ 4323{
4325 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 4324 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
4325 struct radeon_ps *rps = &eg_pi->current_rps;
4326 struct ni_ps *ps = ni_get_ps(rps); 4326 struct ni_ps *ps = ni_get_ps(rps);
4327 struct rv7xx_pl *pl; 4327 struct rv7xx_pl *pl;
4328 u32 current_index = 4328 u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3991,6 +3991,10 @@ restart_ih:
3991 break; 3991 break;
3992 } 3992 }
3993 break; 3993 break;
3994 case 124: /* UVD */
3995 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
3996 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
3997 break;
3994 case 176: /* CP_INT in ring buffer */ 3998 case 176: /* CP_INT in ring buffer */
3995 case 177: /* CP_INT in IB1 */ 3999 case 177: /* CP_INT in IB1 */
3996 case 178: /* CP_INT in IB2 */ 4000 case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a8ac1cd6b4c..024db37b1832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
135/* R600+ */ 135/* R600+ */
136#define R600_RING_TYPE_UVD_INDEX 5 136#define R600_RING_TYPE_UVD_INDEX 5
137 137
138/* number of hw syncs before falling back on blocking */
139#define RADEON_NUM_SYNCS 4
140
138/* hardcode those limit for now */ 141/* hardcode those limit for now */
139#define RADEON_VA_IB_OFFSET (1 << 20) 142#define RADEON_VA_IB_OFFSET (1 << 20)
140#define RADEON_VA_RESERVED_SIZE (8 << 20) 143#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
554/* 557/*
555 * Semaphores. 558 * Semaphores.
556 */ 559 */
557/* everything here is constant */
558struct radeon_semaphore { 560struct radeon_semaphore {
559 struct radeon_sa_bo *sa_bo; 561 struct radeon_sa_bo *sa_bo;
560 signed waiters; 562 signed waiters;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
1555 .get_sclk = &btc_dpm_get_sclk, 1555 .get_sclk = &btc_dpm_get_sclk,
1556 .get_mclk = &btc_dpm_get_mclk, 1556 .get_mclk = &btc_dpm_get_mclk,
1557 .print_power_state = &rv770_dpm_print_power_state, 1557 .print_power_state = &rv770_dpm_print_power_state,
1558 .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, 1558 .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
1559 .force_performance_level = &rv770_dpm_force_performance_level, 1559 .force_performance_level = &rv770_dpm_force_performance_level,
1560 .vblank_too_short = &btc_dpm_vblank_too_short, 1560 .vblank_too_short = &btc_dpm_vblank_too_short,
1561 }, 1561 },
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
551u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); 551u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
552u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); 552u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
553bool btc_dpm_vblank_too_short(struct radeon_device *rdev); 553bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
554void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
555 struct seq_file *m);
554int sumo_dpm_init(struct radeon_device *rdev); 556int sumo_dpm_init(struct radeon_device *rdev);
555int sumo_dpm_enable(struct radeon_device *rdev); 557int sumo_dpm_enable(struct radeon_device *rdev);
556int sumo_dpm_late_enable(struct radeon_device *rdev); 558int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d680608f6f5b..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
571 radeon_crtc->max_cursor_width = CURSOR_WIDTH; 571 radeon_crtc->max_cursor_width = CURSOR_WIDTH;
572 radeon_crtc->max_cursor_height = CURSOR_HEIGHT; 572 radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
573 } 573 }
574 dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
575 dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
574 576
575#if 0 577#if 0
576 radeon_crtc->mode_set.crtc = &radeon_crtc->base; 578 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1b783f0e6d3a..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
139 } 139 }
140 140
141 /* 64 dwords should be enough for fence too */ 141 /* 64 dwords should be enough for fence too */
142 r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); 142 r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
143 if (r) { 143 if (r) {
144 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); 144 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
145 return r; 145 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
34int radeon_semaphore_create(struct radeon_device *rdev, 34int radeon_semaphore_create(struct radeon_device *rdev,
35 struct radeon_semaphore **semaphore) 35 struct radeon_semaphore **semaphore)
36{ 36{
37 uint32_t *cpu_addr;
37 int i, r; 38 int i, r;
38 39
39 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 40 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
40 if (*semaphore == NULL) { 41 if (*semaphore == NULL) {
41 return -ENOMEM; 42 return -ENOMEM;
42 } 43 }
43 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, 44 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
44 &(*semaphore)->sa_bo, 8, 8, true); 45 8 * RADEON_NUM_SYNCS, 8, true);
45 if (r) { 46 if (r) {
46 kfree(*semaphore); 47 kfree(*semaphore);
47 *semaphore = NULL; 48 *semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
49 } 50 }
50 (*semaphore)->waiters = 0; 51 (*semaphore)->waiters = 0;
51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 52 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
52 *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 53
54 cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
55 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
56 cpu_addr[i] = 0;
53 57
54 for (i = 0; i < RADEON_NUM_RINGS; ++i) 58 for (i = 0; i < RADEON_NUM_RINGS; ++i)
55 (*semaphore)->sync_to[i] = NULL; 59 (*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
125 struct radeon_semaphore *semaphore, 129 struct radeon_semaphore *semaphore,
126 int ring) 130 int ring)
127{ 131{
132 unsigned count = 0;
128 int i, r; 133 int i, r;
129 134
130 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 135 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
140 return -EINVAL; 145 return -EINVAL;
141 } 146 }
142 147
148 if (++count > RADEON_NUM_SYNCS) {
149 /* not enough room, wait manually */
150 radeon_fence_wait_locked(fence);
151 continue;
152 }
153
143 /* allocate enough space for sync command */ 154 /* allocate enough space for sync command */
144 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); 155 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
145 if (r) { 156 if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
164 175
165 radeon_ring_commit(rdev, &rdev->ring[i]); 176 radeon_ring_commit(rdev, &rdev->ring[i]);
166 radeon_fence_note_sync(fence, ring); 177 radeon_fence_note_sync(fence, ring);
178
179 semaphore->gpu_addr += 8;
167 } 180 }
168 181
169 return 0; 182 return 0;
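
The radeon_semaphore.c change above gives each semaphore object RADEON_NUM_SYNCS hardware sync slots and falls back to a blocking CPU-side fence wait once more rings need syncing than there are slots. A minimal stand-alone C sketch of that bounded-fast-path-with-fallback shape follows; NUM_SYNC_SLOTS, ring_needs_sync(), emit_hw_semaphore() and wait_fence_on_cpu() are invented for illustration and are not kernel APIs.

/*
 * Sketch: a fixed pool of hardware sync slots, with a blocking CPU wait
 * once the pool is exhausted (same shape as the ++count > RADEON_NUM_SYNCS
 * check in the patch above).  All names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS      8
#define NUM_SYNC_SLOTS 4   /* stands in for RADEON_NUM_SYNCS */

static bool ring_needs_sync(int ring)
{
	/* pretend six of the eight rings have an outstanding dependency */
	return ring < 6;
}

static void emit_hw_semaphore(int ring, unsigned slot)
{
	printf("ring %d: GPU semaphore, slot %u\n", ring, slot);
}

static void wait_fence_on_cpu(int ring)
{
	printf("ring %d: no slot left, blocking CPU wait\n", ring);
}

int main(void)
{
	unsigned count = 0;

	for (int i = 0; i < NUM_RINGS; ++i) {
		if (!ring_needs_sync(i))
			continue;

		if (++count > NUM_SYNC_SLOTS) {
			/* not enough room, wait manually */
			wait_fence_on_cpu(i);
			continue;
		}
		emit_hw_semaphore(i, count - 1);
	}
	return 0;
}
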
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
2174 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); 2174 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2175 struct rv7xx_ps *ps = rv770_get_ps(rps); 2175 struct rv7xx_ps *ps = rv770_get_ps(rps);
2176 u32 sclk, mclk; 2176 u32 sclk, mclk;
2177 u16 vddc;
2178 struct rv7xx_pl *pl; 2177 struct rv7xx_pl *pl;
2179 2178
2180 switch (index) { 2179 switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
2214 2213
2215 /* patch up vddc if necessary */ 2214 /* patch up vddc if necessary */
2216 if (pl->vddc == 0xff01) { 2215 if (pl->vddc == 0xff01) {
2217 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) 2216 if (pi->max_vddc)
2218 pl->vddc = vddc; 2217 pl->vddc = pi->max_vddc;
2219 } 2218 }
2220 2219
2221 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 2220 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2527bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2526bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
2528{ 2527{
2529 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2528 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2530 u32 switch_limit = 300; 2529 u32 switch_limit = 200; /* 300 */
2531
2532 /* quirks */
2533 /* ASUS K70AF */
2534 if ((rdev->pdev->device == 0x9553) &&
2535 (rdev->pdev->subsystem_vendor == 0x1043) &&
2536 (rdev->pdev->subsystem_device == 0x1c42))
2537 switch_limit = 200;
2538 2530
2539 /* RV770 */ 2531 /* RV770 */
2540 /* mclk switching doesn't seem to work reliably on desktop RV770s */ 2532 /* mclk switching doesn't seem to work reliably on desktop RV770s */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
6338 break; 6338 break;
6339 } 6339 }
6340 break; 6340 break;
6341 case 124: /* UVD */
6342 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6343 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6344 break;
6341 case 146: 6345 case 146:
6342 case 147: 6346 case 147:
6343 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 6347 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
2395 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2395 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2396 enable_sq_ramping = false; 2396 enable_sq_ramping = false;
2397 2397
2398 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2398 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2399 enable_sq_ramping = false; 2399 enable_sq_ramping = false;
2400 2400
2401 for (i = 0; i < state->performance_level_count; i++) { 2401 for (i = 0; i < state->performance_level_count; i++) {
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
6472void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 6472void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
6473 struct seq_file *m) 6473 struct seq_file *m)
6474{ 6474{
6475 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 6475 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
6476 struct radeon_ps *rps = &eg_pi->current_rps;
6476 struct ni_ps *ps = ni_get_ps(rps); 6477 struct ni_ps *ps = ni_get_ps(rps);
6477 struct rv7xx_pl *pl; 6478 struct rv7xx_pl *pl;
6478 u32 current_index = 6479 u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
1807 struct seq_file *m) 1807 struct seq_file *m)
1808{ 1808{
1809 struct sumo_power_info *pi = sumo_get_pi(rdev); 1809 struct sumo_power_info *pi = sumo_get_pi(rdev);
1810 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 1810 struct radeon_ps *rps = &pi->current_rps;
1811 struct sumo_ps *ps = sumo_get_ps(rps); 1811 struct sumo_ps *ps = sumo_get_ps(rps);
1812 struct sumo_pl *pl; 1812 struct sumo_pl *pl;
1813 u32 current_index = 1813 u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
1926void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 1926void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
1927 struct seq_file *m) 1927 struct seq_file *m)
1928{ 1928{
1929 struct radeon_ps *rps = rdev->pm.dpm.current_ps; 1929 struct trinity_power_info *pi = trinity_get_pi(rdev);
1930 struct radeon_ps *rps = &pi->current_rps;
1930 struct trinity_ps *ps = trinity_get_ps(rps); 1931 struct trinity_ps *ps = trinity_get_ps(rps);
1931 struct trinity_pl *pl; 1932 struct trinity_pl *pl;
1932 u32 current_index = 1933 u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
57 radeon_ring_write(ring, 0); 57 radeon_ring_write(ring, 0);
58 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); 58 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
59 radeon_ring_write(ring, 2); 59 radeon_ring_write(ring, 2);
60 return;
61} 60}
62 61
63/** 62/**
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..764be36397fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
126 agp_be->ttm.func = &ttm_agp_func; 126 agp_be->ttm.func = &ttm_agp_func;
127 127
128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { 128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
129 kfree(agp_be);
129 return NULL; 130 return NULL;
130 } 131 }
131 132
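
The one-line ttm_agp_backend.c change above plugs a leak: if ttm_tt_init() fails, the freshly allocated backend was previously never freed. A stand-alone sketch of the same error-path cleanup follows, with made-up names (struct backend, backend_init, backend_create).

/* Sketch: free a partially constructed object when a later init step fails. */
#include <stdio.h>
#include <stdlib.h>

struct backend {
	int configured;
};

static int backend_init(struct backend *be)
{
	(void)be;
	return -1;          /* pretend initialization fails */
}

static struct backend *backend_create(void)
{
	struct backend *be = malloc(sizeof(*be));

	if (!be)
		return NULL;

	if (backend_init(be)) {
		free(be);   /* the cleanup the patch adds: don't leak 'be' */
		return NULL;
	}
	return be;
}

int main(void)
{
	printf("create: %p\n", (void *)backend_create());
	return 0;
}
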
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index b645647b7776..bb594c11605e 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -1223,9 +1223,19 @@ typedef enum {
1223#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 1223#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
1224 1224
1225#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 1225#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
1226 1226#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
1227#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
1228#define SVGA_3D_CMD_GB_MOB_FENCE 1133
1229#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
1227#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 1230#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
1228#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 1231#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
1232#define SVGA_3D_CMD_NOP_ERROR 1137
1233
1234#define SVGA_3D_CMD_RESERVED1 1138
1235#define SVGA_3D_CMD_RESERVED2 1139
1236#define SVGA_3D_CMD_RESERVED3 1140
1237#define SVGA_3D_CMD_RESERVED4 1141
1238#define SVGA_3D_CMD_RESERVED5 1142
1229 1239
1230#define SVGA_3D_CMD_MAX 1142 1240#define SVGA_3D_CMD_MAX 1142
1231#define SVGA_3D_CMD_FUTURE_MAX 3000 1241#define SVGA_3D_CMD_FUTURE_MAX 3000
@@ -1973,8 +1983,7 @@ struct {
1973 uint32 sizeInBytes; 1983 uint32 sizeInBytes;
1974 uint32 validSizeInBytes; 1984 uint32 validSizeInBytes;
1975 SVGAMobFormat ptDepth; 1985 SVGAMobFormat ptDepth;
1976} 1986} __packed
1977__attribute__((__packed__))
1978SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ 1987SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
1979 1988
1980typedef 1989typedef
@@ -1984,15 +1993,13 @@ struct {
1984 uint32 sizeInBytes; 1993 uint32 sizeInBytes;
1985 uint32 validSizeInBytes; 1994 uint32 validSizeInBytes;
1986 SVGAMobFormat ptDepth; 1995 SVGAMobFormat ptDepth;
1987} 1996} __packed
1988__attribute__((__packed__))
1989SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ 1997SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
1990 1998
1991typedef 1999typedef
1992struct { 2000struct {
1993 SVGAOTableType type; 2001 SVGAOTableType type;
1994} 2002} __packed
1995__attribute__((__packed__))
1996SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ 2003SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
1997 2004
1998/* 2005/*
@@ -2005,8 +2012,7 @@ struct SVGA3dCmdDefineGBMob {
2005 SVGAMobFormat ptDepth; 2012 SVGAMobFormat ptDepth;
2006 PPN base; 2013 PPN base;
2007 uint32 sizeInBytes; 2014 uint32 sizeInBytes;
2008} 2015} __packed
2009__attribute__((__packed__))
2010SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ 2016SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
2011 2017
2012 2018
@@ -2017,8 +2023,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
2017typedef 2023typedef
2018struct SVGA3dCmdDestroyGBMob { 2024struct SVGA3dCmdDestroyGBMob {
2019 SVGAMobId mobid; 2025 SVGAMobId mobid;
2020} 2026} __packed
2021__attribute__((__packed__))
2022SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ 2027SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
2023 2028
2024/* 2029/*
@@ -2031,8 +2036,7 @@ struct SVGA3dCmdRedefineGBMob {
2031 SVGAMobFormat ptDepth; 2036 SVGAMobFormat ptDepth;
2032 PPN base; 2037 PPN base;
2033 uint32 sizeInBytes; 2038 uint32 sizeInBytes;
2034} 2039} __packed
2035__attribute__((__packed__))
2036SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ 2040SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
2037 2041
2038/* 2042/*
@@ -2045,8 +2049,7 @@ struct SVGA3dCmdDefineGBMob64 {
2045 SVGAMobFormat ptDepth; 2049 SVGAMobFormat ptDepth;
2046 PPN64 base; 2050 PPN64 base;
2047 uint32 sizeInBytes; 2051 uint32 sizeInBytes;
2048} 2052} __packed
2049__attribute__((__packed__))
2050SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ 2053SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
2051 2054
2052/* 2055/*
@@ -2059,8 +2062,7 @@ struct SVGA3dCmdRedefineGBMob64 {
2059 SVGAMobFormat ptDepth; 2062 SVGAMobFormat ptDepth;
2060 PPN64 base; 2063 PPN64 base;
2061 uint32 sizeInBytes; 2064 uint32 sizeInBytes;
2062} 2065} __packed
2063__attribute__((__packed__))
2064SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ 2066SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
2065 2067
2066/* 2068/*
@@ -2070,8 +2072,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
2070typedef 2072typedef
2071struct SVGA3dCmdUpdateGBMobMapping { 2073struct SVGA3dCmdUpdateGBMobMapping {
2072 SVGAMobId mobid; 2074 SVGAMobId mobid;
2073} 2075} __packed
2074__attribute__((__packed__))
2075SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ 2076SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
2076 2077
2077/* 2078/*
@@ -2087,7 +2088,8 @@ struct SVGA3dCmdDefineGBSurface {
2087 uint32 multisampleCount; 2088 uint32 multisampleCount;
2088 SVGA3dTextureFilter autogenFilter; 2089 SVGA3dTextureFilter autogenFilter;
2089 SVGA3dSize size; 2090 SVGA3dSize size;
2090} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 2091} __packed
2092SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
2091 2093
2092/* 2094/*
2093 * Destroy a guest-backed surface. 2095 * Destroy a guest-backed surface.
@@ -2096,7 +2098,8 @@ struct SVGA3dCmdDefineGBSurface {
2096typedef 2098typedef
2097struct SVGA3dCmdDestroyGBSurface { 2099struct SVGA3dCmdDestroyGBSurface {
2098 uint32 sid; 2100 uint32 sid;
2099} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 2101} __packed
2102SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
2100 2103
2101/* 2104/*
2102 * Bind a guest-backed surface to an object. 2105 * Bind a guest-backed surface to an object.
@@ -2106,7 +2109,8 @@ typedef
2106struct SVGA3dCmdBindGBSurface { 2109struct SVGA3dCmdBindGBSurface {
2107 uint32 sid; 2110 uint32 sid;
2108 SVGAMobId mobid; 2111 SVGAMobId mobid;
2109} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 2112} __packed
2113SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
2110 2114
2111/* 2115/*
2112 * Conditionally bind a mob to a guest backed surface if testMobid 2116 * Conditionally bind a mob to a guest backed surface if testMobid
@@ -2123,7 +2127,7 @@ struct{
2123 SVGAMobId testMobid; 2127 SVGAMobId testMobid;
2124 SVGAMobId mobid; 2128 SVGAMobId mobid;
2125 uint32 flags; 2129 uint32 flags;
2126} 2130} __packed
2127SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ 2131SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
2128 2132
2129/* 2133/*
@@ -2135,7 +2139,8 @@ typedef
2135struct SVGA3dCmdUpdateGBImage { 2139struct SVGA3dCmdUpdateGBImage {
2136 SVGA3dSurfaceImageId image; 2140 SVGA3dSurfaceImageId image;
2137 SVGA3dBox box; 2141 SVGA3dBox box;
2138} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 2142} __packed
2143SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
2139 2144
2140/* 2145/*
2141 * Update an entire guest-backed surface. 2146 * Update an entire guest-backed surface.
@@ -2145,7 +2150,8 @@ struct SVGA3dCmdUpdateGBImage {
2145typedef 2150typedef
2146struct SVGA3dCmdUpdateGBSurface { 2151struct SVGA3dCmdUpdateGBSurface {
2147 uint32 sid; 2152 uint32 sid;
2148} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 2153} __packed
2154SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
2149 2155
2150/* 2156/*
2151 * Readback an image in a guest-backed surface. 2157 * Readback an image in a guest-backed surface.
@@ -2155,7 +2161,8 @@ struct SVGA3dCmdUpdateGBSurface {
2155typedef 2161typedef
2156struct SVGA3dCmdReadbackGBImage { 2162struct SVGA3dCmdReadbackGBImage {
2157 SVGA3dSurfaceImageId image; 2163 SVGA3dSurfaceImageId image;
2158} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 2164} __packed
2165SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
2159 2166
2160/* 2167/*
2161 * Readback an entire guest-backed surface. 2168 * Readback an entire guest-backed surface.
@@ -2165,7 +2172,8 @@ struct SVGA3dCmdReadbackGBImage {
2165typedef 2172typedef
2166struct SVGA3dCmdReadbackGBSurface { 2173struct SVGA3dCmdReadbackGBSurface {
2167 uint32 sid; 2174 uint32 sid;
2168} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 2175} __packed
2176SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
2169 2177
2170/* 2178/*
2171 * Readback a sub rect of an image in a guest-backed surface. After 2179 * Readback a sub rect of an image in a guest-backed surface. After
@@ -2179,7 +2187,7 @@ struct SVGA3dCmdReadbackGBImagePartial {
2179 SVGA3dSurfaceImageId image; 2187 SVGA3dSurfaceImageId image;
2180 SVGA3dBox box; 2188 SVGA3dBox box;
2181 uint32 invertBox; 2189 uint32 invertBox;
2182} 2190} __packed
2183SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ 2191SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
2184 2192
2185/* 2193/*
@@ -2190,7 +2198,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
2190typedef 2198typedef
2191struct SVGA3dCmdInvalidateGBImage { 2199struct SVGA3dCmdInvalidateGBImage {
2192 SVGA3dSurfaceImageId image; 2200 SVGA3dSurfaceImageId image;
2193} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2201} __packed
2202SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
2194 2203
2195/* 2204/*
2196 * Invalidate an entire guest-backed surface. 2205 * Invalidate an entire guest-backed surface.
@@ -2200,7 +2209,8 @@ struct SVGA3dCmdInvalidateGBImage {
2200typedef 2209typedef
2201struct SVGA3dCmdInvalidateGBSurface { 2210struct SVGA3dCmdInvalidateGBSurface {
2202 uint32 sid; 2211 uint32 sid;
2203} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2212} __packed
2213SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
2204 2214
2205/* 2215/*
2206 * Invalidate a sub rect of an image in a guest-backed surface. After 2216 * Invalidate a sub rect of an image in a guest-backed surface. After
@@ -2214,7 +2224,7 @@ struct SVGA3dCmdInvalidateGBImagePartial {
2214 SVGA3dSurfaceImageId image; 2224 SVGA3dSurfaceImageId image;
2215 SVGA3dBox box; 2225 SVGA3dBox box;
2216 uint32 invertBox; 2226 uint32 invertBox;
2217} 2227} __packed
2218SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ 2228SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
2219 2229
2220/* 2230/*
@@ -2224,7 +2234,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
2224typedef 2234typedef
2225struct SVGA3dCmdDefineGBContext { 2235struct SVGA3dCmdDefineGBContext {
2226 uint32 cid; 2236 uint32 cid;
2227} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2237} __packed
2238SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
2228 2239
2229/* 2240/*
2230 * Destroy a guest-backed context. 2241 * Destroy a guest-backed context.
@@ -2233,7 +2244,8 @@ struct SVGA3dCmdDefineGBContext {
2233typedef 2244typedef
2234struct SVGA3dCmdDestroyGBContext { 2245struct SVGA3dCmdDestroyGBContext {
2235 uint32 cid; 2246 uint32 cid;
2236} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2247} __packed
2248SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
2237 2249
2238/* 2250/*
2239 * Bind a guest-backed context. 2251 * Bind a guest-backed context.
@@ -2252,7 +2264,8 @@ struct SVGA3dCmdBindGBContext {
2252 uint32 cid; 2264 uint32 cid;
2253 SVGAMobId mobid; 2265 SVGAMobId mobid;
2254 uint32 validContents; 2266 uint32 validContents;
2255} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2267} __packed
2268SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
2256 2269
2257/* 2270/*
2258 * Readback a guest-backed context. 2271 * Readback a guest-backed context.
@@ -2262,7 +2275,8 @@ struct SVGA3dCmdBindGBContext {
2262typedef 2275typedef
2263struct SVGA3dCmdReadbackGBContext { 2276struct SVGA3dCmdReadbackGBContext {
2264 uint32 cid; 2277 uint32 cid;
2265} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2278} __packed
2279SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
2266 2280
2267/* 2281/*
2268 * Invalidate a guest-backed context. 2282 * Invalidate a guest-backed context.
@@ -2270,7 +2284,8 @@ struct SVGA3dCmdReadbackGBContext {
2270typedef 2284typedef
2271struct SVGA3dCmdInvalidateGBContext { 2285struct SVGA3dCmdInvalidateGBContext {
2272 uint32 cid; 2286 uint32 cid;
2273} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2287} __packed
2288SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
2274 2289
2275/* 2290/*
2276 * Define a guest-backed shader. 2291 * Define a guest-backed shader.
@@ -2281,7 +2296,8 @@ struct SVGA3dCmdDefineGBShader {
2281 uint32 shid; 2296 uint32 shid;
2282 SVGA3dShaderType type; 2297 SVGA3dShaderType type;
2283 uint32 sizeInBytes; 2298 uint32 sizeInBytes;
2284} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2299} __packed
2300SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
2285 2301
2286/* 2302/*
2287 * Bind a guest-backed shader. 2303 * Bind a guest-backed shader.
@@ -2291,7 +2307,8 @@ typedef struct SVGA3dCmdBindGBShader {
2291 uint32 shid; 2307 uint32 shid;
2292 SVGAMobId mobid; 2308 SVGAMobId mobid;
2293 uint32 offsetInBytes; 2309 uint32 offsetInBytes;
2294} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2310} __packed
2311SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
2295 2312
2296/* 2313/*
2297 * Destroy a guest-backed shader. 2314 * Destroy a guest-backed shader.
@@ -2299,7 +2316,8 @@ typedef struct SVGA3dCmdBindGBShader {
2299 2316
2300typedef struct SVGA3dCmdDestroyGBShader { 2317typedef struct SVGA3dCmdDestroyGBShader {
2301 uint32 shid; 2318 uint32 shid;
2302} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2319} __packed
2320SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
2303 2321
2304typedef 2322typedef
2305struct { 2323struct {
@@ -2314,14 +2332,16 @@ struct {
2314 * Note that FLOAT and INT constants are 4-dwords in length, while 2332 * Note that FLOAT and INT constants are 4-dwords in length, while
2315 * BOOL constants are 1-dword in length. 2333 * BOOL constants are 1-dword in length.
2316 */ 2334 */
2317} SVGA3dCmdSetGBShaderConstInline; 2335} __packed
2336SVGA3dCmdSetGBShaderConstInline;
2318/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ 2337/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
2319 2338
2320typedef 2339typedef
2321struct { 2340struct {
2322 uint32 cid; 2341 uint32 cid;
2323 SVGA3dQueryType type; 2342 SVGA3dQueryType type;
2324} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2343} __packed
2344SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
2325 2345
2326typedef 2346typedef
2327struct { 2347struct {
@@ -2329,7 +2349,8 @@ struct {
2329 SVGA3dQueryType type; 2349 SVGA3dQueryType type;
2330 SVGAMobId mobid; 2350 SVGAMobId mobid;
2331 uint32 offset; 2351 uint32 offset;
2332} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2352} __packed
2353SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
2333 2354
2334 2355
2335/* 2356/*
@@ -2346,21 +2367,22 @@ struct {
2346 SVGA3dQueryType type; 2367 SVGA3dQueryType type;
2347 SVGAMobId mobid; 2368 SVGAMobId mobid;
2348 uint32 offset; 2369 uint32 offset;
2349} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2370} __packed
2371SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
2350 2372
2351typedef 2373typedef
2352struct { 2374struct {
2353 SVGAMobId mobid; 2375 SVGAMobId mobid;
2354 uint32 fbOffset; 2376 uint32 fbOffset;
2355 uint32 initalized; 2377 uint32 initalized;
2356} 2378} __packed
2357SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ 2379SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
2358 2380
2359typedef 2381typedef
2360struct { 2382struct {
2361 SVGAMobId mobid; 2383 SVGAMobId mobid;
2362 uint32 gartOffset; 2384 uint32 gartOffset;
2363} 2385} __packed
2364SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ 2386SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
2365 2387
2366 2388
@@ -2368,7 +2390,7 @@ typedef
2368struct { 2390struct {
2369 uint32 gartOffset; 2391 uint32 gartOffset;
2370 uint32 numPages; 2392 uint32 numPages;
2371} 2393} __packed
2372SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ 2394SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
2373 2395
2374 2396
@@ -2385,27 +2407,27 @@ struct {
2385 int32 xRoot; 2407 int32 xRoot;
2386 int32 yRoot; 2408 int32 yRoot;
2387 uint32 flags; 2409 uint32 flags;
2388} 2410} __packed
2389SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ 2411SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
2390 2412
2391typedef 2413typedef
2392struct { 2414struct {
2393 uint32 stid; 2415 uint32 stid;
2394} 2416} __packed
2395SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ 2417SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
2396 2418
2397typedef 2419typedef
2398struct { 2420struct {
2399 uint32 stid; 2421 uint32 stid;
2400 SVGA3dSurfaceImageId image; 2422 SVGA3dSurfaceImageId image;
2401} 2423} __packed
2402SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ 2424SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
2403 2425
2404typedef 2426typedef
2405struct { 2427struct {
2406 uint32 stid; 2428 uint32 stid;
2407 SVGA3dBox box; 2429 SVGA3dBox box;
2408} 2430} __packed
2409SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ 2431SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
2410 2432
2411/* 2433/*
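
The svga3d_reg.h hunks above mark the guest-backed-object command structures as packed (__packed is the kernel shorthand for __attribute__((__packed__))), since these structs describe a layout shared with the virtual device and compiler-inserted padding would change command sizes and field offsets. A small stand-alone sketch of the effect follows; the struct names are invented, not real SVGA commands.

/* Sketch: packing removes ABI-dependent padding from a wire-format struct. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct cmd_unpacked {
	uint32_t id;
	uint64_t base;      /* may be preceded by padding, depending on ABI */
	uint32_t size;
};

struct cmd_packed {
	uint32_t id;
	uint64_t base;
	uint32_t size;
} __attribute__((__packed__));

int main(void)
{
	printf("unpacked: size=%zu offsetof(base)=%zu\n",
	       sizeof(struct cmd_unpacked), offsetof(struct cmd_unpacked, base));
	printf("packed:   size=%zu offsetof(base)=%zu\n",
	       sizeof(struct cmd_packed), offsetof(struct cmd_packed, base));
	return 0;
}
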
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
38 38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) 39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y)) 40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
41#define surf_size_struct SVGA3dSize 42#define surf_size_struct SVGA3dSize
42#define u32 uint32 43#define u32 uint32
44#define u64 uint64_t
45#define U32_MAX ((u32)~0U)
43 46
44#endif /* __KERNEL__ */ 47#endif /* __KERNEL__ */
45 48
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
704 707
705static inline u32 clamped_umul32(u32 a, u32 b) 708static inline u32 clamped_umul32(u32 a, u32 b)
706{ 709{
707 uint64_t tmp = (uint64_t) a*b; 710 u64 tmp = (u64) a*b;
708 return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; 711 return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
709} 712}
710 713
711static inline const struct svga3d_surface_desc * 714static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
834 bool cubemap) 837 bool cubemap)
835{ 838{
836 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); 839 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
837 u32 total_size = 0; 840 u64 total_size = 0;
838 u32 mip; 841 u32 mip;
839 842
840 for (mip = 0; mip < num_mip_levels; mip++) { 843 for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
847 if (cubemap) 850 if (cubemap)
848 total_size *= SVGA3D_MAX_SURFACE_FACES; 851 total_size *= SVGA3D_MAX_SURFACE_FACES;
849 852
850 return total_size; 853 return (u32) min_t(u64, total_size, (u64) U32_MAX);
851} 854}
852 855
853 856
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 71defa4d2d75..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,10 +169,17 @@ enum {
169 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ 169 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
170 SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ 170 SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
171 SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ 171 SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
172 SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
173 SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
172 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ 174 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
173 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ 175 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
174 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ 176 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
175 SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ 177 SVGA_REG_CMD_PREPEND_LOW = 53,
178 SVGA_REG_CMD_PREPEND_HIGH = 54,
179 SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
180 SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
181 SVGA_REG_MOB_MAX_SIZE = 57,
182 SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
176 183
177 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 184 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
178 /* Next 768 (== 256*3) registers exist for colormap */ 185 /* Next 768 (== 256*3) registers exist for colormap */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 9426c53fb483..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -551,8 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
551 cmd->header.size = sizeof(cmd->body); 551 cmd->header.size = sizeof(cmd->body);
552 cmd->body.cid = bi->ctx->id; 552 cmd->body.cid = bi->ctx->id;
553 cmd->body.type = bi->i1.shader_type; 553 cmd->body.type = bi->i1.shader_type;
554 cmd->body.shid = 554 cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
555 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
556 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 555 vmw_fifo_commit(dev_priv, sizeof(*cmd));
557 556
558 return 0; 557 return 0;
@@ -585,8 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
585 cmd->header.size = sizeof(cmd->body); 584 cmd->header.size = sizeof(cmd->body);
586 cmd->body.cid = bi->ctx->id; 585 cmd->body.cid = bi->ctx->id;
587 cmd->body.type = bi->i1.rt_type; 586 cmd->body.type = bi->i1.rt_type;
588 cmd->body.target.sid = 587 cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
589 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
590 cmd->body.target.face = 0; 588 cmd->body.target.face = 0;
591 cmd->body.target.mipmap = 0; 589 cmd->body.target.mipmap = 0;
592 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 590 vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -628,8 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
628 cmd->body.c.cid = bi->ctx->id; 626 cmd->body.c.cid = bi->ctx->id;
629 cmd->body.s1.stage = bi->i1.texture_stage; 627 cmd->body.s1.stage = bi->i1.texture_stage;
630 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 628 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
631 cmd->body.s1.value = 629 cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
632 cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
633 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 630 vmw_fifo_commit(dev_priv, sizeof(*cmd));
634 631
635 return 0; 632 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3bdc0adc656d..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
667 dev_priv->memory_size = 512*1024*1024; 667 dev_priv->memory_size = 512*1024*1024;
668 } 668 }
669 dev_priv->max_mob_pages = 0; 669 dev_priv->max_mob_pages = 0;
670 dev_priv->max_mob_size = 0;
670 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 671 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
671 uint64_t mem_size = 672 uint64_t mem_size =
672 vmw_read(dev_priv, 673 vmw_read(dev_priv,
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
676 dev_priv->prim_bb_mem = 677 dev_priv->prim_bb_mem =
677 vmw_read(dev_priv, 678 vmw_read(dev_priv,
678 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 679 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
680 dev_priv->max_mob_size =
681 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
679 } else 682 } else
680 dev_priv->prim_bb_mem = dev_priv->vram_size; 683 dev_priv->prim_bb_mem = dev_priv->vram_size;
681 684
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ecaa302a6154..9e4be1725985 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -386,6 +386,7 @@ struct vmw_private {
386 uint32_t max_gmr_ids; 386 uint32_t max_gmr_ids;
387 uint32_t max_gmr_pages; 387 uint32_t max_gmr_pages;
388 uint32_t max_mob_pages; 388 uint32_t max_mob_pages;
389 uint32_t max_mob_size;
389 uint32_t memory_size; 390 uint32_t memory_size;
390 bool has_gmr; 391 bool has_gmr;
391 bool has_mob; 392 bool has_mob;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 269b85cc875a..efb575a7996c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -602,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
602{ 602{
603 struct vmw_cid_cmd { 603 struct vmw_cid_cmd {
604 SVGA3dCmdHeader header; 604 SVGA3dCmdHeader header;
605 __le32 cid; 605 uint32_t cid;
606 } *cmd; 606 } *cmd;
607 607
608 cmd = container_of(header, struct vmw_cid_cmd, header); 608 cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -1835,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1835 return 0; 1835 return 0;
1836} 1836}
1837 1837
1838static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1838static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1839 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, 1839 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1840 false, false, false), 1840 false, false, false),
1841 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, 1841 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
@@ -2032,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
2032 goto out_invalid; 2032 goto out_invalid;
2033 2033
2034 entry = &vmw_cmd_entries[cmd_id]; 2034 entry = &vmw_cmd_entries[cmd_id];
2035 if (unlikely(!entry->func))
2036 goto out_invalid;
2037
2035 if (unlikely(!entry->user_allow && !sw_context->kernel)) 2038 if (unlikely(!entry->user_allow && !sw_context->kernel))
2036 goto out_privileged; 2039 goto out_privileged;
2037 2040
@@ -2469,7 +2472,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2469 if (dev_priv->has_mob) { 2472 if (dev_priv->has_mob) {
2470 ret = vmw_rebind_contexts(sw_context); 2473 ret = vmw_rebind_contexts(sw_context);
2471 if (unlikely(ret != 0)) 2474 if (unlikely(ret != 0))
2472 goto out_err; 2475 goto out_unlock_binding;
2473 } 2476 }
2474 2477
2475 cmd = vmw_fifo_reserve(dev_priv, command_size); 2478 cmd = vmw_fifo_reserve(dev_priv, command_size);
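
The vmw_cmd_check() hunk above adds a NULL check on the looked-up table entry, because the newly reserved SVGA command ids leave gaps in vmw_cmd_entries[] with no handler. A stand-alone sketch of a dispatch table with such holes follows; the command ids and handler names are invented.

/* Sketch: an id-indexed dispatch table whose unused slots must be rejected. */
#include <stdio.h>

#define CMD_MAX 4

typedef int (*cmd_func)(int arg);

static int cmd_draw(int arg)  { printf("draw %d\n", arg);  return 0; }
static int cmd_query(int arg) { printf("query %d\n", arg); return 0; }

static const cmd_func cmd_table[CMD_MAX] = {
	[0] = cmd_draw,
	[2] = cmd_query,      /* ids 1 and 3 are reserved: NULL entries */
};

static int dispatch(unsigned id, int arg)
{
	if (id >= CMD_MAX)
		return -1;
	if (!cmd_table[id])   /* the guard the patch adds */
		return -1;
	return cmd_table[id](arg);
}

int main(void)
{
	printf("id 0 -> %d\n", dispatch(0, 7));
	printf("id 1 -> %d\n", dispatch(1, 7));   /* reserved id, rejected */
	return 0;
}
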
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index f9881f9e62bd..47b70949bf3a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -102,6 +102,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
102 vmw_fp->gb_aware = true; 102 vmw_fp->gb_aware = true;
103 param->value = dev_priv->max_mob_pages * PAGE_SIZE; 103 param->value = dev_priv->max_mob_pages * PAGE_SIZE;
104 break; 104 break;
105 case DRM_VMW_PARAM_MAX_MOB_SIZE:
106 param->value = dev_priv->max_mob_size;
107 break;
105 default: 108 default:
106 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 109 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
107 param->param); 110 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 217d941b8176..ee3856578a12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -371,13 +371,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
371 TTM_REF_USAGE); 371 TTM_REF_USAGE);
372} 372}
373 373
374int vmw_shader_alloc(struct vmw_private *dev_priv, 374static int vmw_shader_alloc(struct vmw_private *dev_priv,
375 struct vmw_dma_buffer *buffer, 375 struct vmw_dma_buffer *buffer,
376 size_t shader_size, 376 size_t shader_size,
377 size_t offset, 377 size_t offset,
378 SVGA3dShaderType shader_type, 378 SVGA3dShaderType shader_type,
379 struct ttm_object_file *tfile, 379 struct ttm_object_file *tfile,
380 u32 *handle) 380 u32 *handle)
381{ 381{
382 struct vmw_user_shader *ushader; 382 struct vmw_user_shader *ushader;
383 struct vmw_resource *res, *tmp; 383 struct vmw_resource *res, *tmp;
@@ -779,6 +779,8 @@ vmw_compat_shader_man_create(struct vmw_private *dev_priv)
779 int ret; 779 int ret;
780 780
781 man = kzalloc(sizeof(*man), GFP_KERNEL); 781 man = kzalloc(sizeof(*man), GFP_KERNEL);
782 if (man == NULL)
783 return ERR_PTR(-ENOMEM);
782 784
783 man->dev_priv = dev_priv; 785 man->dev_priv = dev_priv;
784 INIT_LIST_HEAD(&man->list); 786 INIT_LIST_HEAD(&man->list);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 497558127bb3..f822fd2a1ada 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
472 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
473 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
474 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
472 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 475 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
473 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 476 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
474 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 477 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3bfac3accd22..cc32a6f96c64 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, 1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, 1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
1682 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
1682 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1684 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1684 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, 1685 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
1779 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
1782 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1785 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1786 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, 1787 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1414cc..c24908f14934 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@ struct mousevsc_dev {
157 u32 report_desc_size; 157 u32 report_desc_size;
158 struct hv_input_dev_info hid_dev_info; 158 struct hv_input_dev_info hid_dev_info;
159 struct hid_device *hid_device; 159 struct hid_device *hid_device;
160 u8 input_buf[HID_MAX_BUFFER_SIZE];
160}; 161};
161 162
162 163
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
256 struct synthhid_msg *hid_msg; 257 struct synthhid_msg *hid_msg;
257 struct mousevsc_dev *input_dev = hv_get_drvdata(device); 258 struct mousevsc_dev *input_dev = hv_get_drvdata(device);
258 struct synthhid_input_report *input_report; 259 struct synthhid_input_report *input_report;
260 size_t len;
259 261
260 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + 262 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
261 (packet->offset8 << 3)); 263 (packet->offset8 << 3));
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
300 (struct synthhid_input_report *)pipe_msg->data; 302 (struct synthhid_input_report *)pipe_msg->data;
301 if (!input_dev->init_complete) 303 if (!input_dev->init_complete)
302 break; 304 break;
303 hid_input_report(input_dev->hid_device, 305
304 HID_INPUT_REPORT, input_report->buffer, 306 len = min(input_report->header.size,
305 input_report->header.size, 1); 307 (u32)sizeof(input_dev->input_buf));
308 memcpy(input_dev->input_buf, input_report->buffer, len);
309 hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
310 input_dev->input_buf, len, 1);
306 break; 311 break;
307 default: 312 default:
308 pr_err("unsupported hid msg type - type %d len %d", 313 pr_err("unsupported hid msg type - type %d len %d",
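
The mousevsc_on_receive() hunk above stops trusting the host-supplied report length: the data is copied into a fixed per-device buffer, with the length clamped to that buffer's size before the copy. A stand-alone sketch of the clamp-before-memcpy pattern follows; INPUT_BUF_SIZE and receive_report() are illustrative stand-ins, not driver APIs.

/* Sketch: clamp an externally supplied length to the destination buffer size. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INPUT_BUF_SIZE 16   /* stands in for HID_MAX_BUFFER_SIZE */

static uint8_t input_buf[INPUT_BUF_SIZE];

static size_t receive_report(const uint8_t *data, uint32_t claimed_len)
{
	/* never trust claimed_len: clamp it to the destination size */
	size_t len = claimed_len < sizeof(input_buf) ?
		     claimed_len : sizeof(input_buf);

	memcpy(input_buf, data, len);
	return len;
}

int main(void)
{
	uint8_t report[64] = { 1, 2, 3, 4 };

	/* a hostile length of 64 is cut down to 16 before copying */
	printf("copied %zu bytes\n", receive_report(report, 64));
	return 0;
}
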
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a5248f2cc07..22f28d6b33a8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
135#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 135#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
136#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 136#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
137#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 137#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
138#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
138#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 139#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
139#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 140#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
140#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 141#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -240,6 +241,7 @@
240 241
241#define USB_VENDOR_ID_CYGNAL 0x10c4 242#define USB_VENDOR_ID_CYGNAL 0x10c4
242#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a 243#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
244#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
243 245
244#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 246#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
245 247
@@ -451,6 +453,9 @@
451#define USB_VENDOR_ID_INTEL_1 0x8087 453#define USB_VENDOR_ID_INTEL_1 0x8087
452#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa 454#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
453 455
456#define USB_VENDOR_ID_STM_0 0x0483
457#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
458
454#define USB_VENDOR_ID_ION 0x15e4 459#define USB_VENDOR_ID_ION 0x15e4
455#define USB_DEVICE_ID_ICADE 0x0132 460#define USB_DEVICE_ID_ICADE 0x0132
456 461
@@ -619,6 +624,8 @@
619#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 624#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
620#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 625#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
621#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 626#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
627#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
628#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
622 629
623#define USB_VENDOR_ID_MOJO 0x8282 630#define USB_VENDOR_ID_MOJO 0x8282
624#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 631#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -644,6 +651,7 @@
644 651
645#define USB_VENDOR_ID_NEXIO 0x1870 652#define USB_VENDOR_ID_NEXIO 0x1870
646#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d 653#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
654#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
647 655
648#define USB_VENDOR_ID_NEXTWINDOW 0x1926 656#define USB_VENDOR_ID_NEXTWINDOW 0x1926
649#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 657#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e7313b171..a713e6211419 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work)
1178 1178
1179 /* fall back to generic raw-output-report */ 1179 /* fall back to generic raw-output-report */
1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
1181 buf = kmalloc(len, GFP_KERNEL); 1181 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1182 if (!buf) 1182 if (!buf)
1183 return; 1183 return;
1184 1184
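
The hidinput_led_worker() hunk above only swaps the raw kmalloc() for the hid_alloc_report_buf() helper so the buffer is sized from the report itself; the length formula in the surrounding code is unchanged. Since that formula is easy to misread (report->size is in bits), a small worked sketch follows; report_len() and the sample sizes are illustrative.

/* Sketch: bits rounded up to bytes, plus one leading byte for a non-zero ID. */
#include <stdio.h>

static unsigned report_len(unsigned size_bits, unsigned report_id)
{
	return ((size_bits - 1) >> 3) + 1 + (report_id > 0);
}

int main(void)
{
	/* 3 LED bits with report ID 2 -> 1 data byte + 1 ID byte */
	printf("%u\n", report_len(3, 2));   /* prints 2 */
	/* 16 bits, no report ID -> 2 bytes */
	printf("%u\n", report_len(16, 0));  /* prints 2 */
	return 0;
}
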
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6eed3091..404a3a8a82f1 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = {
208 .driver_data = MS_NOGET }, 208 .driver_data = MS_NOGET },
209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
210 .driver_data = MS_DUPLICATE_USAGES }, 210 .driver_data = MS_DUPLICATE_USAGES },
211 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
212 .driver_data = 0 },
213 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
214 .driver_data = 0 },
211 215
212 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), 216 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
213 .driver_data = MS_PRESENTER }, 217 .driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73beca1..221d503f1c24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = {
1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, 1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
1167 USB_DEVICE_ID_MULTITOUCH_3200) }, 1167 USB_DEVICE_ID_MULTITOUCH_3200) },
1168 1168
1169 /* FocalTech Panels */
1170 { .driver_data = MT_CLS_SERIAL,
1171 MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
1172 USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
1173
1169 /* GeneralTouch panel */ 1174 /* GeneralTouch panel */
1170 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, 1175 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
1171 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 1176 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480035bc..9c22e14c57f0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, 665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
666 USB_DEVICE_ID_INTEL_HID_SENSOR), 666 USB_DEVICE_ID_INTEL_HID_SENSOR),
667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
668 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
669 USB_DEVICE_ID_STM_HID_SENSOR),
670 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
668 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 671 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
669 HID_ANY_ID) }, 672 HID_ANY_ID) },
670 { } 673 { }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f52481a..42eebd14de1f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
582 int ret; 582 int ret;
583 int len = i2c_hid_get_report_length(rep) - 2; 583 int len = i2c_hid_get_report_length(rep) - 2;
584 584
585 buf = kzalloc(len, GFP_KERNEL); 585 buf = hid_alloc_report_buf(rep, GFP_KERNEL);
586 if (!buf) 586 if (!buf)
587 return; 587 return;
588 588
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0afb70c..dbd83878ff99 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
67 int ret = 0; 67 int ret = 0;
68 struct vmbus_channel_initiate_contact *msg; 68 struct vmbus_channel_initiate_contact *msg;
69 unsigned long flags; 69 unsigned long flags;
70 int t;
71 70
72 init_completion(&msginfo->waitevent); 71 init_completion(&msginfo->waitevent);
73 72
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
78 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); 77 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
79 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); 78 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
80 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); 79 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
80 if (version == VERSION_WIN8)
81 msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
81 82
82 /* 83 /*
83 * Add to list before we send the request since we may 84 * Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
100 } 101 }
101 102
102 /* Wait for the connection response */ 103 /* Wait for the connection response */
103 t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); 104 wait_for_completion(&msginfo->waitevent);
104 if (t == 0) {
105 spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
106 flags);
107 list_del(&msginfo->msglistentry);
108 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
109 flags);
110 return -ETIMEDOUT;
111 }
112 105
113 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 106 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
114 list_del(&msginfo->msglistentry); 107 list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index a7626358c95d..029b65e6c589 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
243 data->temp_min[index] = clamp_val(temp/1000, -128, 127); 243 data->temp_min[index] = clamp_val(temp/1000, -128, 127);
244 if (i2c_smbus_write_byte_data(client, 244 if (i2c_smbus_write_byte_data(client,
245 MAX1668_REG_LIML_WR(index), 245 MAX1668_REG_LIML_WR(index),
246 data->temp_max[index])) 246 data->temp_min[index]))
247 count = -EIO; 247 count = -EIO;
248 mutex_unlock(&data->update_lock); 248 mutex_unlock(&data->update_lock);
249 249
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) 145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
146{ 146{
147 struct iio_channel *channel = pdata->chan; 147 struct iio_channel *channel = pdata->chan;
148 unsigned int result; 148 s64 result;
149 int val, ret; 149 int val, ret;
150 150
151 ret = iio_read_channel_raw(channel, &val); 151 ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
155 } 155 }
156 156
157 /* unit: mV */ 157 /* unit: mV */
158 result = pdata->pullup_uv * val; 158 result = pdata->pullup_uv * (s64) val;
159 result >>= 12; 159 result >>= 12;
160 160
161 return result; 161 return (int)result;
162} 162}
163 163
164static const struct of_device_id ntc_match[] = { 164static const struct of_device_id ntc_match[] = {
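The ntc_thermistor hunk widens the intermediate value to s64 because pullup_uv * val is otherwise evaluated in 32-bit int and overflows for realistic pull-up voltages. A small standalone sketch of the failure mode, assuming a 3.3 V pull-up expressed in microvolts and a 12-bit ADC reading; the int64_t cast mirrors the (s64) cast in the fix:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int pullup_uv = 3300000;        /* 3.3 V pull-up, in microvolts */
	int val = 4000;                 /* raw 12-bit ADC reading */

	/* 3,300,000 * 4,000 = 1.32e10 does not fit in 32 bits, so the
	 * product wraps before the shift (shown with defined unsigned math). */
	uint32_t wrapped = ((uint32_t)pullup_uv * (uint32_t)val) >> 12;

	/* Widening one operand first, as the fix does, keeps the full product. */
	int64_t full = ((int64_t)pullup_uv * val) >> 12;

	printf("32-bit result: %u\n", wrapped);
	printf("64-bit result: %lld\n", (long long)full);
	return 0;
}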
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b8c5187b9ee0..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
97enum { 97enum {
98 MV64XXX_I2C_ACTION_INVALID, 98 MV64XXX_I2C_ACTION_INVALID,
99 MV64XXX_I2C_ACTION_CONTINUE, 99 MV64XXX_I2C_ACTION_CONTINUE,
100 MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
101 MV64XXX_I2C_ACTION_SEND_START, 100 MV64XXX_I2C_ACTION_SEND_START,
102 MV64XXX_I2C_ACTION_SEND_RESTART, 101 MV64XXX_I2C_ACTION_SEND_RESTART,
103 MV64XXX_I2C_ACTION_OFFLOAD_RESTART, 102 MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
204 unsigned long ctrl_reg; 203 unsigned long ctrl_reg;
205 struct i2c_msg *msg = drv_data->msgs; 204 struct i2c_msg *msg = drv_data->msgs;
206 205
206 if (!drv_data->offload_enabled)
207 return -EOPNOTSUPP;
208
207 drv_data->msg = msg; 209 drv_data->msg = msg;
208 drv_data->byte_posn = 0; 210 drv_data->byte_posn = 0;
209 drv_data->bytes_left = msg->len; 211 drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
433 435
434 drv_data->msgs++; 436 drv_data->msgs++;
435 drv_data->num_msgs--; 437 drv_data->num_msgs--;
436 if (!(drv_data->offload_enabled && 438 if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
437 mv64xxx_i2c_offload_msg(drv_data))) {
438 drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; 439 drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
439 writel(drv_data->cntl_bits, 440 writel(drv_data->cntl_bits,
440 drv_data->reg_base + drv_data->reg_offsets.control); 441 drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
458 drv_data->reg_base + drv_data->reg_offsets.control); 459 drv_data->reg_base + drv_data->reg_offsets.control);
459 break; 460 break;
460 461
461 case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
462 if (!mv64xxx_i2c_offload_msg(drv_data))
463 break;
464 else
465 drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
466 /* FALLTHRU */
467 case MV64XXX_I2C_ACTION_SEND_START: 462 case MV64XXX_I2C_ACTION_SEND_START:
468 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, 463 /* Can we offload this msg ? */
469 drv_data->reg_base + drv_data->reg_offsets.control); 464 if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
465 /* No, switch to standard path */
466 mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
467 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
468 drv_data->reg_base + drv_data->reg_offsets.control);
469 }
470 break; 470 break;
471 471
472 case MV64XXX_I2C_ACTION_SEND_ADDR_1: 472 case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
625 unsigned long flags; 625 unsigned long flags;
626 626
627 spin_lock_irqsave(&drv_data->lock, flags); 627 spin_lock_irqsave(&drv_data->lock, flags);
628 if (drv_data->offload_enabled) {
629 drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
630 drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
631 } else {
632 mv64xxx_i2c_prepare_for_io(drv_data, msg);
633 628
634 drv_data->action = MV64XXX_I2C_ACTION_SEND_START; 629 drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
635 drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; 630 drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
636 } 631
637 drv_data->send_stop = is_last; 632 drv_data->send_stop = is_last;
638 drv_data->block = 1; 633 drv_data->block = 1;
639 mv64xxx_i2c_do_action(drv_data); 634 mv64xxx_i2c_do_action(drv_data);
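With this change mv64xxx_i2c_offload_msg() itself reports -EOPNOTSUPP when offloading is disabled, so callers use a single "try offload, otherwise fall back" test instead of checking offload_enabled separately. A compact sketch of that pattern, with hypothetical helper names standing in for the driver's functions:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool offload_enabled;

/* Returns 0 if the message was handed to the offload engine, or a
 * negative errno (including -EOPNOTSUPP when offload is disabled). */
static int try_offload_msg(int msg_len)
{
	if (!offload_enabled)
		return -EOPNOTSUPP;
	if (msg_len > 8)                /* pretend the engine only takes short msgs */
		return -EINVAL;
	return 0;
}

static void send_msg(int msg_len)
{
	if (try_offload_msg(msg_len) < 0) {
		/* No, switch to the standard path */
		printf("msg of %d bytes: standard path\n", msg_len);
		return;
	}
	printf("msg of %d bytes: offload path\n", msg_len);
}

int main(void)
{
	offload_enabled = true;
	send_msg(4);
	send_msg(32);
	offload_enabled = false;
	send_msg(4);
	return 0;
}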
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
447 { }, 447 { },
448}; 448};
449 449
450#define BMA180_CHANNEL(_index) { \ 450#define BMA180_CHANNEL(_axis) { \
451 .type = IIO_ACCEL, \ 451 .type = IIO_ACCEL, \
452 .indexed = 1, \ 452 .modified = 1, \
453 .channel = (_index), \ 453 .channel2 = IIO_MOD_##_axis, \
454 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ 454 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
455 BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ 455 BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
456 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ 456 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
457 .scan_index = (_index), \ 457 .scan_index = AXIS_##_axis, \
458 .scan_type = { \ 458 .scan_type = { \
459 .sign = 's', \ 459 .sign = 's', \
460 .realbits = 14, \ 460 .realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
465} 465}
466 466
467static const struct iio_chan_spec bma180_channels[] = { 467static const struct iio_chan_spec bma180_channels[] = {
468 BMA180_CHANNEL(AXIS_X), 468 BMA180_CHANNEL(X),
469 BMA180_CHANNEL(AXIS_Y), 469 BMA180_CHANNEL(Y),
470 BMA180_CHANNEL(AXIS_Z), 470 BMA180_CHANNEL(Z),
471 IIO_CHAN_SOFT_TIMESTAMP(4), 471 IIO_CHAN_SOFT_TIMESTAMP(3),
472}; 472};
473 473
474static irqreturn_t bma180_trigger_handler(int irq, void *p) 474static irqreturn_t bma180_trigger_handler(int irq, void *p)
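The reworked BMA180_CHANNEL() macro takes the axis name and pastes it into both the IIO modifier (IIO_MOD_##_axis) and the scan index (AXIS_##_axis), so the two can no longer drift apart. A plain-C illustration of the token-pasting idea, with simplified stand-in types instead of struct iio_chan_spec:

#include <stdio.h>

enum { AXIS_X, AXIS_Y, AXIS_Z };
enum { IIO_MOD_X = 1, IIO_MOD_Y, IIO_MOD_Z };

struct chan {
	int modifier;           /* which axis this channel reports */
	int scan_index;         /* position of the sample in a buffered scan */
};

/* One argument drives both fields, keeping modifier and scan slot in sync. */
#define CHANNEL(_axis) { .modifier = IIO_MOD_##_axis, .scan_index = AXIS_##_axis }

static const struct chan channels[] = {
	CHANNEL(X),
	CHANNEL(Y),
	CHANNEL(Z),
};

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("chan %u: modifier=%d scan_index=%d\n",
		       i, channels[i].modifier, channels[i].scan_index);
	return 0;
}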
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
1560 st->client = client; 1560 st->client = client;
1561 1561
1562 st->vref_uv = st->chip_info->int_vref_mv * 1000; 1562 st->vref_uv = st->chip_info->int_vref_mv * 1000;
1563 vref = devm_regulator_get(&client->dev, "vref"); 1563 vref = devm_regulator_get_optional(&client->dev, "vref");
1564 if (!IS_ERR(vref)) { 1564 if (!IS_ERR(vref)) {
1565 int vref_uv; 1565 int vref_uv;
1566 1566
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
189 ADIS16300_SCAN_INCLI_X, 189 ADIS16300_SCAN_INCLI_X,
190 ADIS16300_SCAN_INCLI_Y, 190 ADIS16300_SCAN_INCLI_Y,
191 ADIS16400_SCAN_ADC, 191 ADIS16400_SCAN_ADC,
192 ADIS16400_SCAN_TIMESTAMP,
192}; 193};
193 194
194#ifdef CONFIG_IIO_BUFFER 195#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
632 ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), 632 ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
633 ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), 633 ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
634 ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), 634 ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
635 IIO_CHAN_SOFT_TIMESTAMP(12) 635 IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
636}; 636};
637 637
638static const struct iio_chan_spec adis16448_channels[] = { 638static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
659 }, 659 },
660 }, 660 },
661 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), 661 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
662 IIO_CHAN_SOFT_TIMESTAMP(11) 662 IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
663}; 663};
664 664
665static const struct iio_chan_spec adis16350_channels[] = { 665static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
677 ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), 677 ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
678 ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), 678 ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
679 ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), 679 ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
680 IIO_CHAN_SOFT_TIMESTAMP(11) 680 IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
681}; 681};
682 682
683static const struct iio_chan_spec adis16300_channels[] = { 683static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
690 ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), 690 ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
691 ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), 691 ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
692 ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), 692 ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
693 IIO_CHAN_SOFT_TIMESTAMP(14) 693 IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
694}; 694};
695 695
696static const struct iio_chan_spec adis16334_channels[] = { 696static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
701 ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), 701 ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
702 ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), 702 ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
703 ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), 703 ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
704 IIO_CHAN_SOFT_TIMESTAMP(8) 704 IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
705}; 705};
706 706
707static struct attribute *adis16400_attributes[] = { 707static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
460{ 460{
461 struct tsl2563_chip *chip = iio_priv(indio_dev); 461 struct tsl2563_chip *chip = iio_priv(indio_dev);
462 462
463 if (chan->channel == IIO_MOD_LIGHT_BOTH) 463 if (mask != IIO_CHAN_INFO_CALIBSCALE)
464 return -EINVAL;
465 if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
464 chip->calib0 = calib_from_sysfs(val); 466 chip->calib0 = calib_from_sysfs(val);
465 else 467 else if (chan->channel2 == IIO_MOD_LIGHT_IR)
466 chip->calib1 = calib_from_sysfs(val); 468 chip->calib1 = calib_from_sysfs(val);
469 else
470 return -EINVAL;
467 471
468 return 0; 472 return 0;
469} 473}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
472 struct iio_chan_spec const *chan, 476 struct iio_chan_spec const *chan,
473 int *val, 477 int *val,
474 int *val2, 478 int *val2,
475 long m) 479 long mask)
476{ 480{
477 int ret = -EINVAL; 481 int ret = -EINVAL;
478 u32 calib0, calib1; 482 u32 calib0, calib1;
479 struct tsl2563_chip *chip = iio_priv(indio_dev); 483 struct tsl2563_chip *chip = iio_priv(indio_dev);
480 484
481 mutex_lock(&chip->lock); 485 mutex_lock(&chip->lock);
482 switch (m) { 486 switch (mask) {
483 case IIO_CHAN_INFO_RAW: 487 case IIO_CHAN_INFO_RAW:
484 case IIO_CHAN_INFO_PROCESSED: 488 case IIO_CHAN_INFO_PROCESSED:
485 switch (chan->type) { 489 switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
498 ret = tsl2563_get_adc(chip); 502 ret = tsl2563_get_adc(chip);
499 if (ret) 503 if (ret)
500 goto error_ret; 504 goto error_ret;
501 if (chan->channel == 0) 505 if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
502 *val = chip->data0; 506 *val = chip->data0;
503 else 507 else
504 *val = chip->data1; 508 *val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
510 break; 514 break;
511 515
512 case IIO_CHAN_INFO_CALIBSCALE: 516 case IIO_CHAN_INFO_CALIBSCALE:
513 if (chan->channel == 0) 517 if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
514 *val = calib_to_sysfs(chip->calib0); 518 *val = calib_to_sysfs(chip->calib0);
515 else 519 else
516 *val = calib_to_sysfs(chip->calib1); 520 *val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
85#define AK8975_MAX_CONVERSION_TIMEOUT 500 85#define AK8975_MAX_CONVERSION_TIMEOUT 500
86#define AK8975_CONVERSION_DONE_POLL_TIME 10 86#define AK8975_CONVERSION_DONE_POLL_TIME 10
87#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) 87#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
88#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
88 89
89/* 90/*
90 * Per-instance context data for the device. 91 * Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
265 * 266 *
266 * Since 1uT = 0.01 gauss, our final scale factor becomes: 267 * Since 1uT = 0.01 gauss, our final scale factor becomes:
267 * 268 *
268 * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 269 * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
269 * Hadj = H * ((ASA + 128) * 30 / 256 270 * Hadj = H * ((ASA + 128) * 0.003) / 256
270 * 271 *
271 * Since ASA doesn't change, we cache the resultant scale factor into the 272 * Since ASA doesn't change, we cache the resultant scale factor into the
272 * device context in ak8975_setup(). 273 * device context in ak8975_setup().
273 */ 274 */
274 data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; 275 data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
275 data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; 276 data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
276 data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; 277 data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
277 278
278 return 0; 279 return 0;
279} 280}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
428 case IIO_CHAN_INFO_RAW: 429 case IIO_CHAN_INFO_RAW:
429 return ak8975_read_axis(indio_dev, chan->address, val); 430 return ak8975_read_axis(indio_dev, chan->address, val);
430 case IIO_CHAN_INFO_SCALE: 431 case IIO_CHAN_INFO_SCALE:
431 *val = data->raw_to_gauss[chan->address]; 432 *val = 0;
432 return IIO_VAL_INT; 433 *val2 = data->raw_to_gauss[chan->address];
434 return IIO_VAL_INT_PLUS_MICRO;
433 } 435 }
434 return -EINVAL; 436 return -EINVAL;
435} 437}
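The ak8975 change keeps the cached scale factor in micro-Gauss per LSB and reports it through IIO_VAL_INT_PLUS_MICRO instead of a truncated integer. A quick worked check of the new arithmetic, assuming the nominal sensitivity adjustment value of 128 used in the driver comment:

#include <stdio.h>

/* Scale factor in micro-Gauss per LSB, as cached by the new code. */
#define RAW_TO_GAUSS(asa)	((((asa) + 128) * 3000) / 256)

int main(void)
{
	int asa = 128;                          /* nominal sensitivity adjustment */
	int micro = RAW_TO_GAUSS(asa);          /* (256 * 3000) / 256 = 3000 */

	/* IIO_VAL_INT_PLUS_MICRO pairs an integer part with a micro part. */
	int val = 0, val2 = micro;

	printf("scale = %d.%06d Gauss/LSB\n", val, val2);   /* 0.003000 */
	return 0;
}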
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
106 106
107 while (n-- > 0) 107 while (n-- > 0)
108 len += scnprintf(buf + len, PAGE_SIZE - len, 108 len += scnprintf(buf + len, PAGE_SIZE - len,
109 "%d.%d ", vals[n][0], vals[n][1]); 109 "%d.%06d ", vals[n][0], vals[n][1]);
110 110
111 /* replace trailing space by newline */ 111 /* replace trailing space by newline */
112 buf[len - 1] = '\n'; 112 buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
154 154
155 switch (mask) { 155 switch (mask) {
156 case IIO_CHAN_INFO_RAW: 156 case IIO_CHAN_INFO_RAW:
157 if (iio_buffer_enabled(indio_dev))
158 return -EBUSY;
159
157 switch (chan->type) { 160 switch (chan->type) {
158 case IIO_MAGN: /* in 0.1 uT / LSB */ 161 case IIO_MAGN: /* in 0.1 uT / LSB */
159 ret = mag3110_read(data, buffer); 162 ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
199 struct mag3110_data *data = iio_priv(indio_dev); 202 struct mag3110_data *data = iio_priv(indio_dev);
200 int rate; 203 int rate;
201 204
205 if (iio_buffer_enabled(indio_dev))
206 return -EBUSY;
207
202 switch (mask) { 208 switch (mask) {
203 case IIO_CHAN_INFO_SAMP_FREQ: 209 case IIO_CHAN_INFO_SAMP_FREQ:
204 rate = mag3110_get_samp_freq_index(data, val, val2); 210 rate = mag3110_get_samp_freq_index(data, val, val2);
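The mag3110 formatting fix matters because "%d.%d" drops the leading zeros of the micro part, so a value of 0 + 80000 micro prints as "0.80000" instead of "0.080000". A two-line demonstration of why the zero-padded %06d width is needed:

#include <stdio.h>

int main(void)
{
	int whole = 0, micro = 80000;   /* 0.080000 in IIO int-plus-micro form */

	printf("%d.%d\n", whole, micro);     /* "0.80000"  - reads as 0.8 */
	printf("%d.%06d\n", whole, micro);   /* "0.080000" - correct      */
	return 0;
}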
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
1082 1082
1083 /* Initialize network device */ 1083 /* Initialize network device */
1084 if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { 1084 if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
1085 ret = -ENOMEM;
1085 iounmap(mmio_regs); 1086 iounmap(mmio_regs);
1086 goto bail4; 1087 goto bail4;
1087 } 1088 }
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
1151 goto bail10; 1152 goto bail10;
1152 } 1153 }
1153 1154
1154 if (c2_register_device(c2dev)) 1155 ret = c2_register_device(c2dev);
1156 if (ret)
1155 goto bail10; 1157 goto bail10;
1156 1158
1157 return 0; 1159 return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
576 goto bail4; 576 goto bail4;
577 577
578 /* Initialize cached the adapter limits */ 578 /* Initialize cached the adapter limits */
579 if (c2_rnic_query(c2dev, &c2dev->props)) 579 err = c2_rnic_query(c2dev, &c2dev->props);
580 if (err)
580 goto bail5; 581 goto bail5;
581 582
582 /* Initialize the PD pool */ 583 /* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3352 goto free_dst; 3352 goto free_dst;
3353 } 3353 }
3354 3354
3355 neigh_release(neigh);
3355 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3356 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3356 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3357 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
3357 window = (__force u16) htons((__force u16)tcph->window); 3358 window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..e81c5547e647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
347 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? 347 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
348 IB_WIDTH_4X : IB_WIDTH_1X; 348 IB_WIDTH_4X : IB_WIDTH_1X;
349 props->active_speed = IB_SPEED_QDR; 349 props->active_speed = IB_SPEED_QDR;
350 props->port_cap_flags = IB_PORT_CM_SUP; 350 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
351 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; 351 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
352 props->max_msg_sz = mdev->dev->caps.max_msg_sz; 352 props->max_msg_sz = mdev->dev->caps.max_msg_sz;
353 props->pkey_tbl_len = 1; 353 props->pkey_tbl_len = 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
1357 &dev_attr_board_id 1357 &dev_attr_board_id
1358}; 1358};
1359 1359
1360static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
1361 struct net_device *dev)
1362{
1363 memcpy(eui, dev->dev_addr, 3);
1364 memcpy(eui + 5, dev->dev_addr + 3, 3);
1365 if (vlan_id < 0x1000) {
1366 eui[3] = vlan_id >> 8;
1367 eui[4] = vlan_id & 0xff;
1368 } else {
1369 eui[3] = 0xff;
1370 eui[4] = 0xfe;
1371 }
1372 eui[0] ^= 2;
1373}
1374
1360static void update_gids_task(struct work_struct *work) 1375static void update_gids_task(struct work_struct *work)
1361{ 1376{
1362 struct update_gid_work *gw = container_of(work, struct update_gid_work, work); 1377 struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
1393 struct mlx4_cmd_mailbox *mailbox; 1408 struct mlx4_cmd_mailbox *mailbox;
1394 union ib_gid *gids; 1409 union ib_gid *gids;
1395 int err; 1410 int err;
1396 int i;
1397 struct mlx4_dev *dev = gw->dev->dev; 1411 struct mlx4_dev *dev = gw->dev->dev;
1398 1412
1399 mailbox = mlx4_alloc_cmd_mailbox(dev); 1413 mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
1405 gids = mailbox->buf; 1419 gids = mailbox->buf;
1406 memcpy(gids, gw->gids, sizeof(gw->gids)); 1420 memcpy(gids, gw->gids, sizeof(gw->gids));
1407 1421
1408 for (i = 1; i < gw->dev->num_ports + 1; i++) { 1422 if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1409 if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) == 1423 IB_LINK_LAYER_ETHERNET) {
1410 IB_LINK_LAYER_ETHERNET) { 1424 err = mlx4_cmd(dev, mailbox->dma,
1411 err = mlx4_cmd(dev, mailbox->dma, 1425 MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1412 MLX4_SET_PORT_GID_TABLE << 8 | i, 1426 1, MLX4_CMD_SET_PORT,
1413 1, MLX4_CMD_SET_PORT, 1427 MLX4_CMD_TIME_CLASS_B,
1414 MLX4_CMD_TIME_CLASS_B, 1428 MLX4_CMD_WRAPPED);
1415 MLX4_CMD_WRAPPED); 1429 if (err)
1416 if (err) 1430 pr_warn(KERN_WARNING
1417 pr_warn(KERN_WARNING 1431 "set port %d command failed\n", gw->port);
1418 "set port %d command failed\n", i);
1419 }
1420 } 1432 }
1421 1433
1422 mlx4_free_cmd_mailbox(dev, mailbox); 1434 mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
1425} 1437}
1426 1438
1427static int update_gid_table(struct mlx4_ib_dev *dev, int port, 1439static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1428 union ib_gid *gid, int clear) 1440 union ib_gid *gid, int clear,
1441 int default_gid)
1429{ 1442{
1430 struct update_gid_work *work; 1443 struct update_gid_work *work;
1431 int i; 1444 int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1434 int found = -1; 1447 int found = -1;
1435 int max_gids; 1448 int max_gids;
1436 1449
1437 max_gids = dev->dev->caps.gid_table_len[port]; 1450 if (default_gid) {
1438 for (i = 0; i < max_gids; ++i) { 1451 free = 0;
1439 if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, 1452 } else {
1440 sizeof(*gid))) 1453 max_gids = dev->dev->caps.gid_table_len[port];
1441 found = i; 1454 for (i = 1; i < max_gids; ++i) {
1442 1455 if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1443 if (clear) {
1444 if (found >= 0) {
1445 need_update = 1;
1446 dev->iboe.gid_table[port - 1][found] = zgid;
1447 break;
1448 }
1449 } else {
1450 if (found >= 0)
1451 break;
1452
1453 if (free < 0 &&
1454 !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
1455 sizeof(*gid))) 1456 sizeof(*gid)))
1456 free = i; 1457 found = i;
1458
1459 if (clear) {
1460 if (found >= 0) {
1461 need_update = 1;
1462 dev->iboe.gid_table[port - 1][found] =
1463 zgid;
1464 break;
1465 }
1466 } else {
1467 if (found >= 0)
1468 break;
1469
1470 if (free < 0 &&
1471 !memcmp(&dev->iboe.gid_table[port - 1][i],
1472 &zgid, sizeof(*gid)))
1473 free = i;
1474 }
1457 } 1475 }
1458 } 1476 }
1459 1477
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1478 return 0; 1496 return 0;
1479} 1497}
1480 1498
1481static int reset_gid_table(struct mlx4_ib_dev *dev) 1499static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
1482{ 1500{
1483 struct update_gid_work *work; 1501 gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1502 mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
1503}
1504
1484 1505
1506static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
1507{
1508 struct update_gid_work *work;
1485 1509
1486 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1510 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1487 if (!work) 1511 if (!work)
1488 return -ENOMEM; 1512 return -ENOMEM;
1489 memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table)); 1513
1514 memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
1490 memset(work->gids, 0, sizeof(work->gids)); 1515 memset(work->gids, 0, sizeof(work->gids));
1491 INIT_WORK(&work->work, reset_gids_task); 1516 INIT_WORK(&work->work, reset_gids_task);
1492 work->dev = dev; 1517 work->dev = dev;
1518 work->port = port;
1493 queue_work(wq, &work->work); 1519 queue_work(wq, &work->work);
1494 return 0; 1520 return 0;
1495} 1521}
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1502 struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? 1528 struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1503 rdma_vlan_dev_real_dev(event_netdev) : 1529 rdma_vlan_dev_real_dev(event_netdev) :
1504 event_netdev; 1530 event_netdev;
1531 union ib_gid default_gid;
1532
1533 mlx4_make_default_gid(real_dev, &default_gid);
1534
1535 if (!memcmp(gid, &default_gid, sizeof(*gid)))
1536 return 0;
1505 1537
1506 if (event != NETDEV_DOWN && event != NETDEV_UP) 1538 if (event != NETDEV_DOWN && event != NETDEV_UP)
1507 return 0; 1539 return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1520 (!netif_is_bond_master(real_dev) && 1552 (!netif_is_bond_master(real_dev) &&
1521 (real_dev == iboe->netdevs[port - 1]))) 1553 (real_dev == iboe->netdevs[port - 1])))
1522 update_gid_table(ibdev, port, gid, 1554 update_gid_table(ibdev, port, gid,
1523 event == NETDEV_DOWN); 1555 event == NETDEV_DOWN, 0);
1524 1556
1525 spin_unlock(&iboe->lock); 1557 spin_unlock(&iboe->lock);
1526 return 0; 1558 return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1536 rdma_vlan_dev_real_dev(dev) : dev; 1568 rdma_vlan_dev_real_dev(dev) : dev;
1537 1569
1538 iboe = &ibdev->iboe; 1570 iboe = &ibdev->iboe;
1539 spin_lock(&iboe->lock);
1540 1571
1541 for (port = 1; port <= MLX4_MAX_PORTS; ++port) 1572 for (port = 1; port <= MLX4_MAX_PORTS; ++port)
1542 if ((netif_is_bond_master(real_dev) && 1573 if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1545 (real_dev == iboe->netdevs[port - 1]))) 1576 (real_dev == iboe->netdevs[port - 1])))
1546 break; 1577 break;
1547 1578
1548 spin_unlock(&iboe->lock);
1549
1550 if ((port == 0) || (port > MLX4_MAX_PORTS)) 1579 if ((port == 0) || (port > MLX4_MAX_PORTS))
1551 return 0; 1580 return 0;
1552 else 1581 else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
1607 /*ifa->ifa_address;*/ 1636 /*ifa->ifa_address;*/
1608 ipv6_addr_set_v4mapped(ifa->ifa_address, 1637 ipv6_addr_set_v4mapped(ifa->ifa_address,
1609 (struct in6_addr *)&gid); 1638 (struct in6_addr *)&gid);
1610 update_gid_table(ibdev, port, &gid, 0); 1639 update_gid_table(ibdev, port, &gid, 0, 0);
1611 } 1640 }
1612 endfor_ifa(in_dev); 1641 endfor_ifa(in_dev);
1613 in_dev_put(in_dev); 1642 in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
1619 read_lock_bh(&in6_dev->lock); 1648 read_lock_bh(&in6_dev->lock);
1620 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 1649 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1621 pgid = (union ib_gid *)&ifp->addr; 1650 pgid = (union ib_gid *)&ifp->addr;
1622 update_gid_table(ibdev, port, pgid, 0); 1651 update_gid_table(ibdev, port, pgid, 0, 0);
1623 } 1652 }
1624 read_unlock_bh(&in6_dev->lock); 1653 read_unlock_bh(&in6_dev->lock);
1625 in6_dev_put(in6_dev); 1654 in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
1627#endif 1656#endif
1628} 1657}
1629 1658
1659static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
1660 struct net_device *dev, u8 port)
1661{
1662 union ib_gid gid;
1663 mlx4_make_default_gid(dev, &gid);
1664 update_gid_table(ibdev, port, &gid, 0, 1);
1665}
1666
1630static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) 1667static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1631{ 1668{
1632 struct net_device *dev; 1669 struct net_device *dev;
1670 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
1671 int i;
1633 1672
1634 if (reset_gid_table(ibdev)) 1673 for (i = 1; i <= ibdev->num_ports; ++i)
1635 return -1; 1674 if (reset_gid_table(ibdev, i))
1675 return -1;
1636 1676
1637 read_lock(&dev_base_lock); 1677 read_lock(&dev_base_lock);
1678 spin_lock(&iboe->lock);
1638 1679
1639 for_each_netdev(&init_net, dev) { 1680 for_each_netdev(&init_net, dev) {
1640 u8 port = mlx4_ib_get_dev_port(dev, ibdev); 1681 u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1642 mlx4_ib_get_dev_addr(dev, ibdev, port); 1683 mlx4_ib_get_dev_addr(dev, ibdev, port);
1643 } 1684 }
1644 1685
1686 spin_unlock(&iboe->lock);
1645 read_unlock(&dev_base_lock); 1687 read_unlock(&dev_base_lock);
1646 1688
1647 return 0; 1689 return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1656 1698
1657 spin_lock(&iboe->lock); 1699 spin_lock(&iboe->lock);
1658 mlx4_foreach_ib_transport_port(port, ibdev->dev) { 1700 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1701 enum ib_port_state port_state = IB_PORT_NOP;
1659 struct net_device *old_master = iboe->masters[port - 1]; 1702 struct net_device *old_master = iboe->masters[port - 1];
1703 struct net_device *curr_netdev;
1660 struct net_device *curr_master; 1704 struct net_device *curr_master;
1705
1661 iboe->netdevs[port - 1] = 1706 iboe->netdevs[port - 1] =
1662 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); 1707 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1708 if (iboe->netdevs[port - 1])
1709 mlx4_ib_set_default_gid(ibdev,
1710 iboe->netdevs[port - 1], port);
1711 curr_netdev = iboe->netdevs[port - 1];
1663 1712
1664 if (iboe->netdevs[port - 1] && 1713 if (iboe->netdevs[port - 1] &&
1665 netif_is_bond_slave(iboe->netdevs[port - 1])) { 1714 netif_is_bond_slave(iboe->netdevs[port - 1])) {
1666 rtnl_lock();
1667 iboe->masters[port - 1] = netdev_master_upper_dev_get( 1715 iboe->masters[port - 1] = netdev_master_upper_dev_get(
1668 iboe->netdevs[port - 1]); 1716 iboe->netdevs[port - 1]);
1669 rtnl_unlock(); 1717 } else {
1718 iboe->masters[port - 1] = NULL;
1670 } 1719 }
1671 curr_master = iboe->masters[port - 1]; 1720 curr_master = iboe->masters[port - 1];
1672 1721
1722 if (curr_netdev) {
1723 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1724 IB_PORT_ACTIVE : IB_PORT_DOWN;
1725 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1726 } else {
1727 reset_gid_table(ibdev, port);
1728 }
1729	 /* if using bonding/team and a slave port is down, we don't want the bond IP
1730 * based gids in the table since flows that select port by gid may get
1731 * the down port.
1732 */
1733 if (curr_master && (port_state == IB_PORT_DOWN)) {
1734 reset_gid_table(ibdev, port);
1735 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1736 }
1673 /* if bonding is used it is possible that we add it to masters 1737 /* if bonding is used it is possible that we add it to masters
1674 only after IP address is assigned to the net bonding 1738 * only after IP address is assigned to the net bonding
1675 interface */ 1739 * interface.
1676 if (curr_master && (old_master != curr_master)) 1740 */
1741 if (curr_master && (old_master != curr_master)) {
1742 reset_gid_table(ibdev, port);
1743 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1677 mlx4_ib_get_dev_addr(curr_master, ibdev, port); 1744 mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1745 }
1746
1747 if (!curr_master && (old_master != curr_master)) {
1748 reset_gid_table(ibdev, port);
1749 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1750 mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
1751 }
1678 } 1752 }
1679 1753
1680 spin_unlock(&iboe->lock); 1754 spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1810 int i, j; 1884 int i, j;
1811 int err; 1885 int err;
1812 struct mlx4_ib_iboe *iboe; 1886 struct mlx4_ib_iboe *iboe;
1887 int ib_num_ports = 0;
1813 1888
1814 pr_info_once("%s", mlx4_ib_version); 1889 pr_info_once("%s", mlx4_ib_version);
1815 1890
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1985 ibdev->counters[i] = -1; 2060 ibdev->counters[i] = -1;
1986 } 2061 }
1987 2062
2063 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2064 ib_num_ports++;
2065
1988 spin_lock_init(&ibdev->sm_lock); 2066 spin_lock_init(&ibdev->sm_lock);
1989 mutex_init(&ibdev->cap_mask_mutex); 2067 mutex_init(&ibdev->cap_mask_mutex);
1990 2068
1991 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2069 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2070 ib_num_ports) {
1992 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; 2071 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
1993 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, 2072 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
1994 MLX4_IB_UC_STEER_QPN_ALIGN, 2073 MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2051 } 2130 }
2052 } 2131 }
2053#endif 2132#endif
2133 for (i = 1 ; i <= ibdev->num_ports ; ++i)
2134 reset_gid_table(ibdev, i);
2135 rtnl_lock();
2054 mlx4_ib_scan_netdevs(ibdev); 2136 mlx4_ib_scan_netdevs(ibdev);
2137 rtnl_unlock();
2055 mlx4_ib_init_gid_table(ibdev); 2138 mlx4_ib_init_gid_table(ibdev);
2056 } 2139 }
2057 2140
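mlx4_addrconf_ifid_eui48() above builds the low 64 bits of a default IBoE GID from the netdev MAC using the usual modified EUI-64 construction: split the MAC in the middle, insert 0xff,0xfe (or the VLAN ID when one is present), and flip the universal/local bit. A standalone sketch of the same transformation on an example MAC:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void ifid_from_mac(uint8_t eui[8], uint16_t vlan_id, const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);
	if (vlan_id < 0x1000) {         /* valid VLAN: encode it in the middle */
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {                        /* no VLAN: standard ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;                    /* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t eui[8];

	ifid_from_mac(eui, 0xffff, mac);        /* 0xffff means "no VLAN" */
	for (int i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
	/* prints 02:02:c9:ff:fe:12:34:56 */
	return 0;
}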
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
1config MLX5_INFINIBAND 1config MLX5_INFINIBAND
2 tristate "Mellanox Connect-IB HCA support" 2 tristate "Mellanox Connect-IB HCA support"
3 depends on NETDEVICES && ETHERNET && PCI && X86 3 depends on NETDEVICES && ETHERNET && PCI
4 select NET_VENDOR_MELLANOX 4 select NET_VENDOR_MELLANOX
5 select MLX5_CORE 5 select MLX5_CORE
6 ---help--- 6 ---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..aa03e732b6a8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
261 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 261 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
262 IB_DEVICE_PORT_ACTIVE_EVENT | 262 IB_DEVICE_PORT_ACTIVE_EVENT |
263 IB_DEVICE_SYS_IMAGE_GUID | 263 IB_DEVICE_SYS_IMAGE_GUID |
264 IB_DEVICE_RC_RNR_NAK_GEN | 264 IB_DEVICE_RC_RNR_NAK_GEN;
265 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
266 flags = dev->mdev.caps.flags; 265 flags = dev->mdev.caps.flags;
267 if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) 266 if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
268 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 267 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
536 struct ib_udata *udata) 535 struct ib_udata *udata)
537{ 536{
538 struct mlx5_ib_dev *dev = to_mdev(ibdev); 537 struct mlx5_ib_dev *dev = to_mdev(ibdev);
539 struct mlx5_ib_alloc_ucontext_req req; 538 struct mlx5_ib_alloc_ucontext_req_v2 req;
540 struct mlx5_ib_alloc_ucontext_resp resp; 539 struct mlx5_ib_alloc_ucontext_resp resp;
541 struct mlx5_ib_ucontext *context; 540 struct mlx5_ib_ucontext *context;
542 struct mlx5_uuar_info *uuari; 541 struct mlx5_uuar_info *uuari;
543 struct mlx5_uar *uars; 542 struct mlx5_uar *uars;
544 int gross_uuars; 543 int gross_uuars;
545 int num_uars; 544 int num_uars;
545 int ver;
546 int uuarn; 546 int uuarn;
547 int err; 547 int err;
548 int i; 548 int i;
549 int reqlen;
549 550
550 if (!dev->ib_active) 551 if (!dev->ib_active)
551 return ERR_PTR(-EAGAIN); 552 return ERR_PTR(-EAGAIN);
552 553
553 err = ib_copy_from_udata(&req, udata, sizeof(req)); 554 memset(&req, 0, sizeof(req));
555 reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
556 if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
557 ver = 0;
558 else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
559 ver = 2;
560 else
561 return ERR_PTR(-EINVAL);
562
563 err = ib_copy_from_udata(&req, udata, reqlen);
554 if (err) 564 if (err)
555 return ERR_PTR(err); 565 return ERR_PTR(err);
556 566
567 if (req.flags || req.reserved)
568 return ERR_PTR(-EINVAL);
569
557 if (req.total_num_uuars > MLX5_MAX_UUARS) 570 if (req.total_num_uuars > MLX5_MAX_UUARS)
558 return ERR_PTR(-ENOMEM); 571 return ERR_PTR(-ENOMEM);
559 572
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
626 if (err) 639 if (err)
627 goto out_uars; 640 goto out_uars;
628 641
642 uuari->ver = ver;
629 uuari->num_low_latency_uuars = req.num_low_latency_uuars; 643 uuari->num_low_latency_uuars = req.num_low_latency_uuars;
630 uuari->uars = uars; 644 uuari->uars = uars;
631 uuari->num_uars = num_uars; 645 uuari->num_uars = num_uars;
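The mlx5 alloc_ucontext change distinguishes the original and v2 user requests purely by their length: userspace that knows the new ABI sends the larger struct, and the added flags/reserved words must be zero. A userspace-style sketch of the same length-based dispatch, with stand-in struct names rather than the uverbs plumbing:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct req_v0 { uint32_t total_num_uuars, num_low_latency_uuars; };
struct req_v2 { uint32_t total_num_uuars, num_low_latency_uuars, flags, reserved; };

static int parse_req(const void *data, size_t len)
{
	struct req_v2 req = { 0 };      /* a v0 request leaves flags/reserved at zero */
	int ver;

	if (len == sizeof(struct req_v0))
		ver = 0;
	else if (len == sizeof(struct req_v2))
		ver = 2;
	else
		return -1;              /* unknown ABI revision */

	memcpy(&req, data, len);
	if (req.flags || req.reserved)
		return -1;              /* v2 callers must zero the new fields */
	printf("parsed v%d request: %u uuars\n", ver, (unsigned)req.total_num_uuars);
	return 0;
}

int main(void)
{
	struct req_v0 old_req = { 8, 2 };
	struct req_v2 v2_req = { 16, 4, 0, 0 };

	parse_req(&old_req, sizeof(old_req));
	parse_req(&v2_req, sizeof(v2_req));
	return 0;
}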
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
216 216
217 case IB_QPT_UC: 217 case IB_QPT_UC:
218 size += sizeof(struct mlx5_wqe_ctrl_seg) + 218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
219 sizeof(struct mlx5_wqe_raddr_seg); 219 sizeof(struct mlx5_wqe_raddr_seg) +
220 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
221 sizeof(struct mlx5_mkey_seg);
220 break; 222 break;
221 223
222 case IB_QPT_UD: 224 case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
428 break; 430 break;
429 431
430 case MLX5_IB_LATENCY_CLASS_MEDIUM: 432 case MLX5_IB_LATENCY_CLASS_MEDIUM:
431 uuarn = alloc_med_class_uuar(uuari); 433 if (uuari->ver < 2)
434 uuarn = -ENOMEM;
435 else
436 uuarn = alloc_med_class_uuar(uuari);
432 break; 437 break;
433 438
434 case MLX5_IB_LATENCY_CLASS_HIGH: 439 case MLX5_IB_LATENCY_CLASS_HIGH:
435 uuarn = alloc_high_class_uuar(uuari); 440 if (uuari->ver < 2)
441 uuarn = -ENOMEM;
442 else
443 uuarn = alloc_high_class_uuar(uuari);
436 break; 444 break;
437 445
438 case MLX5_IB_LATENCY_CLASS_FAST_PATH: 446 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
657 int err; 665 int err;
658 666
659 uuari = &dev->mdev.priv.uuari; 667 uuari = &dev->mdev.priv.uuari;
660 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 668 if (init_attr->create_flags)
661 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; 669 return -EINVAL;
662 670
663 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 671 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
664 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; 672 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
62 __u32 num_low_latency_uuars; 62 __u32 num_low_latency_uuars;
63}; 63};
64 64
65struct mlx5_ib_alloc_ucontext_req_v2 {
66 __u32 total_num_uuars;
67 __u32 num_low_latency_uuars;
68 __u32 flags;
69 __u32 reserved;
70};
71
65struct mlx5_ib_alloc_ucontext_resp { 72struct mlx5_ib_alloc_ucontext_resp {
66 __u32 qp_tab_size; 73 __u32 qp_tab_size;
67 __u32 bf_reg_size; 74 __u32 bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
675 INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); 675 INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
676 676
677 /* Initialize network devices */ 677 /* Initialize network devices */
678 if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) 678 netdev = nes_netdev_init(nesdev, mmio_regs);
679 if (netdev == NULL) {
680 ret = -ENOMEM;
679 goto bail7; 681 goto bail7;
682 }
680 683
681 /* Register network device */ 684 /* Register network device */
682 ret = register_netdev(netdev); 685 ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
127 127
128 is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; 128 is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
129 if (is_vlan) 129 if (is_vlan)
130 netdev = vlan_dev_real_dev(netdev); 130 netdev = rdma_vlan_dev_real_dev(netdev);
131 131
132 rcu_read_lock(); 132 rcu_read_lock();
133 list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { 133 list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
176 props->port_cap_flags = 176 props->port_cap_flags =
177 IB_PORT_CM_SUP | 177 IB_PORT_CM_SUP |
178 IB_PORT_REINIT_SUP | 178 IB_PORT_REINIT_SUP |
179 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; 179 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
180 props->gid_tbl_len = OCRDMA_MAX_SGID; 180 props->gid_tbl_len = OCRDMA_MAX_SGID;
181 props->pkey_tbl_len = 1; 181 props->pkey_tbl_len = 1;
182 props->bad_pkey_cntr = 0; 182 props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
1416 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> 1416 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1417 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; 1417 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1418 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & 1418 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1419 OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> 1419 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1420 OCRDMA_QP_PARAMS_TCLASS_SHIFT; 1420 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1421 1421
1422 qp_attr->ah_attr.ah_flags = IB_AH_GRH; 1422 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2395 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); 2395 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2396 qib_write_kreg(dd, kr_scratch, 0ULL); 2396 qib_write_kreg(dd, kr_scratch, 0ULL);
2397 2397
2398 /* ensure previous Tx parameters are not still forced */
2399 qib_write_kreg_port(ppd, krp_tx_deemph_override,
2400 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2401 reset_tx_deemphasis_override));
2402
2398 if (qib_compat_ddr_negotiate) { 2403 if (qib_compat_ddr_negotiate) {
2399 ppd->cpspec->ibdeltainprog = 1; 2404 ppd->cpspec->ibdeltainprog = 1;
2400 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, 2405 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
629{ 629{
630 enum usnic_transport_type trans_type = qp_flow->trans_type; 630 enum usnic_transport_type trans_type = qp_flow->trans_type;
631 int err; 631 int err;
632 uint16_t port_num = 0;
632 633
633 switch (trans_type) { 634 switch (trans_type) {
634 case USNIC_TRANSPORT_ROCE_CUSTOM: 635 case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
637 case USNIC_TRANSPORT_IPV4_UDP: 638 case USNIC_TRANSPORT_IPV4_UDP:
638 err = usnic_transport_sock_get_addr(qp_flow->udp.sock, 639 err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
639 NULL, NULL, 640 NULL, NULL,
640 (uint16_t *) id); 641 &port_num);
641 if (err) 642 if (err)
642 return err; 643 return err;
644 /*
645 * Copy port_num to stack first and then to *id,
646 * so that the short to int cast works for little
647 * and big endian systems.
648 */
649 *id = port_num;
643 break; 650 break;
644 default: 651 default:
645 usnic_err("Unsupported transport %u\n", trans_type); 652 usnic_err("Unsupported transport %u\n", trans_type);
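The usnic fix stops writing the port number through a (uint16_t *) cast of an int pointer: on a big-endian machine that lands the 16-bit value in the high-order bytes of *id, leaving garbage in the result, while a plain integer assignment lets the compiler do the width conversion portably. A small sketch of the difference; the byte-overlay variant is shown only to illustrate the old behaviour:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint16_t port_num = 0x1234;
	int id;

	/* What the old code effectively did: overlay the u16 onto the int's
	 * first two bytes. Correct only on little-endian machines; on
	 * big-endian this would read back as 0x12340000. */
	id = 0;
	memcpy(&id, &port_num, sizeof(port_num));
	printf("byte overlay: id = 0x%x\n", id);

	/* The fix: copy to a local u16, then assign. Endian-independent. */
	id = port_num;
	printf("assignment:   id = 0x%x\n", id);
	return 0;
}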
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
610 ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, 610 ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
611 ISER_HEADERS_LEN, DMA_TO_DEVICE); 611 ISER_HEADERS_LEN, DMA_TO_DEVICE);
612 kmem_cache_free(ig.desc_cache, tx_desc); 612 kmem_cache_free(ig.desc_cache, tx_desc);
613 tx_desc = NULL;
613 } 614 }
614 615
615 atomic_dec(&ib_conn->post_send_buf_count); 616 atomic_dec(&ib_conn->post_send_buf_count);
616 617
617 if (tx_desc->type == ISCSI_TX_CONTROL) { 618 if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
618 /* this arithmetic is legal by libiscsi dd_data allocation */ 619 /* this arithmetic is legal by libiscsi dd_data allocation */
619 task = (void *) ((long)(void *)tx_desc - 620 task = (void *) ((long)(void *)tx_desc -
620 sizeof(struct iscsi_task)); 621 sizeof(struct iscsi_task));
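The iser hunk is a "NULL after free" fix: tx_desc was freed in one branch and then dereferenced unconditionally a few lines later. Clearing the pointer at the free site and testing it before the later use closes the use-after-free. A minimal sketch of the pattern with placeholder types, not the iser data structures:

#include <stdio.h>
#include <stdlib.h>

struct desc { int type; };
#define TYPE_CONTROL 1

static void complete(struct desc *d, int free_here)
{
	if (free_here) {
		free(d);
		d = NULL;               /* prevent any later use of the stale pointer */
	}

	/* ... other completion work ... */

	if (d && d->type == TYPE_CONTROL)
		printf("control descriptor completed\n");
}

int main(void)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->type = TYPE_CONTROL;
	complete(d, 0);         /* still owned by the caller */
	free(d);

	d = malloc(sizeof(*d));
	if (!d)
		return 1;
	d->type = TYPE_CONTROL;
	complete(d, 1);         /* freed inside complete() */
	return 0;
}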
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
652 /* getting here when the state is UP means that the conn is being * 652 /* getting here when the state is UP means that the conn is being *
653 * terminated asynchronously from the iSCSI layer's perspective. */ 653 * terminated asynchronously from the iSCSI layer's perspective. */
654 if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, 654 if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
655 ISER_CONN_TERMINATING)) 655 ISER_CONN_TERMINATING)){
656 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 656 if (ib_conn->iser_conn)
657 ISCSI_ERR_CONN_FAILED); 657 iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
658 ISCSI_ERR_CONN_FAILED);
659 else
660 iser_err("iscsi_iser connection isn't bound\n");
661 }
658 662
659 /* Complete the termination process if no posts are pending */ 663 /* Complete the termination process if no posts are pending */
660 if (ib_conn->post_recv_buf_count == 0 && 664 if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2b161be3c1a3..d18d08a076e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
453 if (ret) { 453 if (ret) {
454 pr_err("Failed to create fastreg descriptor err=%d\n", 454 pr_err("Failed to create fastreg descriptor err=%d\n",
455 ret); 455 ret);
456 kfree(fr_desc);
456 goto err; 457 goto err;
457 } 458 }
458 459
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3666 unsigned long val; 3666 unsigned long val;
3667 int ret; 3667 int ret;
3668 3668
3669 ret = strict_strtoul(page, 0, &val); 3669 ret = kstrtoul(page, 0, &val);
3670 if (ret < 0) { 3670 if (ret < 0) {
3671 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3671 pr_err("kstrtoul() failed with ret: %d\n", ret);
3672 return -EINVAL; 3672 return -EINVAL;
3673 } 3673 }
3674 if (val > MAX_SRPT_RDMA_SIZE) { 3674 if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3706 unsigned long val; 3706 unsigned long val;
3707 int ret; 3707 int ret;
3708 3708
3709 ret = strict_strtoul(page, 0, &val); 3709 ret = kstrtoul(page, 0, &val);
3710 if (ret < 0) { 3710 if (ret < 0) {
3711 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3711 pr_err("kstrtoul() failed with ret: %d\n", ret);
3712 return -EINVAL; 3712 return -EINVAL;
3713 } 3713 }
3714 if (val > MAX_SRPT_RSP_SIZE) { 3714 if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3746 unsigned long val; 3746 unsigned long val;
3747 int ret; 3747 int ret;
3748 3748
3749 ret = strict_strtoul(page, 0, &val); 3749 ret = kstrtoul(page, 0, &val);
3750 if (ret < 0) { 3750 if (ret < 0) {
3751 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3751 pr_err("kstrtoul() failed with ret: %d\n", ret);
3752 return -EINVAL; 3752 return -EINVAL;
3753 } 3753 }
3754 if (val > MAX_SRPT_SRQ_SIZE) { 3754 if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
3793 unsigned long tmp; 3793 unsigned long tmp;
3794 int ret; 3794 int ret;
3795 3795
3796 ret = strict_strtoul(page, 0, &tmp); 3796 ret = kstrtoul(page, 0, &tmp);
3797 if (ret < 0) { 3797 if (ret < 0) {
3798 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); 3798 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3799 return -EINVAL; 3799 return -EINVAL;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8911850c9444..1d9ab39af29f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
79 79
80#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) 80#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
81#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) 81#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
82#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
83 82
84/* Stage-1 PTE */ 83/* Stage-1 PTE */
85#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) 84#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
@@ -191,6 +190,9 @@
191#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) 190#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
192#define CBAR_VMID_SHIFT 0 191#define CBAR_VMID_SHIFT 0
193#define CBAR_VMID_MASK 0xff 192#define CBAR_VMID_MASK 0xff
193#define CBAR_S1_BPSHCFG_SHIFT 8
194#define CBAR_S1_BPSHCFG_MASK 3
195#define CBAR_S1_BPSHCFG_NSH 3
194#define CBAR_S1_MEMATTR_SHIFT 12 196#define CBAR_S1_MEMATTR_SHIFT 12
195#define CBAR_S1_MEMATTR_MASK 0xf 197#define CBAR_S1_MEMATTR_MASK 0xf
196#define CBAR_S1_MEMATTR_WB 0xf 198#define CBAR_S1_MEMATTR_WB 0xf
@@ -393,7 +395,7 @@ struct arm_smmu_domain {
393 struct arm_smmu_cfg root_cfg; 395 struct arm_smmu_cfg root_cfg;
394 phys_addr_t output_mask; 396 phys_addr_t output_mask;
395 397
396 struct mutex lock; 398 spinlock_t lock;
397}; 399};
398 400
399static DEFINE_SPINLOCK(arm_smmu_devices_lock); 401static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -632,6 +634,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
632 return IRQ_HANDLED; 634 return IRQ_HANDLED;
633} 635}
634 636
637static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
638 size_t size)
639{
640 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
641
642
643 /* Ensure new page tables are visible to the hardware walker */
644 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
645 dsb();
646 } else {
647 /*
648 * If the SMMU can't walk tables in the CPU caches, treat them
649 * like non-coherent DMA since we need to flush the new entries
650 * all the way out to memory. There's no possibility of
651 * recursion here as the SMMU table walker will not be wired
652 * through another SMMU.
653 */
654 dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
655 DMA_TO_DEVICE);
656 }
657}
658
635static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) 659static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
636{ 660{
637 u32 reg; 661 u32 reg;
@@ -650,11 +674,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
650 if (smmu->version == 1) 674 if (smmu->version == 1)
651 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; 675 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
652 676
653 /* Use the weakest memory type, so it is overridden by the pte */ 677 /*
654 if (stage1) 678 * Use the weakest shareability/memory types, so they are
655 reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 679 * overridden by the ttbcr/pte.
656 else 680 */
681 if (stage1) {
682 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
683 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
684 } else {
657 reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; 685 reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
686 }
658 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); 687 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
659 688
660 if (smmu->version > 1) { 689 if (smmu->version > 1) {
@@ -715,6 +744,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
715 } 744 }
716 745
717 /* TTBR0 */ 746 /* TTBR0 */
747 arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
748 PTRS_PER_PGD * sizeof(pgd_t));
718 reg = __pa(root_cfg->pgd); 749 reg = __pa(root_cfg->pgd);
719 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); 750 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
720 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; 751 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -901,7 +932,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
901 goto out_free_domain; 932 goto out_free_domain;
902 smmu_domain->root_cfg.pgd = pgd; 933 smmu_domain->root_cfg.pgd = pgd;
903 934
904 mutex_init(&smmu_domain->lock); 935 spin_lock_init(&smmu_domain->lock);
905 domain->priv = smmu_domain; 936 domain->priv = smmu_domain;
906 return 0; 937 return 0;
907 938
@@ -1128,6 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1128 struct arm_smmu_domain *smmu_domain = domain->priv; 1159 struct arm_smmu_domain *smmu_domain = domain->priv;
1129 struct arm_smmu_device *device_smmu = dev->archdata.iommu; 1160 struct arm_smmu_device *device_smmu = dev->archdata.iommu;
1130 struct arm_smmu_master *master; 1161 struct arm_smmu_master *master;
1162 unsigned long flags;
1131 1163
1132 if (!device_smmu) { 1164 if (!device_smmu) {
1133 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); 1165 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1138,7 +1170,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1138 * Sanity check the domain. We don't currently support domains 1170 * Sanity check the domain. We don't currently support domains
1139 * that cross between different SMMU chains. 1171 * that cross between different SMMU chains.
1140 */ 1172 */
1141 mutex_lock(&smmu_domain->lock); 1173 spin_lock_irqsave(&smmu_domain->lock, flags);
1142 if (!smmu_domain->leaf_smmu) { 1174 if (!smmu_domain->leaf_smmu) {
1143 /* Now that we have a master, we can finalise the domain */ 1175 /* Now that we have a master, we can finalise the domain */
1144 ret = arm_smmu_init_domain_context(domain, dev); 1176 ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1185,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1153 dev_name(device_smmu->dev)); 1185 dev_name(device_smmu->dev));
1154 goto err_unlock; 1186 goto err_unlock;
1155 } 1187 }
1156 mutex_unlock(&smmu_domain->lock); 1188 spin_unlock_irqrestore(&smmu_domain->lock, flags);
1157 1189
1158 /* Looks ok, so add the device to the domain */ 1190 /* Looks ok, so add the device to the domain */
1159 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); 1191 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1195,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1163 return arm_smmu_domain_add_master(smmu_domain, master); 1195 return arm_smmu_domain_add_master(smmu_domain, master);
1164 1196
1165err_unlock: 1197err_unlock:
1166 mutex_unlock(&smmu_domain->lock); 1198 spin_unlock_irqrestore(&smmu_domain->lock, flags);
1167 return ret; 1199 return ret;
1168} 1200}
1169 1201
@@ -1177,23 +1209,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
1177 arm_smmu_domain_remove_master(smmu_domain, master); 1209 arm_smmu_domain_remove_master(smmu_domain, master);
1178} 1210}
1179 1211
1180static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
1181 size_t size)
1182{
1183 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
1184
1185 /*
1186 * If the SMMU can't walk tables in the CPU caches, treat them
1187 * like non-coherent DMA since we need to flush the new entries
1188 * all the way out to memory. There's no possibility of recursion
1189 * here as the SMMU table walker will not be wired through another
1190 * SMMU.
1191 */
1192 if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
1193 dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
1194 DMA_TO_DEVICE);
1195}
1196
1197static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, 1212static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
1198 unsigned long end) 1213 unsigned long end)
1199{ 1214{
@@ -1210,12 +1225,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
1210 1225
1211 if (pmd_none(*pmd)) { 1226 if (pmd_none(*pmd)) {
1212 /* Allocate a new set of tables */ 1227 /* Allocate a new set of tables */
1213 pgtable_t table = alloc_page(PGALLOC_GFP); 1228 pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
1214 if (!table) 1229 if (!table)
1215 return -ENOMEM; 1230 return -ENOMEM;
1216 1231
1217 arm_smmu_flush_pgtable(smmu, page_address(table), 1232 arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
1218 ARM_SMMU_PTE_HWTABLE_SIZE);
1219 if (!pgtable_page_ctor(table)) { 1233 if (!pgtable_page_ctor(table)) {
1220 __free_page(table); 1234 __free_page(table);
1221 return -ENOMEM; 1235 return -ENOMEM;
@@ -1317,9 +1331,15 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1317 1331
1318#ifndef __PAGETABLE_PMD_FOLDED 1332#ifndef __PAGETABLE_PMD_FOLDED
1319 if (pud_none(*pud)) { 1333 if (pud_none(*pud)) {
1320 pmd = pmd_alloc_one(NULL, addr); 1334 pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
1321 if (!pmd) 1335 if (!pmd)
1322 return -ENOMEM; 1336 return -ENOMEM;
1337
1338 arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
1339 pud_populate(NULL, pud, pmd);
1340 arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
1341
1342 pmd += pmd_index(addr);
1323 } else 1343 } else
1324#endif 1344#endif
1325 pmd = pmd_offset(pud, addr); 1345 pmd = pmd_offset(pud, addr);
@@ -1328,8 +1348,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1328 next = pmd_addr_end(addr, end); 1348 next = pmd_addr_end(addr, end);
1329 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, 1349 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
1330 flags, stage); 1350 flags, stage);
1331 pud_populate(NULL, pud, pmd);
1332 arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
1333 phys += next - addr; 1351 phys += next - addr;
1334 } while (pmd++, addr = next, addr < end); 1352 } while (pmd++, addr = next, addr < end);
1335 1353
@@ -1346,9 +1364,15 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
1346 1364
1347#ifndef __PAGETABLE_PUD_FOLDED 1365#ifndef __PAGETABLE_PUD_FOLDED
1348 if (pgd_none(*pgd)) { 1366 if (pgd_none(*pgd)) {
1349 pud = pud_alloc_one(NULL, addr); 1367 pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
1350 if (!pud) 1368 if (!pud)
1351 return -ENOMEM; 1369 return -ENOMEM;
1370
1371 arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
1372 pgd_populate(NULL, pgd, pud);
1373 arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
1374
1375 pud += pud_index(addr);
1352 } else 1376 } else
1353#endif 1377#endif
1354 pud = pud_offset(pgd, addr); 1378 pud = pud_offset(pgd, addr);
@@ -1357,8 +1381,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
1357 next = pud_addr_end(addr, end); 1381 next = pud_addr_end(addr, end);
1358 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, 1382 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
1359 flags, stage); 1383 flags, stage);
1360 pgd_populate(NULL, pud, pgd);
1361 arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
1362 phys += next - addr; 1384 phys += next - addr;
1363 } while (pud++, addr = next, addr < end); 1385 } while (pud++, addr = next, addr < end);
1364 1386
@@ -1375,6 +1397,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1375 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1397 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
1376 pgd_t *pgd = root_cfg->pgd; 1398 pgd_t *pgd = root_cfg->pgd;
1377 struct arm_smmu_device *smmu = root_cfg->smmu; 1399 struct arm_smmu_device *smmu = root_cfg->smmu;
1400 unsigned long irqflags;
1378 1401
1379 if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { 1402 if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
1380 stage = 2; 1403 stage = 2;
@@ -1397,7 +1420,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1397 if (paddr & ~output_mask) 1420 if (paddr & ~output_mask)
1398 return -ERANGE; 1421 return -ERANGE;
1399 1422
1400 mutex_lock(&smmu_domain->lock); 1423 spin_lock_irqsave(&smmu_domain->lock, irqflags);
1401 pgd += pgd_index(iova); 1424 pgd += pgd_index(iova);
1402 end = iova + size; 1425 end = iova + size;
1403 do { 1426 do {
@@ -1413,11 +1436,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1413 } while (pgd++, iova != end); 1436 } while (pgd++, iova != end);
1414 1437
1415out_unlock: 1438out_unlock:
1416 mutex_unlock(&smmu_domain->lock); 1439 spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
1417
1418 /* Ensure new page tables are visible to the hardware walker */
1419 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
1420 dsb();
1421 1440
1422 return ret; 1441 return ret;
1423} 1442}
@@ -1987,8 +2006,10 @@ static int __init arm_smmu_init(void)
1987 if (!iommu_present(&platform_bus_type)) 2006 if (!iommu_present(&platform_bus_type))
1988 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 2007 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1989 2008
2009#ifdef CONFIG_ARM_AMBA
1990 if (!iommu_present(&amba_bustype)) 2010 if (!iommu_present(&amba_bustype))
1991 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 2011 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2012#endif
1992 2013
1993 return 0; 2014 return 0;
1994} 2015}
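
Most of the arm-smmu hunks above follow from one decision: smmu_domain->lock becomes a spinlock so that iommu map/unmap can be called from atomic context. Everything done under that lock must then be non-sleeping, which is why the page-table allocations switch to GFP_ATOMIC and why the table flush moves into arm_smmu_flush_pgtable(), invoked as each table is allocated instead of once at the end of arm_smmu_handle_mapping(). A stripped-down sketch of the locking shape, with made-up names:

    struct example_domain {
            spinlock_t lock;                /* was: struct mutex lock */
            /* ... page-table root, context-bank config ... */
    };

    static int example_map(struct example_domain *dom, unsigned long iova,
                           phys_addr_t paddr, size_t size)
    {
            unsigned long flags;
            void *new_table;
            int ret = 0;

            spin_lock_irqsave(&dom->lock, flags);
            /*
             * Sleeping is no longer allowed here, so page-table memory
             * has to come from GFP_ATOMIC rather than GFP_KERNEL.
             */
            new_table = (void *)get_zeroed_page(GFP_ATOMIC);
            if (!new_table)
                    ret = -ENOMEM;
            /* ... install new_table, fill entries for [iova, iova + size) ... */
            spin_unlock_irqrestore(&dom->lock, flags);
            return ret;
    }
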
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index d97fbe4fb9b1..80fffba7f12d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -354,8 +354,8 @@ DEBUG_FOPS(mem);
354 return -ENOMEM; \ 354 return -ENOMEM; \
355 } 355 }
356 356
357#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600) 357#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
358#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400) 358#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
359 359
360static int iommu_debug_register(struct device *dev, void *data) 360static int iommu_debug_register(struct device *dev, void *data)
361{ 361{
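
The omap-iommu-debug change is a units fix: debugfs file modes are octal, and a bare 600 is a decimal constant (01130 octal) that grants permission bits that were never intended, while 0600 is plain owner read/write. A standalone check of the two constants:

    #include <stdio.h>

    int main(void)
    {
            printf("600 decimal is %o in octal\n", 600);     /* prints 1130 */
            printf("0600 octal is %d in decimal\n", 0600);   /* prints 384  */
            return 0;
    }
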
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d40031884..8e41be62812e 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
111static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 111static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
112{ 112{
113 struct irq_domain *d = irq_get_handler_data(irq); 113 struct irq_domain *d = irq_get_handler_data(irq);
114 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq); 114
115 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
115 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & 116 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
116 gc->mask_cache; 117 gc->mask_cache;
117 118
@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
123 } 124 }
124} 125}
125 126
127/*
128 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
129 * To avoid interrupt events on stale irqs, we clear them before unmask.
130 */
131static unsigned int orion_bridge_irq_startup(struct irq_data *d)
132{
133 struct irq_chip_type *ct = irq_data_get_chip_type(d);
134
135 ct->chip.irq_ack(d);
136 ct->chip.irq_unmask(d);
137 return 0;
138}
139
126static int __init orion_bridge_irq_init(struct device_node *np, 140static int __init orion_bridge_irq_init(struct device_node *np,
127 struct device_node *parent) 141 struct device_node *parent)
128{ 142{
@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
143 } 157 }
144 158
145 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, 159 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
146 handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 160 handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
147 if (ret) { 161 if (ret) {
148 pr_err("%s: unable to alloc irq domain gc\n", np->name); 162 pr_err("%s: unable to alloc irq domain gc\n", np->name);
149 return ret; 163 return ret;
@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
176 190
177 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; 191 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
178 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; 192 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
193 gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
179 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; 194 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
180 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 195 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
181 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 196 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
182 197
183 /* mask all interrupts */ 198 /* mask and clear all interrupts */
184 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); 199 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
200 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
185 201
186 irq_set_handler_data(irq, domain); 202 irq_set_handler_data(irq, domain);
187 irq_set_chained_handler(irq, orion_bridge_irq_handler); 203 irq_set_chained_handler(irq, orion_bridge_irq_handler);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
810 dp += sprintf(dp, " octet 3 "); 810 dp += sprintf(dp, " octet 3 ");
811 dp += prbits(dp, *p, 8, 8); 811 dp += prbits(dp, *p, 8, 8);
812 *dp++ = '\n'; 812 *dp++ = '\n';
813 if (!(*p++ & 80)) { 813 if (!(*p++ & 0x80)) {
814 dp += sprintf(dp, " octet 4 "); 814 dp += sprintf(dp, " octet 4 ");
815 dp += prbits(dp, *p++, 8, 8); 815 dp += prbits(dp, *p++, 8, 8);
816 *dp++ = '\n'; 816 *dp++ = '\n';
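
The hisax fix changes a decimal 80 (which is 0x50) to 0x80: the code wants to test the high "extension" bit of the Q.931 feature-indication octet, and the decimal constant never touched that bit. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned char octet = 0x80;     /* extension bit set, nothing else */

            printf("octet & 80   = 0x%02x  (80 == 0x50, misses the bit)\n", octet & 80);
            printf("octet & 0x80 = 0x%02x  (tests the intended bit)\n", octet & 0x80);
            return 0;
    }
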
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
210#define GC_MARK_RECLAIMABLE 0 210#define GC_MARK_RECLAIMABLE 0
211#define GC_MARK_DIRTY 1 211#define GC_MARK_DIRTY 1
212#define GC_MARK_METADATA 2 212#define GC_MARK_METADATA 2
213BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); 213#define GC_SECTORS_USED_SIZE 13
214#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
215BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
214BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); 216BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
215 217
216#include "journal.h" 218#include "journal.h"
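
The new MAX_GC_SECTORS_USED derives the clamp value from the declared field width instead of the hard-coded (1 << 14) - 1 used at the call site in btree.c (see the hunk there below), which was one bit wider than the 13-bit GC_SECTORS_USED field and could overflow it. The ~(~0ULL << n) idiom gives the largest value an n-bit field can hold:

    #include <stdio.h>

    #define FIELD_MAX(bits) (~(~0ULL << (bits)))

    int main(void)
    {
            /* 13-bit field: 0x1fff == 8191 */
            printf("FIELD_MAX(13) = %llu\n", (unsigned long long)FIELD_MAX(13));
            /* the old clamp, one bit too wide: 16383 */
            printf("(1 << 14) - 1 = %d\n", (1 << 14) - 1);
            return 0;
    }
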
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
23 for (k = i->start; k < bset_bkey_last(i); k = next) { 23 for (k = i->start; k < bset_bkey_last(i); k = next) {
24 next = bkey_next(k); 24 next = bkey_next(k);
25 25
26 printk(KERN_ERR "block %u key %zi/%u: ", set, 26 printk(KERN_ERR "block %u key %li/%u: ", set,
27 (uint64_t *) k - i->d, i->keys); 27 (uint64_t *) k - i->d, i->keys);
28 28
29 if (b->ops->key_dump) 29 if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
1185 struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, 1185 struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
1186 order); 1186 order);
1187 if (!out) { 1187 if (!out) {
1188 struct page *outp;
1189
1188 BUG_ON(order > state->page_order); 1190 BUG_ON(order > state->page_order);
1189 1191
1190 out = page_address(mempool_alloc(state->pool, GFP_NOIO)); 1192 outp = mempool_alloc(state->pool, GFP_NOIO);
1193 out = page_address(outp);
1191 used_mempool = true; 1194 used_mempool = true;
1192 order = state->page_order; 1195 order = state->page_order;
1193 } 1196 }
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
1167 /* guard against overflow */ 1167 /* guard against overflow */
1168 SET_GC_SECTORS_USED(g, min_t(unsigned, 1168 SET_GC_SECTORS_USED(g, min_t(unsigned,
1169 GC_SECTORS_USED(g) + KEY_SIZE(k), 1169 GC_SECTORS_USED(g) + KEY_SIZE(k),
1170 (1 << 14) - 1)); 1170 MAX_GC_SECTORS_USED));
1171 1171
1172 BUG_ON(!GC_SECTORS_USED(g)); 1172 BUG_ON(!GC_SECTORS_USED(g));
1173 } 1173 }
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
1805 1805
1806static size_t insert_u64s_remaining(struct btree *b) 1806static size_t insert_u64s_remaining(struct btree *b)
1807{ 1807{
1808 ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys); 1808 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1809 1809
1810 /* 1810 /*
1811 * Might land in the middle of an existing extent and have to split it 1811 * Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
194 mutex_unlock(&b->c->bucket_lock); 194 mutex_unlock(&b->c->bucket_lock);
195 bch_extent_to_text(buf, sizeof(buf), k); 195 bch_extent_to_text(buf, sizeof(buf), k);
196 btree_bug(b, 196 btree_bug(b,
197"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", 197"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), 198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
199 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); 199 g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
200 return true; 200 return true;
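
The two bcache printk fixes (this one and the one in bset.c above) are about matching format specifiers to argument types: the bucket number from PTR_BUCKET_NR() is size_t-sized and takes %zi, while the pointer difference in bch_dump_bset() is a long-sized value and takes %li. A standalone reminder of the usual pairings:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t nbuckets = 1024;
            long offset = -8;
            ptrdiff_t delta = 16;

            printf("size_t with %%zu:    %zu\n", nbuckets);
            printf("long with %%ld:      %ld\n", offset);
            printf("ptrdiff_t with %%td: %td\n", delta);
            return 0;
    }
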
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
353 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 353 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
354 struct bio *bio = op->bio, *n; 354 struct bio *bio = op->bio, *n;
355 355
356 if (op->bypass)
357 return bch_data_invalidate(cl);
358
359 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { 356 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
360 set_gc_sectors(op->c); 357 set_gc_sectors(op->c);
361 wake_up_gc(op->c); 358 wake_up_gc(op->c);
362 } 359 }
363 360
361 if (op->bypass)
362 return bch_data_invalidate(cl);
363
364 /* 364 /*
365 * Journal writes are marked REQ_FLUSH; if the original write was a 365 * Journal writes are marked REQ_FLUSH; if the original write was a
366 * flush, it'll wait on the journal write. 366 * flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
416 return MAP_CONTINUE; 416 return MAP_CONTINUE;
417} 417}
418 418
419int bch_bset_print_stats(struct cache_set *c, char *buf) 419static int bch_bset_print_stats(struct cache_set *c, char *buf)
420{ 420{
421 struct bset_stats_op op; 421 struct bset_stats_op op;
422 int ret; 422 int ret;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
1953 for (i = 0; i < conf->raid_disks * 2; i++) { 1953 for (i = 0; i < conf->raid_disks * 2; i++) {
1954 int j; 1954 int j;
1955 int size; 1955 int size;
1956 int uptodate;
1956 struct bio *b = r1_bio->bios[i]; 1957 struct bio *b = r1_bio->bios[i];
1957 if (b->bi_end_io != end_sync_read) 1958 if (b->bi_end_io != end_sync_read)
1958 continue; 1959 continue;
1959 /* fixup the bio for reuse */ 1960 /* fixup the bio for reuse, but preserve BIO_UPTODATE */
1961 uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
1960 bio_reset(b); 1962 bio_reset(b);
1963 if (!uptodate)
1964 clear_bit(BIO_UPTODATE, &b->bi_flags);
1961 b->bi_vcnt = vcnt; 1965 b->bi_vcnt = vcnt;
1962 b->bi_iter.bi_size = r1_bio->sectors << 9; 1966 b->bi_iter.bi_size = r1_bio->sectors << 9;
1963 b->bi_iter.bi_sector = r1_bio->sector + 1967 b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
1990 int j; 1994 int j;
1991 struct bio *pbio = r1_bio->bios[primary]; 1995 struct bio *pbio = r1_bio->bios[primary];
1992 struct bio *sbio = r1_bio->bios[i]; 1996 struct bio *sbio = r1_bio->bios[i];
1997 int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
1993 1998
1994 if (sbio->bi_end_io != end_sync_read) 1999 if (sbio->bi_end_io != end_sync_read)
1995 continue; 2000 continue;
2001 /* Now we can 'fixup' the BIO_UPTODATE flag */
2002 set_bit(BIO_UPTODATE, &sbio->bi_flags);
1996 2003
1997 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { 2004 if (uptodate) {
1998 for (j = vcnt; j-- ; ) { 2005 for (j = vcnt; j-- ; ) {
1999 struct page *p, *s; 2006 struct page *p, *s;
2000 p = pbio->bi_io_vec[j].bv_page; 2007 p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
2009 if (j >= 0) 2016 if (j >= 0)
2010 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2017 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2011 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 2018 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2012 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 2019 && uptodate)) {
2013 /* No need to write to this device. */ 2020 /* No need to write to this device. */
2014 sbio->bi_end_io = NULL; 2021 sbio->bi_end_io = NULL;
2015 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 2022 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
5514 return sectors * (raid_disks - conf->max_degraded); 5514 return sectors * (raid_disks - conf->max_degraded);
5515} 5515}
5516 5516
5517static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
5518{
5519 safe_put_page(percpu->spare_page);
5520 kfree(percpu->scribble);
5521 percpu->spare_page = NULL;
5522 percpu->scribble = NULL;
5523}
5524
5525static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
5526{
5527 if (conf->level == 6 && !percpu->spare_page)
5528 percpu->spare_page = alloc_page(GFP_KERNEL);
5529 if (!percpu->scribble)
5530 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
5531
5532 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
5533 free_scratch_buffer(conf, percpu);
5534 return -ENOMEM;
5535 }
5536
5537 return 0;
5538}
5539
5517static void raid5_free_percpu(struct r5conf *conf) 5540static void raid5_free_percpu(struct r5conf *conf)
5518{ 5541{
5519 struct raid5_percpu *percpu;
5520 unsigned long cpu; 5542 unsigned long cpu;
5521 5543
5522 if (!conf->percpu) 5544 if (!conf->percpu)
5523 return; 5545 return;
5524 5546
5525 get_online_cpus();
5526 for_each_possible_cpu(cpu) {
5527 percpu = per_cpu_ptr(conf->percpu, cpu);
5528 safe_put_page(percpu->spare_page);
5529 kfree(percpu->scribble);
5530 }
5531#ifdef CONFIG_HOTPLUG_CPU 5547#ifdef CONFIG_HOTPLUG_CPU
5532 unregister_cpu_notifier(&conf->cpu_notify); 5548 unregister_cpu_notifier(&conf->cpu_notify);
5533#endif 5549#endif
5550
5551 get_online_cpus();
5552 for_each_possible_cpu(cpu)
5553 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5534 put_online_cpus(); 5554 put_online_cpus();
5535 5555
5536 free_percpu(conf->percpu); 5556 free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
5557 switch (action) { 5577 switch (action) {
5558 case CPU_UP_PREPARE: 5578 case CPU_UP_PREPARE:
5559 case CPU_UP_PREPARE_FROZEN: 5579 case CPU_UP_PREPARE_FROZEN:
5560 if (conf->level == 6 && !percpu->spare_page) 5580 if (alloc_scratch_buffer(conf, percpu)) {
5561 percpu->spare_page = alloc_page(GFP_KERNEL);
5562 if (!percpu->scribble)
5563 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
5564
5565 if (!percpu->scribble ||
5566 (conf->level == 6 && !percpu->spare_page)) {
5567 safe_put_page(percpu->spare_page);
5568 kfree(percpu->scribble);
5569 pr_err("%s: failed memory allocation for cpu%ld\n", 5581 pr_err("%s: failed memory allocation for cpu%ld\n",
5570 __func__, cpu); 5582 __func__, cpu);
5571 return notifier_from_errno(-ENOMEM); 5583 return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
5573 break; 5585 break;
5574 case CPU_DEAD: 5586 case CPU_DEAD:
5575 case CPU_DEAD_FROZEN: 5587 case CPU_DEAD_FROZEN:
5576 safe_put_page(percpu->spare_page); 5588 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5577 kfree(percpu->scribble);
5578 percpu->spare_page = NULL;
5579 percpu->scribble = NULL;
5580 break; 5589 break;
5581 default: 5590 default:
5582 break; 5591 break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
5588static int raid5_alloc_percpu(struct r5conf *conf) 5597static int raid5_alloc_percpu(struct r5conf *conf)
5589{ 5598{
5590 unsigned long cpu; 5599 unsigned long cpu;
5591 struct page *spare_page; 5600 int err = 0;
5592 struct raid5_percpu __percpu *allcpus;
5593 void *scribble;
5594 int err;
5595 5601
5596 allcpus = alloc_percpu(struct raid5_percpu); 5602 conf->percpu = alloc_percpu(struct raid5_percpu);
5597 if (!allcpus) 5603 if (!conf->percpu)
5598 return -ENOMEM; 5604 return -ENOMEM;
5599 conf->percpu = allcpus; 5605
5606#ifdef CONFIG_HOTPLUG_CPU
5607 conf->cpu_notify.notifier_call = raid456_cpu_notify;
5608 conf->cpu_notify.priority = 0;
5609 err = register_cpu_notifier(&conf->cpu_notify);
5610 if (err)
5611 return err;
5612#endif
5600 5613
5601 get_online_cpus(); 5614 get_online_cpus();
5602 err = 0;
5603 for_each_present_cpu(cpu) { 5615 for_each_present_cpu(cpu) {
5604 if (conf->level == 6) { 5616 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
5605 spare_page = alloc_page(GFP_KERNEL); 5617 if (err) {
5606 if (!spare_page) { 5618 pr_err("%s: failed memory allocation for cpu%ld\n",
5607 err = -ENOMEM; 5619 __func__, cpu);
5608 break;
5609 }
5610 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
5611 }
5612 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
5613 if (!scribble) {
5614 err = -ENOMEM;
5615 break; 5620 break;
5616 } 5621 }
5617 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
5618 } 5622 }
5619#ifdef CONFIG_HOTPLUG_CPU
5620 conf->cpu_notify.notifier_call = raid456_cpu_notify;
5621 conf->cpu_notify.priority = 0;
5622 if (err == 0)
5623 err = register_cpu_notifier(&conf->cpu_notify);
5624#endif
5625 put_online_cpus(); 5623 put_online_cpus();
5626 5624
5627 return err; 5625 return err;
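
The raid5 rework above pulls the per-CPU spare page and scribble buffer handling into alloc_scratch_buffer()/free_scratch_buffer(), so raid5_alloc_percpu(), the CPU hotplug notifier and raid5_free_percpu() all share one allocation routine and one symmetric cleanup (and the notifier is now registered before the initial allocation loop rather than after it). A stripped-down sketch of that helper shape, with illustrative names:

    struct percpu_scratch {
            struct page *spare_page;
            void *scribble;
    };

    static void free_scratch(struct percpu_scratch *p)
    {
            /* safe on partially initialised state: unset fields are NULL */
            if (p->spare_page)
                    __free_page(p->spare_page);
            kfree(p->scribble);
            p->spare_page = NULL;
            p->scribble = NULL;
    }

    static int alloc_scratch(struct percpu_scratch *p, size_t scribble_len,
                             bool need_spare_page)
    {
            if (need_spare_page && !p->spare_page)
                    p->spare_page = alloc_page(GFP_KERNEL);
            if (!p->scribble)
                    p->scribble = kmalloc(scribble_len, GFP_KERNEL);

            if (!p->scribble || (need_spare_page && !p->spare_page)) {
                    free_scratch(p);
                    return -ENOMEM;
            }
            return 0;
    }
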
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
754 unsigned long arg) 754 unsigned long arg)
755{ 755{
756 int ret; 756 int ret;
757 mutex_lock(&i2o_cfg_mutex);
758 switch (cmd) { 757 switch (cmd) {
759 case I2OGETIOPS: 758 case I2OGETIOPS:
760 ret = i2o_cfg_ioctl(file, cmd, arg); 759 ret = i2o_cfg_ioctl(file, cmd, arg);
761 break; 760 break;
762 case I2OPASSTHRU32: 761 case I2OPASSTHRU32:
762 mutex_lock(&i2o_cfg_mutex);
763 ret = i2o_cfg_passthru32(file, cmd, arg); 763 ret = i2o_cfg_passthru32(file, cmd, arg);
764 mutex_unlock(&i2o_cfg_mutex);
764 break; 765 break;
765 default: 766 default:
766 ret = -ENOIOCTLCMD; 767 ret = -ENOIOCTLCMD;
767 break; 768 break;
768 } 769 }
769 mutex_unlock(&i2o_cfg_mutex);
770 return ret; 770 return ret;
771} 771}
772 772
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 13af7e50021e..8103e4362132 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
53 return 0; 53 return 0;
54} 54}
55 55
56/*
57 * DO NOT change the device Ids. The naming is intentionally specific as both
58 * the PMIC and CODEC parts of this chip are instantiated separately as I2C
59 * devices (both have configurable I2C addresses, and are to all intents and
60 * purposes separate). As a result there are specific DA9055 ids for PMIC
61 * and CODEC, which must be different to operate together.
62 */
56static struct i2c_device_id da9055_i2c_id[] = { 63static struct i2c_device_id da9055_i2c_id[] = {
57 {"da9055", 0}, 64 {"da9055-pmic", 0},
58 { } 65 { }
59}; 66};
67MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
60 68
61static struct i2c_driver da9055_i2c_driver = { 69static struct i2c_driver da9055_i2c_driver = {
62 .probe = da9055_i2c_probe, 70 .probe = da9055_i2c_probe,
63 .remove = da9055_i2c_remove, 71 .remove = da9055_i2c_remove,
64 .id_table = da9055_i2c_id, 72 .id_table = da9055_i2c_id,
65 .driver = { 73 .driver = {
66 .name = "da9055", 74 .name = "da9055-pmic",
67 .owner = THIS_MODULE, 75 .owner = THIS_MODULE,
68 }, 76 },
69}; 77};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb2b877..71aa14a6bfbb 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@ static const struct i2c_device_id max14577_i2c_id[] = {
173}; 173};
174MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); 174MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
175 175
176#ifdef CONFIG_PM_SLEEP
176static int max14577_suspend(struct device *dev) 177static int max14577_suspend(struct device *dev)
177{ 178{
178 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 179 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@ static int max14577_resume(struct device *dev)
208 209
209 return 0; 210 return 0;
210} 211}
212#endif /* CONFIG_PM_SLEEP */
211 213
212static struct of_device_id max14577_dt_match[] = { 214static struct of_device_id max14577_dt_match[] = {
213 { .compatible = "maxim,max14577", }, 215 { .compatible = "maxim,max14577", },
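
The max14577 hunks (and the matching sec-core change further down) wrap the suspend/resume callbacks in #ifdef CONFIG_PM_SLEEP. That mirrors how SIMPLE_DEV_PM_OPS() behaves: when CONFIG_PM_SLEEP is disabled the ops structure does not reference the callbacks, so leaving them unguarded produces "defined but not used" warnings. The usual shape, sketched with placeholder names:

    #ifdef CONFIG_PM_SLEEP
    static int example_suspend(struct device *dev)
    {
            /* quiesce the device, arm its wakeup IRQ, etc. */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            return 0;
    }
    #endif /* CONFIG_PM_SLEEP */

    /* expands to an empty ops table when CONFIG_PM_SLEEP is not set */
    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
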
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3bf7b85..5adede0fb04c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
164 return pd; 164 return pd;
165} 165}
166 166
167static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c, 167static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
168 const struct i2c_device_id *id) 168 const struct i2c_device_id *id)
169{ 169{
170 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 170 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
171 const struct of_device_id *match; 171 const struct of_device_id *match;
172 match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node); 172 match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
173 return (int)match->data; 173 return (unsigned long)match->data;
174 } 174 }
175 return (int)id->driver_data; 175 return id->driver_data;
176} 176}
177 177
178static int max8997_i2c_probe(struct i2c_client *i2c, 178static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 612ca404e150..5d5e186b5d8b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@ static struct max8998_platform_data *max8998_i2c_parse_dt_pdata(
169 return pd; 169 return pd;
170} 170}
171 171
172static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c, 172static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
173 const struct i2c_device_id *id) 173 const struct i2c_device_id *id)
174{ 174{
175 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 175 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
176 const struct of_device_id *match; 176 const struct of_device_id *match;
177 match = of_match_node(max8998_dt_match, i2c->dev.of_node); 177 match = of_match_node(max8998_dt_match, i2c->dev.of_node);
178 return (int)(long)match->data; 178 return (unsigned long)match->data;
179 } 179 }
180 180
181 return (int)id->driver_data; 181 return id->driver_data;
182} 182}
183 183
184static int max8998_i2c_probe(struct i2c_client *i2c, 184static int max8998_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index a139798b8065..714e2135210e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -315,6 +315,7 @@ static int sec_pmic_remove(struct i2c_client *i2c)
315 return 0; 315 return 0;
316} 316}
317 317
318#ifdef CONFIG_PM_SLEEP
318static int sec_pmic_suspend(struct device *dev) 319static int sec_pmic_suspend(struct device *dev)
319{ 320{
320 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 321 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -349,6 +350,7 @@ static int sec_pmic_resume(struct device *dev)
349 350
350 return 0; 351 return 0;
351} 352}
353#endif /* CONFIG_PM_SLEEP */
352 354
353static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume); 355static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
354 356
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 966cf65c5c36..3cc4c7084b92 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@ static int tps65217_probe(struct i2c_client *client,
158{ 158{
159 struct tps65217 *tps; 159 struct tps65217 *tps;
160 unsigned int version; 160 unsigned int version;
161 unsigned int chip_id = ids->driver_data; 161 unsigned long chip_id = ids->driver_data;
162 const struct of_device_id *match; 162 const struct of_device_id *match;
163 bool status_off = false; 163 bool status_off = false;
164 int ret; 164 int ret;
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client,
170 "Failed to find matching dt id\n"); 170 "Failed to find matching dt id\n");
171 return -EINVAL; 171 return -EINVAL;
172 } 172 }
173 chip_id = (unsigned int)(unsigned long)match->data; 173 chip_id = (unsigned long)match->data;
174 status_off = of_property_read_bool(client->dev.of_node, 174 status_off = of_property_read_bool(client->dev.of_node,
175 "ti,pmic-shutdown-controller"); 175 "ti,pmic-shutdown-controller");
176 } 176 }
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1bc70eb..e6fab94e2c8a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
636 if (i2c->dev.of_node) { 636 if (i2c->dev.of_node) {
637 of_id = of_match_device(wm8994_of_match, &i2c->dev); 637 of_id = of_match_device(wm8994_of_match, &i2c->dev);
638 if (of_id) 638 if (of_id)
639 wm8994->type = (int)of_id->data; 639 wm8994->type = (enum wm8994_type)of_id->data;
640 } else { 640 } else {
641 wm8994->type = id->driver_data; 641 wm8994->type = id->driver_data;
642 } 642 }
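
The max8997, max8998, tps65217 and wm8994 changes above all fix the same class of warning: of_device_id.data and i2c driver_data are pointer-sized, so round-tripping them through int truncates to 32 bits on 64-bit builds, while going through unsigned long (or straight to the final enum/long type) keeps the full width. A standalone illustration of why the intermediate type matters on an LP64 target:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* stand-in for the pointer stashed in of_device_id.data */
            const void *data = (const void *)(uintptr_t)0x1122334455667788ULL;

            int truncated = (int)(uintptr_t)data;            /* loses the top 32 bits */
            unsigned long full = (unsigned long)(uintptr_t)data;

            printf("through int:           0x%x\n", (unsigned int)truncated);
            printf("through unsigned long: 0x%lx\n", full);
            return 0;
    }
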
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
787 if (rc != 0) { 787 if (rc != 0) {
788 dev_err(&pci_dev->dev, 788 dev_err(&pci_dev->dev,
789 "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); 789 "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
790 kfree(dma_map);
790 return rc; 791 return rc;
791 } 792 }
792 793
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..89a557972d1b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -666,7 +666,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
666 goto err; 666 goto err;
667 667
668 cb->fop_type = MEI_FOP_READ; 668 cb->fop_type = MEI_FOP_READ;
669 cl->read_cb = cb;
670 if (dev->hbuf_is_ready) { 669 if (dev->hbuf_is_ready) {
671 dev->hbuf_is_ready = false; 670 dev->hbuf_is_ready = false;
672 if (mei_hbm_cl_flow_control_req(dev, cl)) { 671 if (mei_hbm_cl_flow_control_req(dev, cl)) {
@@ -678,6 +677,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
678 } else { 677 } else {
679 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 678 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
680 } 679 }
680
681 cl->read_cb = cb;
682
681 return rets; 683 return rets;
682err: 684err:
683 mei_io_cb_free(cb); 685 mei_io_cb_free(cb);
@@ -908,7 +910,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
908 list_for_each_entry_safe(cl, next, &dev->file_list, link) { 910 list_for_each_entry_safe(cl, next, &dev->file_list, link) {
909 cl->state = MEI_FILE_DISCONNECTED; 911 cl->state = MEI_FILE_DISCONNECTED;
910 cl->mei_flow_ctrl_creds = 0; 912 cl->mei_flow_ctrl_creds = 0;
911 cl->read_cb = NULL;
912 cl->timer_count = 0; 913 cl->timer_count = 0;
913 } 914 }
914} 915}
@@ -942,8 +943,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
942void mei_cl_all_write_clear(struct mei_device *dev) 943void mei_cl_all_write_clear(struct mei_device *dev)
943{ 944{
944 struct mei_cl_cb *cb, *next; 945 struct mei_cl_cb *cb, *next;
946 struct list_head *list;
947
948 list = &dev->write_list.list;
949 list_for_each_entry_safe(cb, next, list, list) {
950 list_del(&cb->list);
951 mei_io_cb_free(cb);
952 }
945 953
946 list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { 954 list = &dev->write_waiting_list.list;
955 list_for_each_entry_safe(cb, next, list, list) {
947 list_del(&cb->list); 956 list_del(&cb->list);
948 mei_io_cb_free(cb); 957 mei_io_cb_free(cb);
949 } 958 }
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
156static int _mic_virtio_copy(struct mic_vdev *mvdev, 156static int _mic_virtio_copy(struct mic_vdev *mvdev,
157 struct mic_copy_desc *copy) 157 struct mic_copy_desc *copy)
158{ 158{
159 int ret = 0, iovcnt = copy->iovcnt; 159 int ret = 0;
160 u32 iovcnt = copy->iovcnt;
160 struct iovec iov; 161 struct iovec iov;
161 struct iovec __user *u_iov = copy->iov; 162 struct iovec __user *u_iov = copy->iov;
162 void __user *ubuf = NULL; 163 void __user *ubuf = NULL;
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
139 139
140 ubuf += sizeof(hdr); 140 ubuf += sizeof(hdr);
141 ubufcch = ubuf; 141 ubufcch = ubuf;
142 if (gru_user_copy_handle(&ubuf, cch)) 142 if (gru_user_copy_handle(&ubuf, cch)) {
143 goto fail; 143 if (cch_locked)
144 unlock_cch_handle(cch);
145 return -EFAULT;
146 }
144 if (cch_locked) 147 if (cch_locked)
145 ubufcch->delresp = 0; 148 ubufcch->delresp = 0;
146 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; 149 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
179 ret = -EFAULT; 182 ret = -EFAULT;
180 183
181 return ret ? ret : bytes; 184 return ret ? ret : bytes;
182
183fail:
184 unlock_cch_handle(cch);
185 return -EFAULT;
186} 185}
187 186
188int gru_dump_chiplet_request(unsigned long arg) 187int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
198 198
199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
200 limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 200 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
201 201
202 mq->card = card; 202 mq->card = card;
203 mq->queue = blk_init_queue(mmc_request_fn, lock); 203 mq->queue = blk_init_queue(mmc_request_fn, lock);
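
The one-line mmc change casts dma_max_pfn() to u64 before shifting by PAGE_SHIFT. Without the cast the shift happens in the PFN's own (commonly 32-bit) type, so a device able to address 4 GB or more wraps to a bogus small limit. A standalone demonstration with a hypothetical PFN limit:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t max_pfn = 0x00100000;          /* hypothetical: PFN limit at 4 GB */
            unsigned int page_shift = 12;

            uint32_t wrong = max_pfn << page_shift;             /* wraps to 0 */
            uint64_t right = (uint64_t)max_pfn << page_shift;   /* 0x100000000 */

            printf("shift in 32 bits: 0x%x\n", wrong);
            printf("shift in 64 bits: 0x%llx\n", (unsigned long long)right);
            return 0;
    }
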
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 59eba5d2c685..9715a7ba164a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1584,7 +1584,7 @@ read_retry:
1584 } 1584 }
1585 1585
1586 if (mtd->ecc_stats.failed - ecc_failures) { 1586 if (mtd->ecc_stats.failed - ecc_failures) {
1587 if (retry_mode + 1 <= chip->read_retries) { 1587 if (retry_mode + 1 < chip->read_retries) {
1588 retry_mode++; 1588 retry_mode++;
1589 ret = nand_setup_read_retry(mtd, 1589 ret = nand_setup_read_retry(mtd,
1590 retry_mode); 1590 retry_mode);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ef4190a02b7b..bf642ceef681 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1633,6 +1633,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1633 int i; 1633 int i;
1634 dma_cap_mask_t mask; 1634 dma_cap_mask_t mask;
1635 unsigned sig; 1635 unsigned sig;
1636 unsigned oob_index;
1636 struct resource *res; 1637 struct resource *res;
1637 struct mtd_part_parser_data ppdata = {}; 1638 struct mtd_part_parser_data ppdata = {};
1638 1639
@@ -1826,11 +1827,14 @@ static int omap_nand_probe(struct platform_device *pdev)
1826 (mtd->writesize / 1827 (mtd->writesize /
1827 nand_chip->ecc.size); 1828 nand_chip->ecc.size);
1828 if (nand_chip->options & NAND_BUSWIDTH_16) 1829 if (nand_chip->options & NAND_BUSWIDTH_16)
1829 ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1830 oob_index = BADBLOCK_MARKER_LENGTH;
1830 else 1831 else
1831 ecclayout->eccpos[0] = 1; 1832 oob_index = 1;
1832 ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1833 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
1833 ecclayout->eccbytes; 1834 ecclayout->eccpos[i] = oob_index;
1835 /* no reserved-marker in ecclayout for this ecc-scheme */
1836 ecclayout->oobfree->offset =
1837 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1834 break; 1838 break;
1835 1839
1836 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: 1840 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1847,9 +1851,15 @@ static int omap_nand_probe(struct platform_device *pdev)
1847 ecclayout->eccbytes = nand_chip->ecc.bytes * 1851 ecclayout->eccbytes = nand_chip->ecc.bytes *
1848 (mtd->writesize / 1852 (mtd->writesize /
1849 nand_chip->ecc.size); 1853 nand_chip->ecc.size);
1850 ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1854 oob_index = BADBLOCK_MARKER_LENGTH;
1851 ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1855 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
1852 ecclayout->eccbytes; 1856 ecclayout->eccpos[i] = oob_index;
1857 if (((i + 1) % nand_chip->ecc.bytes) == 0)
1858 oob_index++;
1859 }
1860 /* include reserved-marker in ecclayout->oobfree calculation */
1861 ecclayout->oobfree->offset = 1 +
1862 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1853 /* software bch library is used for locating errors */ 1863 /* software bch library is used for locating errors */
1854 nand_chip->ecc.priv = nand_bch_init(mtd, 1864 nand_chip->ecc.priv = nand_bch_init(mtd,
1855 nand_chip->ecc.size, 1865 nand_chip->ecc.size,
@@ -1883,9 +1893,12 @@ static int omap_nand_probe(struct platform_device *pdev)
1883 ecclayout->eccbytes = nand_chip->ecc.bytes * 1893 ecclayout->eccbytes = nand_chip->ecc.bytes *
1884 (mtd->writesize / 1894 (mtd->writesize /
1885 nand_chip->ecc.size); 1895 nand_chip->ecc.size);
1886 ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1896 oob_index = BADBLOCK_MARKER_LENGTH;
1887 ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1897 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
1888 ecclayout->eccbytes; 1898 ecclayout->eccpos[i] = oob_index;
1899 /* reserved marker already included in ecclayout->eccbytes */
1900 ecclayout->oobfree->offset =
1901 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1889 /* This ECC scheme requires ELM H/W block */ 1902 /* This ECC scheme requires ELM H/W block */
1890 if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) { 1903 if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
1891 pr_err("nand: error: could not initialize ELM\n"); 1904 pr_err("nand: error: could not initialize ELM\n");
@@ -1913,9 +1926,15 @@ static int omap_nand_probe(struct platform_device *pdev)
1913 ecclayout->eccbytes = nand_chip->ecc.bytes * 1926 ecclayout->eccbytes = nand_chip->ecc.bytes *
1914 (mtd->writesize / 1927 (mtd->writesize /
1915 nand_chip->ecc.size); 1928 nand_chip->ecc.size);
1916 ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1929 oob_index = BADBLOCK_MARKER_LENGTH;
1917 ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1930 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
1918 ecclayout->eccbytes; 1931 ecclayout->eccpos[i] = oob_index;
1932 if (((i + 1) % nand_chip->ecc.bytes) == 0)
1933 oob_index++;
1934 }
1935 /* include reserved-marker in ecclayout->oobfree calculation */
1936 ecclayout->oobfree->offset = 1 +
1937 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1919 /* software bch library is used for locating errors */ 1938 /* software bch library is used for locating errors */
1920 nand_chip->ecc.priv = nand_bch_init(mtd, 1939 nand_chip->ecc.priv = nand_bch_init(mtd,
1921 nand_chip->ecc.size, 1940 nand_chip->ecc.size,
@@ -1956,9 +1975,12 @@ static int omap_nand_probe(struct platform_device *pdev)
1956 ecclayout->eccbytes = nand_chip->ecc.bytes * 1975 ecclayout->eccbytes = nand_chip->ecc.bytes *
1957 (mtd->writesize / 1976 (mtd->writesize /
1958 nand_chip->ecc.size); 1977 nand_chip->ecc.size);
1959 ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1978 oob_index = BADBLOCK_MARKER_LENGTH;
1960 ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1979 for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
1961 ecclayout->eccbytes; 1980 ecclayout->eccpos[i] = oob_index;
1981 /* reserved marker already included in ecclayout->eccbytes */
1982 ecclayout->oobfree->offset =
1983 ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
1962 break; 1984 break;
1963#else 1985#else
1964 pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); 1986 pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1972,11 +1994,8 @@ static int omap_nand_probe(struct platform_device *pdev)
1972 goto return_error; 1994 goto return_error;
1973 } 1995 }
1974 1996
1975 /* populate remaining ECC layout data */ 1997 /* all OOB bytes from oobfree->offset till end off OOB are free */
1976 ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH + 1998 ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
1977 ecclayout->eccbytes);
1978 for (i = 1; i < ecclayout->eccbytes; i++)
1979 ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
1980 /* check if NAND device's OOB is enough to store ECC signatures */ 1999 /* check if NAND device's OOB is enough to store ECC signatures */
1981 if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) { 2000 if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
1982 pr_err("not enough OOB bytes required = %d, available=%d\n", 2001 pr_err("not enough OOB bytes required = %d, available=%d\n",
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278539d5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
139 This adds a specialized tap character device driver that is based 139 This adds a specialized tap character device driver that is based
140 on the MAC-VLAN network interface, called macvtap. A macvtap device 140 on the MAC-VLAN network interface, called macvtap. A macvtap device
141 can be added in the same way as a macvlan device, using 'type 141 can be added in the same way as a macvlan device, using 'type
142 macvlan', and then be accessed through the tap user space interface. 142 macvtap', and then be accessed through the tap user space interface.
143 143
144 To compile this driver as a module, choose M here: the module 144 To compile this driver as a module, choose M here: the module
145 will be called macvtap. 145 will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1bf90b4..6d20fbde8d43 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1796,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
1796 BOND_AD_INFO(bond).agg_select_timer = timeout; 1796 BOND_AD_INFO(bond).agg_select_timer = timeout;
1797} 1797}
1798 1798
1799static u16 aggregator_identifier;
1800
1801/** 1799/**
1802 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures 1800 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
1803 * @bond: bonding struct to work on 1801 * @bond: bonding struct to work on
@@ -1811,7 +1809,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
1811 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), 1809 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
1812 bond->dev->dev_addr)) { 1810 bond->dev->dev_addr)) {
1813 1811
1814 aggregator_identifier = 0; 1812 BOND_AD_INFO(bond).aggregator_identifier = 0;
1815 1813
1816 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; 1814 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
1817 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); 1815 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1878,7 @@ void bond_3ad_bind_slave(struct slave *slave)
1880 ad_initialize_agg(aggregator); 1878 ad_initialize_agg(aggregator);
1881 1879
1882 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); 1880 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
1883 aggregator->aggregator_identifier = (++aggregator_identifier); 1881 aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
1884 aggregator->slave = slave; 1882 aggregator->slave = slave;
1885 aggregator->is_active = 0; 1883 aggregator->is_active = 0;
1886 aggregator->num_of_ports = 0; 1884 aggregator->num_of_ports = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3c5e34..f4dd9592ac62 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
253struct ad_bond_info { 253struct ad_bond_info {
254 struct ad_system system; /* 802.3ad system structure */ 254 struct ad_system system; /* 802.3ad system structure */
255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
256 u16 aggregator_identifier;
256}; 257};
257 258
258struct ad_slave_info { 259struct ad_slave_info {
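
The bond_3ad change replaces a single file-scope aggregator_identifier with a counter inside ad_bond_info, so each bond numbers its aggregators independently instead of every bond bumping (and resetting) one shared global. The difference in miniature, as a standalone sketch:

    #include <stdio.h>

    struct bond {
            unsigned short next_agg_id;     /* per-bond, not a file-scope static */
    };

    static unsigned short new_aggregator_id(struct bond *b)
    {
            return ++b->next_agg_id;
    }

    int main(void)
    {
            struct bond a = { 0 }, b = { 0 };

            printf("bond a: %hu\n", new_aggregator_id(&a));   /* 1 */
            printf("bond a: %hu\n", new_aggregator_id(&a));   /* 2 */
            printf("bond b: %hu\n", new_aggregator_id(&b));   /* 1, unaffected by bond a */
            return 0;
    }
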
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c08018d7333..1c6104d3501d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1270,9 +1270,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1270 1270
1271 if (slave_ops->ndo_set_mac_address == NULL) { 1271 if (slave_ops->ndo_set_mac_address == NULL) {
1272 if (!bond_has_slaves(bond)) { 1272 if (!bond_has_slaves(bond)) {
1273 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.", 1273 pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
1274 bond_dev->name); 1274 bond_dev->name);
1275 bond->params.fail_over_mac = BOND_FOM_ACTIVE; 1275 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
1276 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1277 pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
1278 bond_dev->name);
1279 }
1276 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1280 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1277 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n", 1281 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
1278 bond_dev->name); 1282 bond_dev->name);
@@ -1315,7 +1319,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1315 */ 1319 */
1316 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); 1320 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
1317 1321
1318 if (!bond->params.fail_over_mac) { 1322 if (!bond->params.fail_over_mac ||
1323 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1319 /* 1324 /*
1320 * Set slave to master's mac address. The application already 1325 * Set slave to master's mac address. The application already
1321 * set the master's mac address to that of the first slave 1326 * set the master's mac address to that of the first slave
@@ -1505,7 +1510,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1505 slave_dev->npinfo = bond->dev->npinfo; 1510 slave_dev->npinfo = bond->dev->npinfo;
1506 if (slave_dev->npinfo) { 1511 if (slave_dev->npinfo) {
1507 if (slave_enable_netpoll(new_slave)) { 1512 if (slave_enable_netpoll(new_slave)) {
1508 read_unlock(&bond->lock);
1509 pr_info("Error, %s: master_dev is using netpoll, " 1513 pr_info("Error, %s: master_dev is using netpoll, "
1510 "but new slave device does not support netpoll.\n", 1514 "but new slave device does not support netpoll.\n",
1511 bond_dev->name); 1515 bond_dev->name);
@@ -1539,9 +1543,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1539 bond_set_carrier(bond); 1543 bond_set_carrier(bond);
1540 1544
1541 if (USES_PRIMARY(bond->params.mode)) { 1545 if (USES_PRIMARY(bond->params.mode)) {
1546 block_netpoll_tx();
1542 write_lock_bh(&bond->curr_slave_lock); 1547 write_lock_bh(&bond->curr_slave_lock);
1543 bond_select_active_slave(bond); 1548 bond_select_active_slave(bond);
1544 write_unlock_bh(&bond->curr_slave_lock); 1549 write_unlock_bh(&bond->curr_slave_lock);
1550 unblock_netpoll_tx();
1545 } 1551 }
1546 1552
1547 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", 1553 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1567,10 +1573,12 @@ err_detach:
1567 if (bond->primary_slave == new_slave) 1573 if (bond->primary_slave == new_slave)
1568 bond->primary_slave = NULL; 1574 bond->primary_slave = NULL;
1569 if (bond->curr_active_slave == new_slave) { 1575 if (bond->curr_active_slave == new_slave) {
1576 block_netpoll_tx();
1570 write_lock_bh(&bond->curr_slave_lock); 1577 write_lock_bh(&bond->curr_slave_lock);
1571 bond_change_active_slave(bond, NULL); 1578 bond_change_active_slave(bond, NULL);
1572 bond_select_active_slave(bond); 1579 bond_select_active_slave(bond);
1573 write_unlock_bh(&bond->curr_slave_lock); 1580 write_unlock_bh(&bond->curr_slave_lock);
1581 unblock_netpoll_tx();
1574 } 1582 }
1575 slave_disable_netpoll(new_slave); 1583 slave_disable_netpoll(new_slave);
1576 1584
@@ -1579,7 +1587,8 @@ err_close:
1579 dev_close(slave_dev); 1587 dev_close(slave_dev);
1580 1588
1581err_restore_mac: 1589err_restore_mac:
1582 if (!bond->params.fail_over_mac) { 1590 if (!bond->params.fail_over_mac ||
1591 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1583 /* XXX TODO - fom follow mode needs to change master's 1592 /* XXX TODO - fom follow mode needs to change master's
1584 * MAC if this slave's MAC is in use by the bond, or at 1593 * MAC if this slave's MAC is in use by the bond, or at
1585 * least print a warning. 1594 * least print a warning.
@@ -1672,7 +1681,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1672 1681
1673 bond->current_arp_slave = NULL; 1682 bond->current_arp_slave = NULL;
1674 1683
1675 if (!all && !bond->params.fail_over_mac) { 1684 if (!all && (!bond->params.fail_over_mac ||
1685 bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
1676 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && 1686 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
1677 bond_has_slaves(bond)) 1687 bond_has_slaves(bond))
1678 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n", 1688 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1769,7 +1779,8 @@ static int __bond_release_one(struct net_device *bond_dev,
1769 /* close slave before restoring its mac address */ 1779 /* close slave before restoring its mac address */
1770 dev_close(slave_dev); 1780 dev_close(slave_dev);
1771 1781
1772 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) { 1782 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
1783 bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
1773 /* restore original ("permanent") mac address */ 1784 /* restore original ("permanent") mac address */
1774 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 1785 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
1775 addr.sa_family = slave_dev->type; 1786 addr.sa_family = slave_dev->type;
@@ -2857,9 +2868,12 @@ static int bond_slave_netdev_event(unsigned long event,
2857 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n", 2868 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
2858 bond->dev->name, bond->primary_slave ? slave_dev->name : 2869 bond->dev->name, bond->primary_slave ? slave_dev->name :
2859 "none"); 2870 "none");
2871
2872 block_netpoll_tx();
2860 write_lock_bh(&bond->curr_slave_lock); 2873 write_lock_bh(&bond->curr_slave_lock);
2861 bond_select_active_slave(bond); 2874 bond_select_active_slave(bond);
2862 write_unlock_bh(&bond->curr_slave_lock); 2875 write_unlock_bh(&bond->curr_slave_lock);
2876 unblock_netpoll_tx();
2863 break; 2877 break;
2864 case NETDEV_FEAT_CHANGE: 2878 case NETDEV_FEAT_CHANGE:
2865 bond_compute_features(bond); 2879 bond_compute_features(bond);
@@ -3431,7 +3445,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3431 /* If fail_over_mac is enabled, do nothing and return success. 3445 /* If fail_over_mac is enabled, do nothing and return success.
3432 * Returning an error causes ifenslave to fail. 3446 * Returning an error causes ifenslave to fail.
3433 */ 3447 */
3434 if (bond->params.fail_over_mac) 3448 if (bond->params.fail_over_mac &&
3449 bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3435 return 0; 3450 return 0;
3436 3451
3437 if (!is_valid_ether_addr(sa->sa_data)) 3452 if (!is_valid_ether_addr(sa->sa_data))
@@ -3692,7 +3707,7 @@ static inline int bond_slave_override(struct bonding *bond,
3692 3707
3693 3708
3694static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 3709static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3695 void *accel_priv) 3710 void *accel_priv, select_queue_fallback_t fallback)
3696{ 3711{
3697 /* 3712 /*
3698 * This helper function exists to help dev_pick_tx get the correct 3713 * This helper function exists to help dev_pick_tx get the correct
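Taken together, the bond_main.c hunks above make two separable changes: every fail_over_mac special case is now additionally gated on active-backup mode, and each place that reselects the active slave under curr_slave_lock is bracketed with block_netpoll_tx()/unblock_netpoll_tx(). A condensed sketch of the resulting pattern; bond_fom_applies() is a hypothetical helper used only to summarize the repeated condition, the patch itself open-codes the test.

/* hypothetical summary helper: fail_over_mac only changes behaviour
 * when the bond is in active-backup mode.
 */
static bool bond_fom_applies(struct bonding *bond)
{
	return bond->params.fail_over_mac &&
	       bond->params.mode == BOND_MODE_ACTIVEBACKUP;
}

/* reselection pattern used in enslave, release and the netdev notifier:
 * netpoll transmits are parked before curr_slave_lock is taken, so a
 * netpoll tx cannot run while the active slave is being switched.
 */
	block_netpoll_tx();
	write_lock_bh(&bond->curr_slave_lock);
	bond_select_active_slave(bond);
	write_unlock_bh(&bond->curr_slave_lock);
	unblock_netpoll_tx();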
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943222d5..c37878432717 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/if.h> 15#include <linux/if.h>
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <linux/rwlock.h> 17#include <linux/spinlock.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/inet.h> 20#include <linux/inet.h>
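A note on the include swap above: linux/rwlock.h is guarded against direct inclusion (it expects to be pulled in through linux/spinlock.h), so the rwlock types bond_options.c needs arrive via the spinlock header instead.

#include <linux/spinlock.h>	/* also provides rwlock_t, read_lock(), write_lock() */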
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b881bbde..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
104 104
105config CAN_FLEXCAN 105config CAN_FLEXCAN
106 tristate "Support for Freescale FLEXCAN based chips" 106 tristate "Support for Freescale FLEXCAN based chips"
107 depends on (ARM && CPU_LITTLE_ENDIAN) || PPC 107 depends on ARM || PPC
108 ---help--- 108 ---help---
109 Say Y here if you want to support for Freescale FlexCAN. 109 Say Y here if you want to support for Freescale FlexCAN.
110 110
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a909822e25..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
323 } 323 }
324 324
325 if (!priv->echo_skb[idx]) { 325 if (!priv->echo_skb[idx]) {
326 struct sock *srcsk = skb->sk;
327 326
328 if (atomic_read(&skb->users) != 1) { 327 skb = can_create_echo_skb(skb);
329 struct sk_buff *old_skb = skb; 328 if (!skb)
330 329 return;
331 skb = skb_clone(old_skb, GFP_ATOMIC);
332 kfree_skb(old_skb);
333 if (!skb)
334 return;
335 } else
336 skb_orphan(skb);
337
338 skb->sk = srcsk;
339 330
340 /* make settings for echo to reduce code in irq context */ 331 /* make settings for echo to reduce code in irq context */
341 skb->protocol = htons(ETH_P_CAN); 332 skb->protocol = htons(ETH_P_CAN);
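For reference, a sketch of what the new can_create_echo_skb() helper plausibly looks like, reconstructed from the open-coded logic deleted above (the real helper lives in include/linux/can/skb.h and is not shown in this diff): it either clones a shared skb or orphans a private one, and in both cases preserves the originating socket so echo frames keep their owner.

static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
	struct sock *srcsk = skb->sk;

	if (atomic_read(&skb->users) != 1) {
		struct sk_buff *old_skb = skb;

		/* shared: work on a private clone and drop our reference */
		skb = skb_clone(old_skb, GFP_ATOMIC);
		kfree_skb(old_skb);
		if (!skb)
			return NULL;
	} else {
		/* already private: just detach it from the socket queue */
		skb_orphan(skb);
	}

	/* keep the original owner for the local echo */
	skb->sk = srcsk;
	return skb;
}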
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..320bef2dba42 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
235}; 235};
236 236
237/* 237/*
238 * Abstract off the read/write for arm versus ppc. 238 * Abstract off the read/write for arm versus ppc. This
239 * assumes that PPC uses big-endian registers and everything
240 * else uses little-endian registers, independent of CPU
241 * endianess.
239 */ 242 */
240#if defined(__BIG_ENDIAN) 243#if defined(CONFIG_PPC)
241static inline u32 flexcan_read(void __iomem *addr) 244static inline u32 flexcan_read(void __iomem *addr)
242{ 245{
243 return in_be32(addr); 246 return in_be32(addr);
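The accessor pair implied by the hunk above, sketched in full for clarity (the write side is assumed to mirror the read side shown in the diff): FlexCAN registers are big-endian only on PowerPC, so the switch is keyed on CONFIG_PPC rather than on the CPU's own endianness, which also covers little-endian register layouts on big-endian ARM builds.

#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
	return in_be32(addr);		/* big-endian registers on PPC */
}

static inline void flexcan_write(u32 val, void __iomem *addr)
{
	out_be32(addr, val);
}
#else
static inline u32 flexcan_read(void __iomem *addr)
{
	return readl(addr);		/* little-endian everywhere else */
}

static inline void flexcan_write(u32 val, void __iomem *addr)
{
	writel(val, addr);
}
#endif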
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e6690d672..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
18#include <linux/netdevice.h> 18#include <linux/netdevice.h>
19#include <linux/can.h> 19#include <linux/can.h>
20#include <linux/can/dev.h> 20#include <linux/can/dev.h>
21#include <linux/can/skb.h>
21#include <linux/can/error.h> 22#include <linux/can/error.h>
22 23
23#include <linux/mfd/janz.h> 24#include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1133 */ 1134 */
1134static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb) 1135static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
1135{ 1136{
1136 struct sock *srcsk = skb->sk; 1137 skb = can_create_echo_skb(skb);
1137 1138 if (!skb)
1138 if (atomic_read(&skb->users) != 1) { 1139 return;
1139 struct sk_buff *old_skb = skb;
1140
1141 skb = skb_clone(old_skb, GFP_ATOMIC);
1142 kfree_skb(old_skb);
1143 if (!skb)
1144 return;
1145 } else {
1146 skb_orphan(skb);
1147 }
1148
1149 skb->sk = srcsk;
1150 1140
1151 /* save this skb for tx interrupt echo handling */ 1141 /* save this skb for tx interrupt echo handling */
1152 skb_queue_tail(&mod->echoq, skb); 1142 skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
1322 1312
1323 /* process all communication messages */ 1313 /* process all communication messages */
1324 while (true) { 1314 while (true) {
1325 struct ican3_msg msg; 1315 struct ican3_msg uninitialized_var(msg);
1326 ret = ican3_recv_msg(mod, &msg); 1316 ret = ican3_recv_msg(mod, &msg);
1327 if (ret) 1317 if (ret)
1328 break; 1318 break;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bba8b65..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
473 return err; 473 return err;
474 474
475 dev->nchannels = msg.u.cardinfo.nchannels; 475 dev->nchannels = msg.u.cardinfo.nchannels;
476 if (dev->nchannels > MAX_NET_DEVICES)
477 return -EINVAL;
476 478
477 return 0; 479 return 0;
478} 480}
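Why the new check matters, with the array name assumed from the driver rather than shown in this hunk: the reported channel count is later used to index a fixed-size per-channel array, so a malformed or malicious device announcing more than MAX_NET_DEVICES channels has to be rejected up front.

	/* dev->nets[] is assumed to be sized MAX_NET_DEVICES; indexing it
	 * with an unchecked nchannels from the device would overrun it.
	 */
	if (dev->nchannels > MAX_NET_DEVICES)
		return -EINVAL;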
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
46#include <linux/if_ether.h> 46#include <linux/if_ether.h>
47#include <linux/can.h> 47#include <linux/can.h>
48#include <linux/can/dev.h> 48#include <linux/can/dev.h>
49#include <linux/can/skb.h>
49#include <linux/slab.h> 50#include <linux/slab.h>
50#include <net/rtnetlink.h> 51#include <net/rtnetlink.h>
51 52
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
109 stats->rx_packets++; 110 stats->rx_packets++;
110 stats->rx_bytes += cfd->len; 111 stats->rx_bytes += cfd->len;
111 } 112 }
112 kfree_skb(skb); 113 consume_skb(skb);
113 return NETDEV_TX_OK; 114 return NETDEV_TX_OK;
114 } 115 }
115 116
116 /* perform standard echo handling for CAN network interfaces */ 117 /* perform standard echo handling for CAN network interfaces */
117 118
118 if (loop) { 119 if (loop) {
119 struct sock *srcsk = skb->sk;
120 120
121 skb = skb_share_check(skb, GFP_ATOMIC); 121 skb = can_create_echo_skb(skb);
122 if (!skb) 122 if (!skb)
123 return NETDEV_TX_OK; 123 return NETDEV_TX_OK;
124 124
125 /* receive with packet counting */ 125 /* receive with packet counting */
126 skb->sk = srcsk;
127 vcan_rx(skb, dev); 126 vcan_rx(skb, dev);
128 } else { 127 } else {
129 /* no looped packets => no counting */ 128 /* no looped packets => no counting */
130 kfree_skb(skb); 129 consume_skb(skb);
131 } 130 }
132 return NETDEV_TX_OK; 131 return NETDEV_TX_OK;
133} 132}
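A short note on the kfree_skb() to consume_skb() switch above: both free the buffer, but consume_skb() marks the frame as handled successfully, so drop-monitoring tools hooked on the kfree_skb tracepoint stop reporting vcan's normal tx path as packet loss. Illustrative usage:

	consume_skb(skb);	/* frame was delivered or echoed as intended */
	kfree_skb(skb);		/* frame is being discarded due to an error */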
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c6e97e..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@ static int __init vortex_init(void)
3294 3294
3295static void __exit vortex_eisa_cleanup(void) 3295static void __exit vortex_eisa_cleanup(void)
3296{ 3296{
3297 struct vortex_private *vp;
3298 void __iomem *ioaddr; 3297 void __iomem *ioaddr;
3299 3298
3300#ifdef CONFIG_EISA 3299#ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
3303#endif 3302#endif
3304 3303
3305 if (compaq_net_device) { 3304 if (compaq_net_device) {
3306 vp = netdev_priv(compaq_net_device);
3307 ioaddr = ioport_map(compaq_net_device->base_addr, 3305 ioaddr = ioport_map(compaq_net_device->base_addr,
3308 VORTEX_TOTAL_SIZE); 3306 VORTEX_TOTAL_SIZE);
3309 3307
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc21437478c..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
929} 929}
930 930
931static const struct of_device_id emac_of_match[] = { 931static const struct of_device_id emac_of_match[] = {
932 {.compatible = "allwinner,sun4i-a10-emac",},
933
934 /* Deprecated */
932 {.compatible = "allwinner,sun4i-emac",}, 935 {.compatible = "allwinner,sun4i-emac",},
933 {}, 936 {},
934}; 937};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6e1c15..2e45f6ec1bf0 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1292,6 +1292,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1292 alx = netdev_priv(netdev); 1292 alx = netdev_priv(netdev);
1293 spin_lock_init(&alx->hw.mdio_lock); 1293 spin_lock_init(&alx->hw.mdio_lock);
1294 spin_lock_init(&alx->irq_lock); 1294 spin_lock_init(&alx->irq_lock);
1295 spin_lock_init(&alx->stats_lock);
1295 alx->dev = netdev; 1296 alx->dev = netdev;
1296 alx->hw.pdev = pdev; 1297 alx->hw.pdev = pdev;
1297 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | 1298 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2dedadf2df..cda25ac45b47 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
85 85
86static int disable_msi = 0; 86static int disable_msi = 0;
87 87
88module_param(disable_msi, int, 0); 88module_param(disable_msi, int, S_IRUGO);
89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); 89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 90
91typedef enum { 91typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e0390b..66c0df78c3ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1873,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1873} 1873}
1874 1874
1875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 1875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1876 void *accel_priv) 1876 void *accel_priv, select_queue_fallback_t fallback)
1877{ 1877{
1878 struct bnx2x *bp = netdev_priv(dev); 1878 struct bnx2x *bp = netdev_priv(dev);
1879 1879
@@ -1895,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1895 } 1895 }
1896 1896
1897 /* select a non-FCoE queue */ 1897 /* select a non-FCoE queue */
1898 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1898 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1899} 1899}
1900 1900
1901void bnx2x_set_num_queues(struct bnx2x *bp) 1901void bnx2x_set_num_queues(struct bnx2x *bp)
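Several drivers in this merge (bonding, bnx2x, ixgbe, lantiq_etop, mlx4_en, tilegx) pick up the same third parameter on their ndo_select_queue implementations: the core hands the driver its default queue-picking routine, and the driver calls fallback(dev, skb) instead of reaching into __netdev_pick_tx() directly. The typedef below reflects how the callback is declared in include/linux/netdevice.h for this kernel series.

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

	/* e.g. the bnx2x non-FCoE path above becomes: */
	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);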
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689aec6b..a89a40f88c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
496 496
497/* select_queue callback */ 497/* select_queue callback */
498u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 498u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
499 void *accel_priv); 499 void *accel_priv, select_queue_fallback_t fallback);
500 500
501static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 501static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
502 struct bnx2x_fastpath *fp, 502 struct bnx2x_fastpath *fp,
@@ -936,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
936 else /* CHIP_IS_E1X */ 936 else /* CHIP_IS_E1X */
937 start_params->network_cos_mode = FW_WRR; 937 start_params->network_cos_mode = FW_WRR;
938 938
939 start_params->gre_tunnel_mode = IPGRE_TUNNEL; 939 start_params->gre_tunnel_mode = L2GRE_TUNNEL;
940 start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; 940 start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
941 941
942 return bnx2x_func_state_change(bp, &func_params); 942 return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9c445e7b4a5..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
95MODULE_FIRMWARE(FW_FILE_NAME_E2); 95MODULE_FIRMWARE(FW_FILE_NAME_E2);
96 96
97int bnx2x_num_queues; 97int bnx2x_num_queues;
98module_param_named(num_queues, bnx2x_num_queues, int, 0); 98module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
99MODULE_PARM_DESC(num_queues, 99MODULE_PARM_DESC(num_queues,
100 " Set number of queues (default is as a number of CPUs)"); 100 " Set number of queues (default is as a number of CPUs)");
101 101
102static int disable_tpa; 102static int disable_tpa;
103module_param(disable_tpa, int, 0); 103module_param(disable_tpa, int, S_IRUGO);
104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); 104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105 105
106static int int_mode; 106static int int_mode;
107module_param(int_mode, int, 0); 107module_param(int_mode, int, S_IRUGO);
108MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " 108MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
109 "(1 INT#x; 2 MSI)"); 109 "(1 INT#x; 2 MSI)");
110 110
111static int dropless_fc; 111static int dropless_fc;
112module_param(dropless_fc, int, 0); 112module_param(dropless_fc, int, S_IRUGO);
113MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); 113MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
114 114
115static int mrrs = -1; 115static int mrrs = -1;
116module_param(mrrs, int, 0); 116module_param(mrrs, int, S_IRUGO);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); 117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118 118
119static int debug; 119static int debug;
120module_param(debug, int, 0); 120module_param(debug, int, S_IRUGO);
121MODULE_PARM_DESC(debug, " Default debug msglevel"); 121MODULE_PARM_DESC(debug, " Default debug msglevel");
122 122
123struct workqueue_struct *bnx2x_wq; 123struct workqueue_struct *bnx2x_wq;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2ed7ce..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
1446 if (vf->cfg_flags & VF_CFG_INT_SIMD) 1446 if (vf->cfg_flags & VF_CFG_INT_SIMD)
1447 val |= IGU_VF_CONF_SINGLE_ISR_EN; 1447 val |= IGU_VF_CONF_SINGLE_ISR_EN;
1448 val &= ~IGU_VF_CONF_PARENT_MASK; 1448 val &= ~IGU_VF_CONF_PARENT_MASK;
1449 val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */ 1449 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
1450 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 1450 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1451 1451
1452 DP(BNX2X_MSG_IOV, 1452 DP(BNX2X_MSG_IOV,
1453 "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n", 1453 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
1454 vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION)); 1454 vf->abs_vfid, val);
1455 1455
1456 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1456 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1457 1457
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e23dc1..3167ed6593b0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2609 2609
2610 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2610 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2611 2611
2612 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { 2612 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2613 reg32 &= ~0x3000; 2613 if (err)
2614 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 2614 return err;
2615 } else if (!err)
2616 err = -EBUSY;
2617 2615
2618 return err; 2616 reg32 &= ~0x3000;
2617 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2618
2619 return 0;
2619} 2620}
2620 2621
2621static void tg3_carrier_off(struct tg3 *tp) 2622static void tg3_carrier_off(struct tg3 *tp)
@@ -14113,12 +14114,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14113 14114
14114 tg3_netif_stop(tp); 14115 tg3_netif_stop(tp);
14115 14116
14117 tg3_set_mtu(dev, tp, new_mtu);
14118
14116 tg3_full_lock(tp, 1); 14119 tg3_full_lock(tp, 1);
14117 14120
14118 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 14121 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14119 14122
14120 tg3_set_mtu(dev, tp, new_mtu);
14121
14122 /* Reset PHY, otherwise the read DMA engine will be in a mode that 14123 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14123 * breaks all requests to 256 bytes. 14124 * breaks all requests to 256 bytes.
14124 */ 14125 */
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
1939 pci_iounmap(pdev, tp->base_addr); 1939 pci_iounmap(pdev, tp->base_addr);
1940 free_netdev (dev); 1940 free_netdev (dev);
1941 pci_release_regions (pdev); 1941 pci_release_regions (pdev);
1942 pci_disable_device(pdev);
1942 1943
1943 /* pci_power_off (pdev, -1); */ 1944 /* pci_power_off (pdev, -1); */
1944} 1945}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/clk.h>
16#include <linux/crc32.h> 17#include <linux/crc32.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
51#define ETH_HASH0 0x48 52#define ETH_HASH0 0x48
52#define ETH_HASH1 0x4c 53#define ETH_HASH1 0x4c
53#define ETH_TXCTRL 0x50 54#define ETH_TXCTRL 0x50
55#define ETH_END 0x54
54 56
55/* mode register */ 57/* mode register */
56#define MODER_RXEN (1 << 0) /* receive enable */ 58#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
179 * @membase: pointer to buffer memory region 181 * @membase: pointer to buffer memory region
180 * @dma_alloc: dma allocated buffer size 182 * @dma_alloc: dma allocated buffer size
181 * @io_region_size: I/O memory region size 183 * @io_region_size: I/O memory region size
184 * @num_bd: number of buffer descriptors
182 * @num_tx: number of send buffers 185 * @num_tx: number of send buffers
183 * @cur_tx: last send buffer written 186 * @cur_tx: last send buffer written
184 * @dty_tx: last buffer actually sent 187 * @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
199 int dma_alloc; 202 int dma_alloc;
200 resource_size_t io_region_size; 203 resource_size_t io_region_size;
201 204
205 unsigned int num_bd;
202 unsigned int num_tx; 206 unsigned int num_tx;
203 unsigned int cur_tx; 207 unsigned int cur_tx;
204 unsigned int dty_tx; 208 unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
216 220
217 struct phy_device *phy; 221 struct phy_device *phy;
218 struct mii_bus *mdio; 222 struct mii_bus *mdio;
223 struct clk *clk;
219 s8 phy_id; 224 s8 phy_id;
220}; 225};
221 226
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
688 } 693 }
689 694
690 priv->phy = phy; 695 priv->phy = phy;
696 phy->advertising &= ~(ADVERTISED_1000baseT_Full |
697 ADVERTISED_1000baseT_Half);
698 phy->supported &= ~(SUPPORTED_1000baseT_Full |
699 SUPPORTED_1000baseT_Half);
700
691 return 0; 701 return 0;
692} 702}
693 703
@@ -890,6 +900,102 @@ out:
890 return NETDEV_TX_OK; 900 return NETDEV_TX_OK;
891} 901}
892 902
903static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
904{
905 struct ethoc *priv = netdev_priv(dev);
906 struct phy_device *phydev = priv->phy;
907
908 if (!phydev)
909 return -EOPNOTSUPP;
910
911 return phy_ethtool_gset(phydev, cmd);
912}
913
914static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
915{
916 struct ethoc *priv = netdev_priv(dev);
917 struct phy_device *phydev = priv->phy;
918
919 if (!phydev)
920 return -EOPNOTSUPP;
921
922 return phy_ethtool_sset(phydev, cmd);
923}
924
925static int ethoc_get_regs_len(struct net_device *netdev)
926{
927 return ETH_END;
928}
929
930static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
931 void *p)
932{
933 struct ethoc *priv = netdev_priv(dev);
934 u32 *regs_buff = p;
935 unsigned i;
936
937 regs->version = 0;
938 for (i = 0; i < ETH_END / sizeof(u32); ++i)
939 regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
940}
941
942static void ethoc_get_ringparam(struct net_device *dev,
943 struct ethtool_ringparam *ring)
944{
945 struct ethoc *priv = netdev_priv(dev);
946
947 ring->rx_max_pending = priv->num_bd - 1;
948 ring->rx_mini_max_pending = 0;
949 ring->rx_jumbo_max_pending = 0;
950 ring->tx_max_pending = priv->num_bd - 1;
951
952 ring->rx_pending = priv->num_rx;
953 ring->rx_mini_pending = 0;
954 ring->rx_jumbo_pending = 0;
955 ring->tx_pending = priv->num_tx;
956}
957
958static int ethoc_set_ringparam(struct net_device *dev,
959 struct ethtool_ringparam *ring)
960{
961 struct ethoc *priv = netdev_priv(dev);
962
963 if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
964 ring->tx_pending + ring->rx_pending > priv->num_bd)
965 return -EINVAL;
966 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
967 return -EINVAL;
968
969 if (netif_running(dev)) {
970 netif_tx_disable(dev);
971 ethoc_disable_rx_and_tx(priv);
972 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
973 synchronize_irq(dev->irq);
974 }
975
976 priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
977 priv->num_rx = ring->rx_pending;
978 ethoc_init_ring(priv, dev->mem_start);
979
980 if (netif_running(dev)) {
981 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
982 ethoc_enable_rx_and_tx(priv);
983 netif_wake_queue(dev);
984 }
985 return 0;
986}
987
988const struct ethtool_ops ethoc_ethtool_ops = {
989 .get_settings = ethoc_get_settings,
990 .set_settings = ethoc_set_settings,
991 .get_regs_len = ethoc_get_regs_len,
992 .get_regs = ethoc_get_regs,
993 .get_link = ethtool_op_get_link,
994 .get_ringparam = ethoc_get_ringparam,
995 .set_ringparam = ethoc_set_ringparam,
996 .get_ts_info = ethtool_op_get_ts_info,
997};
998
893static const struct net_device_ops ethoc_netdev_ops = { 999static const struct net_device_ops ethoc_netdev_ops = {
894 .ndo_open = ethoc_open, 1000 .ndo_open = ethoc_open,
895 .ndo_stop = ethoc_stop, 1001 .ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
917 int num_bd; 1023 int num_bd;
918 int ret = 0; 1024 int ret = 0;
919 bool random_mac = false; 1025 bool random_mac = false;
1026 struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
1027 u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
920 1028
921 /* allocate networking device */ 1029 /* allocate networking device */
922 netdev = alloc_etherdev(sizeof(struct ethoc)); 1030 netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
1016 ret = -ENODEV; 1124 ret = -ENODEV;
1017 goto error; 1125 goto error;
1018 } 1126 }
1127 priv->num_bd = num_bd;
1019 /* num_tx must be a power of two */ 1128 /* num_tx must be a power of two */
1020 priv->num_tx = rounddown_pow_of_two(num_bd >> 1); 1129 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
1021 priv->num_rx = num_bd - priv->num_tx; 1130 priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
1030 } 1139 }
1031 1140
1032 /* Allow the platform setup code to pass in a MAC address. */ 1141 /* Allow the platform setup code to pass in a MAC address. */
1033 if (dev_get_platdata(&pdev->dev)) { 1142 if (pdata) {
1034 struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
1035 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1143 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
1036 priv->phy_id = pdata->phy_id; 1144 priv->phy_id = pdata->phy_id;
1037 } else { 1145 } else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
1069 if (random_mac) 1177 if (random_mac)
1070 netdev->addr_assign_type = NET_ADDR_RANDOM; 1178 netdev->addr_assign_type = NET_ADDR_RANDOM;
1071 1179
1180 /* Allow the platform setup code to adjust MII management bus clock. */
1181 if (!eth_clkfreq) {
1182 struct clk *clk = devm_clk_get(&pdev->dev, NULL);
1183
1184 if (!IS_ERR(clk)) {
1185 priv->clk = clk;
1186 clk_prepare_enable(clk);
1187 eth_clkfreq = clk_get_rate(clk);
1188 }
1189 }
1190 if (eth_clkfreq) {
1191 u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
1192
1193 if (!clkdiv)
1194 clkdiv = 2;
1195 dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
1196 ethoc_write(priv, MIIMODER,
1197 (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
1198 clkdiv);
1199 }
1200
1072 /* register MII bus */ 1201 /* register MII bus */
1073 priv->mdio = mdiobus_alloc(); 1202 priv->mdio = mdiobus_alloc();
1074 if (!priv->mdio) { 1203 if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
1111 netdev->netdev_ops = &ethoc_netdev_ops; 1240 netdev->netdev_ops = &ethoc_netdev_ops;
1112 netdev->watchdog_timeo = ETHOC_TIMEOUT; 1241 netdev->watchdog_timeo = ETHOC_TIMEOUT;
1113 netdev->features |= 0; 1242 netdev->features |= 0;
1243 netdev->ethtool_ops = &ethoc_ethtool_ops;
1114 1244
1115 /* setup NAPI */ 1245 /* setup NAPI */
1116 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1246 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
1133 kfree(priv->mdio->irq); 1263 kfree(priv->mdio->irq);
1134 mdiobus_free(priv->mdio); 1264 mdiobus_free(priv->mdio);
1135free: 1265free:
1266 if (priv->clk)
1267 clk_disable_unprepare(priv->clk);
1136 free_netdev(netdev); 1268 free_netdev(netdev);
1137out: 1269out:
1138 return ret; 1270 return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
1157 kfree(priv->mdio->irq); 1289 kfree(priv->mdio->irq);
1158 mdiobus_free(priv->mdio); 1290 mdiobus_free(priv->mdio);
1159 } 1291 }
1292 if (priv->clk)
1293 clk_disable_unprepare(priv->clk);
1160 unregister_netdev(netdev); 1294 unregister_netdev(netdev);
1161 free_netdev(netdev); 1295 free_netdev(netdev);
1162 } 1296 }
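A worked example for the MII clock-divider computation added in ethoc_probe() above, with illustrative numbers: given a 50 MHz bus clock (taken from pdata->eth_clkfreq or, failing that, from devm_clk_get()), the divider comes out as 50000000 / 2500000 + 1 = 21, which keeps the MDIO management clock at or below the commonly used 2.5 MHz limit on the assumption that the controller divides the bus clock by this value; the MIIMODER_NOPRE bit is preserved across the register write.

	/* illustrative numbers only */
	u32 eth_clkfreq = 50000000;				/* 50 MHz bus clock */
	u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);	/* divider of 21 */

	ethoc_write(priv, MIIMODER,
		    (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) | clkdiv);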
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b42401b..903362a7b584 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev)
1778 struct fec_enet_private *fep = netdev_priv(ndev); 1778 struct fec_enet_private *fep = netdev_priv(ndev);
1779 int ret; 1779 int ret;
1780 1780
1781 napi_enable(&fep->napi);
1782
1783 /* I should reset the ring buffers here, but I don't yet know 1781 /* I should reset the ring buffers here, but I don't yet know
1784 * a simple way to do that. 1782 * a simple way to do that.
1785 */ 1783 */
@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev)
1794 fec_enet_free_buffers(ndev); 1792 fec_enet_free_buffers(ndev);
1795 return ret; 1793 return ret;
1796 } 1794 }
1795
1796 napi_enable(&fep->napi);
1797 phy_start(fep->phy_dev); 1797 phy_start(fep->phy_dev);
1798 netif_start_queue(ndev); 1798 netif_start_queue(ndev);
1799 fep->opened = 1; 1799 fep->opened = 1;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
3034 *enable_wake = false; 3034 *enable_wake = false;
3035 } 3035 }
3036 3036
3037 pci_disable_device(pdev); 3037 pci_clear_master(pdev);
3038} 3038}
3039 3039
3040static int __e100_power_off(struct pci_dev *pdev, bool wake) 3040static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada72dfd0..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6881} 6881}
6882 6882
6883static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 6883static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6884 void *accel_priv) 6884 void *accel_priv, select_queue_fallback_t fallback)
6885{ 6885{
6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; 6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6887#ifdef IXGBE_FCOE 6887#ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6908 break; 6908 break;
6909 default: 6909 default:
6910 return __netdev_pick_tx(dev, skb); 6910 return fallback(dev, skb);
6911 } 6911 }
6912 6912
6913 f = &adapter->ring_feature[RING_F_FCOE]; 6913 f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6920 6920
6921 return txq + f->offset; 6921 return txq + f->offset;
6922#else 6922#else
6923 return __netdev_pick_tx(dev, skb); 6923 return fallback(dev, skb);
6924#endif 6924#endif
6925} 6925}
6926 6926
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c64c75..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619 619
620static u16 620static u16
621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, 621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
622 void *accel_priv) 622 void *accel_priv, select_queue_fallback_t fallback)
623{ 623{
624 /* we are currently only using the first queue */ 624 /* we are currently only using the first queue */
625 return 0; 625 return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd27f2db..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@ config MVMDIO
43 This driver is used by the MV643XX_ETH and MVNETA drivers. 43 This driver is used by the MV643XX_ETH and MVNETA drivers.
44 44
45config MVNETA 45config MVNETA
46 tristate "Marvell Armada 370/XP network interface support" 46 tristate "Marvell Armada 370/38x/XP network interface support"
47 depends on MACH_ARMADA_370_XP 47 depends on PLAT_ORION
48 select MVMDIO 48 select MVMDIO
49 ---help--- 49 ---help---
50 This driver supports the network interface units in the 50 This driver supports the network interface units in the
51 Marvell ARMADA XP and ARMADA 370 SoC family. 51 Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
52 52
53 Note that this driver is distinct from the mv643xx_eth 53 Note that this driver is distinct from the mv643xx_eth
54 driver, which should be used for the older Marvell SoCs 54 driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb43a2c..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
629} 629}
630 630
631u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 631u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
632 void *accel_priv) 632 void *accel_priv, select_queue_fallback_t fallback)
633{ 633{
634 struct mlx4_en_priv *priv = netdev_priv(dev); 634 struct mlx4_en_priv *priv = netdev_priv(dev);
635 u16 rings_p_up = priv->num_tx_rings_p_up; 635 u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
641 if (vlan_tx_tag_present(skb)) 641 if (vlan_tx_tag_present(skb))
642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
643 643
644 return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; 644 return fallback(dev, skb) % rings_p_up + up * rings_p_up;
645} 645}
646 646
647static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) 647static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3f42ea..9ca223bc90fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
723 723
724void mlx4_en_tx_irq(struct mlx4_cq *mcq); 724void mlx4_en_tx_irq(struct mlx4_cq *mcq);
725u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 725u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
726 void *accel_priv); 726 void *accel_priv, select_queue_fallback_t fallback);
727netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 727netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
728 728
729int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 729int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
4 4
5config MLX5_CORE 5config MLX5_CORE
6 tristate 6 tristate
7 depends on PCI && X86 7 depends on PCI
8 default n 8 default n
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca1600..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
726 int vpath_idx = 0; 726 int vpath_idx = 0;
727 enum vxge_hw_status status = VXGE_HW_OK; 727 enum vxge_hw_status status = VXGE_HW_OK;
728 struct vxge_vpath *vpath = NULL; 728 struct vxge_vpath *vpath = NULL;
729 struct __vxge_hw_device *hldev;
730
731 hldev = pci_get_drvdata(vdev->pdev);
732 729
733 mac_address = (u8 *)&mac_addr; 730 mac_address = (u8 *)&mac_addr;
734 memcpy(mac_address, mac_header, ETH_ALEN); 731 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2443 2440
2444static void vxge_rem_isr(struct vxgedev *vdev) 2441static void vxge_rem_isr(struct vxgedev *vdev)
2445{ 2442{
2446 struct __vxge_hw_device *hldev;
2447 hldev = pci_get_drvdata(vdev->pdev);
2448
2449#ifdef CONFIG_PCI_MSI 2443#ifdef CONFIG_PCI_MSI
2450 if (vdev->config.intr_type == MSI_X) { 2444 if (vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev); 2445 vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
429 } 429 }
430 430
431 /* Transfer ownership of the skb to the final buffer */ 431 /* Transfer ownership of the skb to the final buffer */
432#ifdef EFX_USE_PIO
432finish_packet: 433finish_packet:
434#endif
433 buffer->skb = skb; 435 buffer->skb = skb;
434 buffer->flags = EFX_TX_BUF_SKB | dma_flags; 436 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
435 437
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e3932f..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
37 stmmac device driver. This driver is used for A20/A31 37 stmmac device driver. This driver is used for A20/A31
38 GMAC ethernet controller. 38 GMAC ethernet controller.
39 39
40config DWMAC_STI
41 bool "STi GMAC support"
42 depends on STMMAC_PLATFORM && ARCH_STI
43 default y
44 ---help---
45 Support for ethernet controller on STi SOCs.
46
47 This selects STi SoC glue layer support for the stmmac
48 device driver. This driver is used on for the STi series
49 SOCs GMAC ethernet controller.
50
40config STMMAC_PCI 51config STMMAC_PCI
41 bool "STMMAC PCI bus support" 52 bool "STMMAC PCI bus support"
42 depends on STMMAC_ETH && PCI 53 depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadecea79b2..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o 4stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
5stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
5stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 6stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
6 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 7 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
7 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
1/**
2 * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
3 *
4 * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
5 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/stmmac.h>
18#include <linux/phy.h>
19#include <linux/mfd/syscon.h>
20#include <linux/regmap.h>
21#include <linux/clk.h>
22#include <linux/of.h>
23#include <linux/of_net.h>
24
25/**
26 * STi GMAC glue logic.
27 * --------------------
28 *
29 * _
30 * | \
31 * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
32 * phyclk | |___________________________________________
33 * | | | (phyclk-in)
34 * --------|1 / |
35 * int-clk |_ / |
36 * | _
37 * | | \
38 * |_______|1 \ ETH_SEL_TX_RETIME_CLK
39 * | |___________________________
40 * | | (tx-retime-clk)
41 * _______|0 /
42 * | |_ /
43 * _ |
44 * | \ |
45 * --------|0 \ |
46 * clk_125 | |__|
47 * | | ETH_SEL_TXCLK_NOT_CLK125
48 * --------|1 /
49 * txclk |_ /
50 *
51 *
52 * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can
53 * generate 50MHz clock or MAC can generate it.
54 * This bit is configured by "st,ext-phyclk" property.
55 *
56 * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz
57 * clock either comes from clk-125 pin or txclk pin. This configuration is
58 * totally driven by the board wiring. This bit is configured by
59 * "st,tx-retime-src" property.
60 *
61 * TXCLK configuration is different for different phy interface modes
62 * and changes according to link speed in modes like RGMII.
63 *
64 * Below table summarizes the clock requirement and clock sources for
65 * supported phy interface modes with link speeds.
66 * ________________________________________________
67 *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
68 * ------------------------------------------------
69 *| MII | n/a | 25Mhz |
70 *| | | txclk |
71 * ------------------------------------------------
72 *| GMII | 125Mhz | 25Mhz |
73 *| | clk-125/txclk | txclk |
74 * ------------------------------------------------
75 *| RGMII | 125Mhz | 25Mhz |
76 *| | clk-125/txclk | clkgen |
77 * ------------------------------------------------
78 *| RMII | n/a | 25Mhz |
79 *| | |clkgen/phyclk-in |
80 * ------------------------------------------------
81 *
82 * TX lines are always retimed with a clk, which can vary depending
83 * on the board configuration. Below is the table of these bits
84 * in eth configuration register depending on source of retime clk.
85 *
86 *---------------------------------------------------------------
87 * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125|
88 *---------------------------------------------------------------
89 * txclk | 0 | n/a | 1 |
90 *---------------------------------------------------------------
91 * ck_125| 0 | n/a | 0 |
92 *---------------------------------------------------------------
93 * phyclk| 1 | 0 | n/a |
94 *---------------------------------------------------------------
95 * clkgen| 1 | 1 | n/a |
96 *---------------------------------------------------------------
97 */
98
99 /* Register definition */
100
101 /* 3 bits [8:6]
102 * [6:6] ETH_SEL_TXCLK_NOT_CLK125
103 * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
104 * [8:8] ETH_SEL_TX_RETIME_CLK
105 *
106 */
107
108#define TX_RETIME_SRC_MASK GENMASK(8, 6)
109#define ETH_SEL_TX_RETIME_CLK BIT(8)
110#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
111#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
112
113#define ENMII_MASK GENMASK(5, 5)
114#define ENMII BIT(5)
115
116/**
117 * 3 bits [4:2]
118 * 000-GMII/MII
119 * 001-RGMII
120 * 010-SGMII
121 * 100-RMII
122*/
123#define MII_PHY_SEL_MASK GENMASK(4, 2)
124#define ETH_PHY_SEL_RMII BIT(4)
125#define ETH_PHY_SEL_SGMII BIT(3)
126#define ETH_PHY_SEL_RGMII BIT(2)
127#define ETH_PHY_SEL_GMII 0x0
128#define ETH_PHY_SEL_MII 0x0
129
130#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
131 iface == PHY_INTERFACE_MODE_RGMII_ID || \
132 iface == PHY_INTERFACE_MODE_RGMII_RXID || \
133 iface == PHY_INTERFACE_MODE_RGMII_TXID)
134
135#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
136 iface == PHY_INTERFACE_MODE_GMII)
137
138struct sti_dwmac {
139 int interface;
140 bool ext_phyclk;
141 bool is_tx_retime_src_clk_125;
142 struct clk *clk;
143 int reg;
144 struct device *dev;
145 struct regmap *regmap;
146};
147
148static u32 phy_intf_sels[] = {
149 [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
150 [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
151 [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
152 [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
153 [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
154 [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
155};
156
157enum {
158 TX_RETIME_SRC_NA = 0,
159 TX_RETIME_SRC_TXCLK = 1,
160 TX_RETIME_SRC_CLK_125,
161 TX_RETIME_SRC_PHYCLK,
162 TX_RETIME_SRC_CLKGEN,
163};
164
165static const char *const tx_retime_srcs[] = {
166 [TX_RETIME_SRC_NA] = "",
167 [TX_RETIME_SRC_TXCLK] = "txclk",
168 [TX_RETIME_SRC_CLK_125] = "clk_125",
169 [TX_RETIME_SRC_PHYCLK] = "phyclk",
170 [TX_RETIME_SRC_CLKGEN] = "clkgen",
171};
172
173static u32 tx_retime_val[] = {
174 [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
175 [TX_RETIME_SRC_CLK_125] = 0x0,
176 [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
177 [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
178 ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
179};
180
181static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
182{
183 u32 src = 0, freq = 0;
184
185 if (spd == SPEED_100) {
186 if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
187 dwmac->interface == PHY_INTERFACE_MODE_GMII) {
188 src = TX_RETIME_SRC_TXCLK;
189 } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
190 if (dwmac->ext_phyclk) {
191 src = TX_RETIME_SRC_PHYCLK;
192 } else {
193 src = TX_RETIME_SRC_CLKGEN;
194 freq = 50000000;
195 }
196
197 } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
198 src = TX_RETIME_SRC_CLKGEN;
199 freq = 25000000;
200 }
201
202 if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
203 clk_set_rate(dwmac->clk, freq);
204
205 } else if (spd == SPEED_1000) {
206 if (dwmac->is_tx_retime_src_clk_125)
207 src = TX_RETIME_SRC_CLK_125;
208 else
209 src = TX_RETIME_SRC_TXCLK;
210 }
211
212 regmap_update_bits(dwmac->regmap, dwmac->reg,
213 TX_RETIME_SRC_MASK, tx_retime_val[src]);
214}
215
216static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
217{
218 struct sti_dwmac *dwmac = priv;
219
220 if (dwmac->clk)
221 clk_disable_unprepare(dwmac->clk);
222}
223
224static void sti_fix_mac_speed(void *priv, unsigned int spd)
225{
226 struct sti_dwmac *dwmac = priv;
227
228 setup_retime_src(dwmac, spd);
229
230 return;
231}
232
233static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
234 struct platform_device *pdev)
235{
236 struct resource *res;
237 struct device *dev = &pdev->dev;
238 struct device_node *np = dev->of_node;
239 struct regmap *regmap;
240 int err;
241
242 if (!np)
243 return -EINVAL;
244
245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
246 if (!res)
247 return -ENODATA;
248
249 regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
250 if (IS_ERR(regmap))
251 return PTR_ERR(regmap);
252
253 dwmac->dev = dev;
254 dwmac->interface = of_get_phy_mode(np);
255 dwmac->regmap = regmap;
256 dwmac->reg = res->start;
257 dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
258 dwmac->is_tx_retime_src_clk_125 = false;
259
260 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
261 const char *rs;
262
263 err = of_property_read_string(np, "st,tx-retime-src", &rs);
264 if (err < 0) {
265 dev_err(dev, "st,tx-retime-src not specified\n");
266 return err;
267 }
268
269 if (!strcasecmp(rs, "clk_125"))
270 dwmac->is_tx_retime_src_clk_125 = true;
271 }
272
273 dwmac->clk = devm_clk_get(dev, "sti-ethclk");
274
275 if (IS_ERR(dwmac->clk))
276 dwmac->clk = NULL;
277
278 return 0;
279}
280
281static int sti_dwmac_init(struct platform_device *pdev, void *priv)
282{
283 struct sti_dwmac *dwmac = priv;
284 struct regmap *regmap = dwmac->regmap;
285 int iface = dwmac->interface;
286 u32 reg = dwmac->reg;
287 u32 val, spd;
288
289 if (dwmac->clk)
290 clk_prepare_enable(dwmac->clk);
291
292 regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
293
294 val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
295 regmap_update_bits(regmap, reg, ENMII_MASK, val);
296
297 if (IS_PHY_IF_MODE_GBIT(iface))
298 spd = SPEED_1000;
299 else
300 spd = SPEED_100;
301
302 setup_retime_src(dwmac, spd);
303
304 return 0;
305}
306
307static void *sti_dwmac_setup(struct platform_device *pdev)
308{
309 struct sti_dwmac *dwmac;
310 int ret;
311
312 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
313 if (!dwmac)
314 return ERR_PTR(-ENOMEM);
315
316 ret = sti_dwmac_parse_data(dwmac, pdev);
317 if (ret) {
318 dev_err(&pdev->dev, "Unable to parse OF data\n");
319 return ERR_PTR(ret);
320 }
321
322 return dwmac;
323}
324
325const struct stmmac_of_data sti_gmac_data = {
326 .fix_mac_speed = sti_fix_mac_speed,
327 .setup = sti_dwmac_setup,
328 .init = sti_dwmac_init,
329 .exit = sti_dwmac_exit,
330};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26ed58ee..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
133#ifdef CONFIG_DWMAC_SUNXI 133#ifdef CONFIG_DWMAC_SUNXI
134extern const struct stmmac_of_data sun7i_gmac_data; 134extern const struct stmmac_of_data sun7i_gmac_data;
135#endif 135#endif
136#ifdef CONFIG_DWMAC_STI
137extern const struct stmmac_of_data sti_gmac_data;
138#endif
136extern struct platform_driver stmmac_pltfr_driver; 139extern struct platform_driver stmmac_pltfr_driver;
137static inline int stmmac_register_platform(void) 140static inline int stmmac_register_platform(void)
138{ 141{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d2063b..c61bc72b8e90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = {
33#ifdef CONFIG_DWMAC_SUNXI 33#ifdef CONFIG_DWMAC_SUNXI
34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
35#endif 35#endif
36#ifdef CONFIG_DWMAC_STI
37 { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
38 { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
39 { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
40#endif
36 /* SoC specific glue layers should come before generic bindings */ 41 /* SoC specific glue layers should come before generic bindings */
37 { .compatible = "st,spear600-gmac"}, 42 { .compatible = "st,spear600-gmac"},
38 { .compatible = "snps,dwmac-3.610"}, 43 { .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3af96f..651087b5c8da 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
554 * common for both the interface as the interface shares 554 * common for both the interface as the interface shares
555 * the same hardware resource. 555 * the same hardware resource.
556 */ 556 */
557 for (i = 0; i <= priv->data.slaves; i++) 557 for (i = 0; i < priv->data.slaves; i++)
558 if (priv->slaves[i].ndev->flags & IFF_PROMISC) 558 if (priv->slaves[i].ndev->flags & IFF_PROMISC)
559 flag = true; 559 flag = true;
560 560
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
578 unsigned long timeout = jiffies + HZ; 578 unsigned long timeout = jiffies + HZ;
579 579
580 /* Disable Learn for all ports */ 580 /* Disable Learn for all ports */
581 for (i = 0; i <= priv->data.slaves; i++) { 581 for (i = 0; i < priv->data.slaves; i++) {
582 cpsw_ale_control_set(ale, i, 582 cpsw_ale_control_set(ale, i,
583 ALE_PORT_NOLEARN, 1); 583 ALE_PORT_NOLEARN, 1);
584 cpsw_ale_control_set(ale, i, 584 cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
607 607
608 /* Enable Learn for all ports */ 608 /* Enable Learn for all ports */
609 for (i = 0; i <= priv->data.slaves; i++) { 609 for (i = 0; i < priv->data.slaves; i++) {
610 cpsw_ale_control_set(ale, i, 610 cpsw_ale_control_set(ale, i,
611 ALE_PORT_NOLEARN, 0); 611 ALE_PORT_NOLEARN, 0);
612 cpsw_ale_control_set(ale, i, 612 cpsw_ale_control_set(ale, i,
@@ -1878,14 +1878,29 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1878 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1878 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1879 phyid = be32_to_cpup(parp+1); 1879 phyid = be32_to_cpup(parp+1);
1880 mdio = of_find_device_by_node(mdio_node); 1880 mdio = of_find_device_by_node(mdio_node);
1881 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 1881
1882 PHY_ID_FMT, mdio->name, phyid); 1882 if (strncmp(mdio->name, "gpio", 4) == 0) {
1883 /* GPIO bitbang MDIO driver attached */
1884 struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
1885
1886 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1887 PHY_ID_FMT, bus->id, phyid);
1888 } else {
1889 /* davinci MDIO driver attached */
1890 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1891 PHY_ID_FMT, mdio->name, phyid);
1892 }
1883 1893
1884 mac_addr = of_get_mac_address(slave_node); 1894 mac_addr = of_get_mac_address(slave_node);
1885 if (mac_addr) 1895 if (mac_addr)
1886 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1896 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
1887 1897
1888 slave_data->phy_if = of_get_phy_mode(slave_node); 1898 slave_data->phy_if = of_get_phy_mode(slave_node);
1899 if (slave_data->phy_if < 0) {
1900 pr_err("Missing or malformed slave[%d] phy-mode property\n",
1901 i);
1902 return slave_data->phy_if;
1903 }
1889 1904
1890 if (data->dual_emac) { 1905 if (data->dual_emac) {
1891 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1906 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
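Two of the cpsw.c hunks above are defensive fixes: the slave loops now use "i < priv->data.slaves" (the old "<=" bound walked one entry past the slaves[] array), and a missing or malformed phy-mode property now fails the probe instead of leaving a negative error in phy_if. A minimal stand-alone sketch of the loop-bound issue, using hypothetical names rather than the driver's real structures:

#include <stdio.h>

#define NUM_SLAVES 2

struct slave { int promisc; };

int main(void)
{
	struct slave slaves[NUM_SLAVES] = { { 0 }, { 1 } };
	int flag = 0;

	/* Correct bound: visit exactly NUM_SLAVES entries.  The original
	 * "i <= NUM_SLAVES" form would also read slaves[NUM_SLAVES],
	 * i.e. one element past the end of the array. */
	for (int i = 0; i < NUM_SLAVES; i++)
		if (slaves[i].promisc)
			flag = 1;

	printf("promisc requested: %d\n", flag);
	return 0;
}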
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a65720..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2071 2071
2072/* Return subqueue id on this core (one per core). */ 2072/* Return subqueue id on this core (one per core). */
2073static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, 2073static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2074 void *accel_priv) 2074 void *accel_priv, select_queue_fallback_t fallback)
2075{ 2075{
2076 return smp_processor_id(); 2076 return smp_processor_id();
2077} 2077}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65feebb9e..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/of_mdio.h> 27#include <linux/of_mdio.h>
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29#include <linux/of_irq.h>
29#include <linux/of_address.h> 30#include <linux/of_address.h>
30#include <linux/skbuff.h> 31#include <linux/skbuff.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
600 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 601 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
601 packets++; 602 packets++;
602 603
603 lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; 604 ++lp->tx_bd_ci;
605 lp->tx_bd_ci %= TX_BD_NUM;
604 cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 606 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
605 status = cur_p->status; 607 status = cur_p->status;
606 } 608 }
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
686 skb_headlen(skb), DMA_TO_DEVICE); 688 skb_headlen(skb), DMA_TO_DEVICE);
687 689
688 for (ii = 0; ii < num_frag; ii++) { 690 for (ii = 0; ii < num_frag; ii++) {
689 lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 691 ++lp->tx_bd_tail;
692 lp->tx_bd_tail %= TX_BD_NUM;
690 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 693 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
691 frag = &skb_shinfo(skb)->frags[ii]; 694 frag = &skb_shinfo(skb)->frags[ii];
692 cur_p->phys = dma_map_single(ndev->dev.parent, 695 cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
702 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 705 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
703 /* Start the transfer */ 706 /* Start the transfer */
704 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 707 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
705 lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 708 ++lp->tx_bd_tail;
709 lp->tx_bd_tail %= TX_BD_NUM;
706 710
707 return NETDEV_TX_OK; 711 return NETDEV_TX_OK;
708} 712}
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
774 cur_p->status = 0; 778 cur_p->status = 0;
775 cur_p->sw_id_offset = (u32) new_skb; 779 cur_p->sw_id_offset = (u32) new_skb;
776 780
777 lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; 781 ++lp->rx_bd_ci;
782 lp->rx_bd_ci %= RX_BD_NUM;
778 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 783 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
779 } 784 }
780 785
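The axienet hunks above all replace the pattern "idx = ++idx % N" with a two-step increment-then-wrap. The single-expression form modifies the same variable twice without an intervening sequence point, which is undefined behaviour in C and triggers -Wsequence-point warnings on recent compilers. A stand-alone sketch of the corrected ring-index advance, with hypothetical names standing in for the descriptor indices:

#include <stdio.h>

#define RING_SIZE 4

/* Advance a ring-buffer index the way the fixed driver does:
 * increment first, then wrap, as two separate statements. */
static unsigned int ring_advance(unsigned int idx)
{
	++idx;
	idx %= RING_SIZE;
	return idx;
}

int main(void)
{
	unsigned int idx = 0;

	for (int i = 0; i < 6; i++) {
		idx = ring_advance(idx);
		printf("descriptor index: %u\n", idx);
	}
	return 0;
}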
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118c2f0a..7141a1937360 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
88{ 88{
89 struct net_device_context *net_device_ctx = netdev_priv(net); 89 struct net_device_context *net_device_ctx = netdev_priv(net);
90 struct hv_device *device_obj = net_device_ctx->device_ctx; 90 struct hv_device *device_obj = net_device_ctx->device_ctx;
91 struct netvsc_device *nvdev;
92 struct rndis_device *rdev;
91 int ret = 0; 93 int ret = 0;
92 94
95 netif_carrier_off(net);
96
93 /* Open up the device */ 97 /* Open up the device */
94 ret = rndis_filter_open(device_obj); 98 ret = rndis_filter_open(device_obj);
95 if (ret != 0) { 99 if (ret != 0) {
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
99 103
100 netif_start_queue(net); 104 netif_start_queue(net);
101 105
106 nvdev = hv_get_drvdata(device_obj);
107 rdev = nvdev->extension;
108 if (!rdev->link_state)
109 netif_carrier_on(net);
110
102 return ret; 111 return ret;
103} 112}
104 113
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
229 struct net_device *net; 238 struct net_device *net;
230 struct net_device_context *ndev_ctx; 239 struct net_device_context *ndev_ctx;
231 struct netvsc_device *net_device; 240 struct netvsc_device *net_device;
241 struct rndis_device *rdev;
232 242
233 net_device = hv_get_drvdata(device_obj); 243 net_device = hv_get_drvdata(device_obj);
244 rdev = net_device->extension;
245
246 rdev->link_state = status != 1;
247
234 net = net_device->ndev; 248 net = net_device->ndev;
235 249
236 if (!net) { 250 if (!net || net->reg_state != NETREG_REGISTERED)
237 netdev_err(net, "got link status but net device "
238 "not initialized yet\n");
239 return; 251 return;
240 }
241 252
253 ndev_ctx = netdev_priv(net);
242 if (status == 1) { 254 if (status == 1) {
243 netif_carrier_on(net);
244 ndev_ctx = netdev_priv(net);
245 schedule_delayed_work(&ndev_ctx->dwork, 0); 255 schedule_delayed_work(&ndev_ctx->dwork, 0);
246 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 256 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
247 } else { 257 } else {
248 netif_carrier_off(net); 258 schedule_delayed_work(&ndev_ctx->dwork, 0);
249 } 259 }
250} 260}
251 261
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = {
388 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add 398 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
389 * another netif_notify_peers() into a delayed work, otherwise GARP packet 399 * another netif_notify_peers() into a delayed work, otherwise GARP packet
390 * will not be sent after quick migration, and cause network disconnection. 400 * will not be sent after quick migration, and cause network disconnection.
401 * Also, we update the carrier status here.
391 */ 402 */
392static void netvsc_send_garp(struct work_struct *w) 403static void netvsc_link_change(struct work_struct *w)
393{ 404{
394 struct net_device_context *ndev_ctx; 405 struct net_device_context *ndev_ctx;
395 struct net_device *net; 406 struct net_device *net;
396 struct netvsc_device *net_device; 407 struct netvsc_device *net_device;
408 struct rndis_device *rdev;
409 bool notify;
410
411 rtnl_lock();
397 412
398 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 413 ndev_ctx = container_of(w, struct net_device_context, dwork.work);
399 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 414 net_device = hv_get_drvdata(ndev_ctx->device_ctx);
415 rdev = net_device->extension;
400 net = net_device->ndev; 416 net = net_device->ndev;
401 netdev_notify_peers(net); 417
418 if (rdev->link_state) {
419 netif_carrier_off(net);
420 notify = false;
421 } else {
422 netif_carrier_on(net);
423 notify = true;
424 }
425
426 rtnl_unlock();
427
428 if (notify)
429 netdev_notify_peers(net);
402} 430}
403 431
404 432
@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev,
414 if (!net) 442 if (!net)
415 return -ENOMEM; 443 return -ENOMEM;
416 444
417 /* Set initial state */
418 netif_carrier_off(net);
419
420 net_device_ctx = netdev_priv(net); 445 net_device_ctx = netdev_priv(net);
421 net_device_ctx->device_ctx = dev; 446 net_device_ctx->device_ctx = dev;
422 hv_set_drvdata(dev, net); 447 hv_set_drvdata(dev, net);
423 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 448 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
424 INIT_WORK(&net_device_ctx->work, do_set_multicast); 449 INIT_WORK(&net_device_ctx->work, do_set_multicast);
425 450
426 net->netdev_ops = &device_ops; 451 net->netdev_ops = &device_ops;
@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev,
443 } 468 }
444 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 469 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
445 470
446 netif_carrier_on(net);
447
448 ret = register_netdev(net); 471 ret = register_netdev(net);
449 if (ret != 0) { 472 if (ret != 0) {
450 pr_err("Unable to register netdev.\n"); 473 pr_err("Unable to register netdev.\n");
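The netvsc_drv.c changes above move carrier handling out of the interrupt-context link-status callback and into the delayed work: the callback only records rdev->link_state, and the worker later flips the carrier under the RTNL lock, notifying peers only when the link actually came up. A rough stand-alone sketch of that flow, where the names and the lock stubs are placeholders rather than the hv_netvsc API:

#include <stdbool.h>
#include <stdio.h>

static bool link_state_down;	/* stands in for rdev->link_state */

static void rtnl_lock(void)	{ /* would take the RTNL mutex */ }
static void rtnl_unlock(void)	{ /* would release it */ }

/* Sketch of the link-change worker: decide carrier state under the
 * lock, send the gratuitous notification only on link-up, and only
 * after the lock has been dropped. */
static void link_change_work(void)
{
	bool notify;

	rtnl_lock();
	if (link_state_down) {
		printf("carrier off\n");
		notify = false;
	} else {
		printf("carrier on\n");
		notify = true;
	}
	rtnl_unlock();

	if (notify)
		printf("notify peers (gratuitous ARP)\n");
}

int main(void)
{
	link_state_down = false;
	link_change_work();
	link_state_down = true;
	link_change_work();
	return 0;
}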
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1d2e70..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
210 To compile it as a module, choose M here: the module will be called 210 To compile it as a module, choose M here: the module will be called
211 kingsun-sir. 211 kingsun-sir.
212 212
213config EP7211_DONGLE
214 tristate "Cirrus Logic clps711x I/R support"
215 depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
216 help
217 Say Y here if you want to build support for the Cirrus logic
218 EP7211 chipset's infrared module.
219
220config KSDAZZLE_DONGLE 213config KSDAZZLE_DONGLE
221 tristate "KingSun Dazzle IrDA-USB dongle" 214 tristate "KingSun Dazzle IrDA-USB dongle"
222 depends on IRDA && USB 215 depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
35obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o 35obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
36obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o 36obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
37obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o 37obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
38obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
39obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o 38obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
40obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o 39obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
41obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o 40obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * IR port driver for the Cirrus Logic CLPS711X processors
3 *
4 * Copyright 2001, Blue Mug Inc. All rights reserved.
5 * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
6 */
7
8#include <linux/module.h>
9#include <linux/platform_device.h>
10
11#include <mach/hardware.h>
12
13#include "sir-dev.h"
14
15static int clps711x_dongle_open(struct sir_dev *dev)
16{
17 unsigned int syscon;
18
19 /* Turn on the SIR encoder. */
20 syscon = clps_readl(SYSCON1);
21 syscon |= SYSCON1_SIREN;
22 clps_writel(syscon, SYSCON1);
23
24 return 0;
25}
26
27static int clps711x_dongle_close(struct sir_dev *dev)
28{
29 unsigned int syscon;
30
31 /* Turn off the SIR encoder. */
32 syscon = clps_readl(SYSCON1);
33 syscon &= ~SYSCON1_SIREN;
34 clps_writel(syscon, SYSCON1);
35
36 return 0;
37}
38
39static struct dongle_driver clps711x_dongle = {
40 .owner = THIS_MODULE,
41 .driver_name = "EP7211 IR driver",
42 .type = IRDA_EP7211_DONGLE,
43 .open = clps711x_dongle_open,
44 .close = clps711x_dongle_close,
45};
46
47static int clps711x_sir_probe(struct platform_device *pdev)
48{
49 return irda_register_dongle(&clps711x_dongle);
50}
51
52static int clps711x_sir_remove(struct platform_device *pdev)
53{
54 return irda_unregister_dongle(&clps711x_dongle);
55}
56
57static struct platform_driver clps711x_sir_driver = {
58 .driver = {
59 .name = "sir-clps711x",
60 .owner = THIS_MODULE,
61 },
62 .probe = clps711x_sir_probe,
63 .remove = clps711x_sir_remove,
64};
65module_platform_driver(clps711x_sir_driver);
66
67MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
68MODULE_DESCRIPTION("EP7211 IR dongle driver");
69MODULE_LICENSE("GPL");
70MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
522 sirdev_put_instance(priv->dev); 522 sirdev_put_instance(priv->dev);
523 523
524 /* Stop tty */ 524 /* Stop tty */
525 irtty_stop_receiver(tty, TRUE);
526 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 525 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
527 if (tty->ops->stop) 526 if (tty->ops->stop)
528 tty->ops->stop(tty); 527 tty->ops->stop(tty);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4509c7..a5d21893670d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -879,14 +879,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
879 dev->priv_flags |= IFF_MACVLAN; 879 dev->priv_flags |= IFF_MACVLAN;
880 err = netdev_upper_dev_link(lowerdev, dev); 880 err = netdev_upper_dev_link(lowerdev, dev);
881 if (err) 881 if (err)
882 goto destroy_port; 882 goto unregister_netdev;
883
884 883
885 list_add_tail_rcu(&vlan->list, &port->vlans); 884 list_add_tail_rcu(&vlan->list, &port->vlans);
886 netif_stacked_transfer_operstate(lowerdev, dev); 885 netif_stacked_transfer_operstate(lowerdev, dev);
887 886
888 return 0; 887 return 0;
889 888
889unregister_netdev:
890 unregister_netdevice(dev);
890destroy_port: 891destroy_port:
891 port->count -= 1; 892 port->count -= 1;
892 if (!port->count) 893 if (!port->count)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725fa8671..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
437 if (on) { 437 if (on) {
438 gpio_num = gpio_tab[EXTTS0_GPIO + index]; 438 gpio_num = gpio_tab[EXTTS0_GPIO + index];
439 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; 439 evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
440 evnt |= EVNT_RISE; 440 if (rq->extts.flags & PTP_FALLING_EDGE)
441 evnt |= EVNT_FALL;
442 else
443 evnt |= EVNT_RISE;
441 } 444 }
442 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); 445 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
443 return 0; 446 return 0;
@@ -1003,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
1003 } else 1006 } else
1004 list_add_tail(&dp83640->list, &clock->phylist); 1007 list_add_tail(&dp83640->list, &clock->phylist);
1005 1008
1006 if (clock->chosen && !list_empty(&clock->phylist))
1007 recalibrate(clock);
1008 else
1009 enable_broadcast(dp83640->phydev, clock->page, 1);
1010
1011 dp83640_clock_put(clock); 1009 dp83640_clock_put(clock);
1012 return 0; 1010 return 0;
1013 1011
@@ -1058,6 +1056,21 @@ static void dp83640_remove(struct phy_device *phydev)
1058 kfree(dp83640); 1056 kfree(dp83640);
1059} 1057}
1060 1058
1059static int dp83640_config_init(struct phy_device *phydev)
1060{
1061 struct dp83640_private *dp83640 = phydev->priv;
1062 struct dp83640_clock *clock = dp83640->clock;
1063
1064 if (clock->chosen && !list_empty(&clock->phylist))
1065 recalibrate(clock);
1066 else
1067 enable_broadcast(phydev, clock->page, 1);
1068
1069 enable_status_frames(phydev, true);
1070 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1071 return 0;
1072}
1073
1061static int dp83640_ack_interrupt(struct phy_device *phydev) 1074static int dp83640_ack_interrupt(struct phy_device *phydev)
1062{ 1075{
1063 int err = phy_read(phydev, MII_DP83640_MISR); 1076 int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1208,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
1195 1208
1196 mutex_lock(&dp83640->clock->extreg_lock); 1209 mutex_lock(&dp83640->clock->extreg_lock);
1197 1210
1198 if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
1199 enable_status_frames(phydev, true);
1200 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
1201 }
1202
1203 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0); 1211 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
1204 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0); 1212 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
1205 1213
@@ -1281,6 +1289,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1281 } 1289 }
1282 /* fall through */ 1290 /* fall through */
1283 case HWTSTAMP_TX_ON: 1291 case HWTSTAMP_TX_ON:
1292 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1284 skb_queue_tail(&dp83640->tx_queue, skb); 1293 skb_queue_tail(&dp83640->tx_queue, skb);
1285 schedule_work(&dp83640->ts_work); 1294 schedule_work(&dp83640->ts_work);
1286 break; 1295 break;
@@ -1330,6 +1339,7 @@ static struct phy_driver dp83640_driver = {
1330 .flags = PHY_HAS_INTERRUPT, 1339 .flags = PHY_HAS_INTERRUPT,
1331 .probe = dp83640_probe, 1340 .probe = dp83640_probe,
1332 .remove = dp83640_remove, 1341 .remove = dp83640_remove,
1342 .config_init = dp83640_config_init,
1333 .config_aneg = genphy_config_aneg, 1343 .config_aneg = genphy_config_aneg,
1334 .read_status = genphy_read_status, 1344 .read_status = genphy_read_status,
1335 .ack_interrupt = dp83640_ack_interrupt, 1345 .ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7d81fb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
170} 170}
171 171
172static const struct of_device_id sun4i_mdio_dt_ids[] = { 172static const struct of_device_id sun4i_mdio_dt_ids[] = {
173 { .compatible = "allwinner,sun4i-a10-mdio" },
174
175 /* Deprecated */
173 { .compatible = "allwinner,sun4i-mdio" }, 176 { .compatible = "allwinner,sun4i-mdio" },
174 { } 177 { }
175}; 178};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63639b7..82514e72b3d8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -719,7 +719,7 @@ int phy_resume(struct phy_device *phydev)
719static int genphy_config_advert(struct phy_device *phydev) 719static int genphy_config_advert(struct phy_device *phydev)
720{ 720{
721 u32 advertise; 721 u32 advertise;
722 int oldadv, adv; 722 int oldadv, adv, bmsr;
723 int err, changed = 0; 723 int err, changed = 0;
724 724
725 /* Only allow advertising what this PHY supports */ 725 /* Only allow advertising what this PHY supports */
@@ -744,26 +744,36 @@ static int genphy_config_advert(struct phy_device *phydev)
744 changed = 1; 744 changed = 1;
745 } 745 }
746 746
747 bmsr = phy_read(phydev, MII_BMSR);
748 if (bmsr < 0)
749 return bmsr;
750
751 /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
752 * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
753 * logical 1.
754 */
755 if (!(bmsr & BMSR_ESTATEN))
756 return changed;
757
747 /* Configure gigabit if it's supported */ 758 /* Configure gigabit if it's supported */
759 adv = phy_read(phydev, MII_CTRL1000);
760 if (adv < 0)
761 return adv;
762
763 oldadv = adv;
764 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
765
748 if (phydev->supported & (SUPPORTED_1000baseT_Half | 766 if (phydev->supported & (SUPPORTED_1000baseT_Half |
749 SUPPORTED_1000baseT_Full)) { 767 SUPPORTED_1000baseT_Full)) {
750 adv = phy_read(phydev, MII_CTRL1000);
751 if (adv < 0)
752 return adv;
753
754 oldadv = adv;
755 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
756 adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); 768 adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
757 769 if (adv != oldadv)
758 if (adv != oldadv) {
759 err = phy_write(phydev, MII_CTRL1000, adv);
760
761 if (err < 0)
762 return err;
763 changed = 1; 770 changed = 1;
764 }
765 } 771 }
766 772
773 err = phy_write(phydev, MII_CTRL1000, adv);
774 if (err < 0)
775 return err;
776
767 return changed; 777 return changed;
768} 778}
769 779
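The genphy_config_advert() hunk above adds a BMSR read before touching MII_CTRL1000: per IEEE 802.3-2008, 22.2.4.2.16, every 1000 Mb/s capable PHY reports extended status via BMSR_ESTATEN, so the register is only accessed when that bit is set, and the gigabit advertisement bits are now cleared and rewritten unconditionally. A simplified stand-alone sketch of that decision, with stubbed register reads in place of the real MDIO accessors:

#include <stdio.h>

#define BMSR_ESTATEN		0x0100	/* extended status in register 15 */
#define ADVERTISE_1000FULL	0x0200
#define ADVERTISE_1000HALF	0x0100

/* Stubbed MII accessors standing in for phy_read(); the real code
 * talks to the PHY over the MDIO bus instead. */
static int mii_read_bmsr(void)     { return BMSR_ESTATEN; }
static int mii_read_ctrl1000(void) { return ADVERTISE_1000HALF; }

int main(void)
{
	int bmsr = mii_read_bmsr();

	/* Only PHYs that set BMSR_ESTATEN have a CTRL1000 register worth
	 * touching; bail out early otherwise. */
	if (!(bmsr & BMSR_ESTATEN)) {
		printf("no extended status, skipping gigabit advertisement\n");
		return 0;
	}

	int adv = mii_read_ctrl1000();
	adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
	adv |= ADVERTISE_1000FULL;	/* advertise what this PHY supports */
	printf("new CTRL1000 advertisement: 0x%04x\n", adv);
	return 0;
}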
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 28407426fd6f..c8624a8235ab 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1648} 1648}
1649 1649
1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, 1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1651 void *accel_priv) 1651 void *accel_priv, select_queue_fallback_t fallback)
1652{ 1652{
1653 /* 1653 /*
1654 * This helper function exists to help dev_pick_tx get the correct 1654 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44c4db8450f0..8fe9cb7d0f72 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
366 * hope the rxq no. may help here. 366 * hope the rxq no. may help here.
367 */ 367 */
368static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 368static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
369 void *accel_priv) 369 void *accel_priv, select_queue_fallback_t fallback)
370{ 370{
371 struct tun_struct *tun = netdev_priv(dev); 371 struct tun_struct *tun = netdev_priv(dev);
372 struct tun_flow_entry *e; 372 struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a066c1d..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,21 @@ config USB_NET_SR9700
292 This option adds support for CoreChip-sz SR9700 based USB 1.1 292 This option adds support for CoreChip-sz SR9700 based USB 1.1
293 10/100 Ethernet adapters. 293 10/100 Ethernet adapters.
294 294
295config USB_NET_SR9800
296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
297 depends on USB_USBNET
298 select CRC32
299 ---help---
300 Say Y if you want to use one of the following 100Mbps USB Ethernet
301 device based on the CoreChip-sz SR9800 chip.
302
303 This driver makes the adapter appear as a normal Ethernet interface,
304 typically on eth0, if it is the only ethernet device, or perhaps on
305 eth1, if you have a PCI or ISA ethernet card installed.
306
307 To compile this driver as a module, choose M here: the
308 module will be called sr9800.
309
295config USB_NET_SMSC75XX 310config USB_NET_SMSC75XX
296 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" 311 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
297 depends on USB_USBNET 312 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..433f0a00c683 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 15obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 16obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o 17obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
18obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
18obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o 19obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
19obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 20obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
20obj-$(CONFIG_USB_NET_GL620A) += gl620a.o 21obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d4766d..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
917 .status = asix_status, 917 .status = asix_status,
918 .link_reset = ax88178_link_reset, 918 .link_reset = ax88178_link_reset,
919 .reset = ax88178_reset, 919 .reset = ax88178_reset,
920 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, 920 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
921 FLAG_MULTI_PACKET,
921 .rx_fixup = asix_rx_fixup_common, 922 .rx_fixup = asix_rx_fixup_common,
922 .tx_fixup = asix_tx_fixup, 923 .tx_fixup = asix_tx_fixup,
923}; 924};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64dad05bc..955df81a4358 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1118 u16 hdr_off; 1118 u16 hdr_off;
1119 u32 *pkt_hdr; 1119 u32 *pkt_hdr;
1120 1120
1121 /* This check is no longer done by usbnet */
1122 if (skb->len < dev->net->hard_header_len)
1123 return 0;
1124
1121 skb_trim(skb, skb->len - 4); 1125 skb_trim(skb, skb->len - 4);
1122 memcpy(&rx_hdr, skb_tail_pointer(skb), 4); 1126 memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
1123 le32_to_cpus(&rx_hdr); 1127 le32_to_cpus(&rx_hdr);
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93fbaf7..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
84 u32 size; 84 u32 size;
85 u32 count; 85 u32 count;
86 86
87 /* This check is no longer done by usbnet */
88 if (skb->len < dev->net->hard_header_len)
89 return 0;
90
87 header = (struct gl_header *) skb->data; 91 header = (struct gl_header *) skb->data;
88 92
89 // get the packet count of the received skb 93 // get the packet count of the received skb
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1201 struct hso_serial *serial = urb->context; 1201 struct hso_serial *serial = urb->context;
1202 int status = urb->status; 1202 int status = urb->status;
1203 1203
1204 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1205
1204 /* sanity check */ 1206 /* sanity check */
1205 if (!serial) { 1207 if (!serial) {
1206 D1("serial == NULL"); 1208 D1("serial == NULL");
1207 return; 1209 return;
1208 } else if (status) { 1210 }
1211 if (status) {
1209 handle_usb_error(status, __func__, serial->parent); 1212 handle_usb_error(status, __func__, serial->parent);
1210 return; 1213 return;
1211 } 1214 }
1212 1215
1213 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1214 D1("Actual length = %d\n", urb->actual_length); 1216 D1("Actual length = %d\n", urb->actual_length);
1215 DUMP1(urb->transfer_buffer, urb->actual_length); 1217 DUMP1(urb->transfer_buffer, urb->actual_length);
1216 1218
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1218 if (serial->port.count == 0) 1220 if (serial->port.count == 0)
1219 return; 1221 return;
1220 1222
1221 if (status == 0) { 1223 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
1222 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) 1224 fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
1223 fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); 1225 /* Valid data, handle RX data */
1224 /* Valid data, handle RX data */ 1226 spin_lock(&serial->serial_lock);
1225 spin_lock(&serial->serial_lock); 1227 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
1226 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; 1228 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1227 put_rxbuf_data_and_resubmit_bulk_urb(serial); 1229 spin_unlock(&serial->serial_lock);
1228 spin_unlock(&serial->serial_lock);
1229 } else if (status == -ENOENT || status == -ECONNRESET) {
1230 /* Unlinked - check for throttled port. */
1231 D2("Port %d, successfully unlinked urb", serial->minor);
1232 spin_lock(&serial->serial_lock);
1233 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1234 hso_resubmit_rx_bulk_urb(serial, urb);
1235 spin_unlock(&serial->serial_lock);
1236 } else {
1237 D2("Port %d, status = %d for read urb", serial->minor, status);
1238 return;
1239 }
1240} 1230}
1241 1231
1242/* 1232/*
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b2dae6..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
526{ 526{
527 u8 status; 527 u8 status;
528 528
529 if (skb->len == 0) { 529 /* This check is no longer done by usbnet */
530 dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); 530 if (skb->len < dev->net->hard_header_len) {
531 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
531 return 0; 532 return 0;
532 } 533 }
533 534
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d9227775..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
364 struct nc_trailer *trailer; 364 struct nc_trailer *trailer;
365 u16 hdr_len, packet_len; 365 u16 hdr_len, packet_len;
366 366
367 /* This check is no longer done by usbnet */
368 if (skb->len < dev->net->hard_header_len)
369 return 0;
370
367 if (!(skb->len & 0x01)) { 371 if (!(skb->len & 0x01)) {
368 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", 372 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
369 skb->len, dev->net->hard_header_len, dev->hard_mtu, 373 skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
80{ 80{
81 __be16 proto; 81 __be16 proto;
82 82
83 /* usbnet rx_complete guarantees that skb->len is at least 83 /* This check is no longer done by usbnet */
84 * hard_header_len, so we can inspect the dest address without 84 if (skb->len < dev->net->hard_header_len)
85 * checking skb->len 85 return 0;
86 */ 86
87 switch (skb->data[0] & 0xf0) { 87 switch (skb->data[0] & 0xf0) {
88 case 0x40: 88 case 0x40:
89 proto = htons(ETH_P_IP); 89 proto = htons(ETH_P_IP);
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
712 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)}, 712 {QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
713 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, 713 {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
714 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, 714 {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
715 {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
715 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, 716 {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
716 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ 717 {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
717 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, 718 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
723 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 724 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
724 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 725 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
725 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 726 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
727 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
726 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 728 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
727 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 729 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
728 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 730 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
@@ -730,6 +732,7 @@ static const struct usb_device_id products[] = {
730 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
731 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
732 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
735 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
733 736
734 /* 4. Gobi 1000 devices */ 737 /* 4. Gobi 1000 devices */
735 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 738 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac732c6f1..d89dbe395ad2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2273,22 +2273,21 @@ static int rtl8152_open(struct net_device *netdev)
2273 struct r8152 *tp = netdev_priv(netdev); 2273 struct r8152 *tp = netdev_priv(netdev);
2274 int res = 0; 2274 int res = 0;
2275 2275
2276 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2277 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2278 DUPLEX_FULL);
2279 tp->speed = 0;
2280 netif_carrier_off(netdev);
2281 netif_start_queue(netdev);
2282 set_bit(WORK_ENABLE, &tp->flags);
2276 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL); 2283 res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
2277 if (res) { 2284 if (res) {
2278 if (res == -ENODEV) 2285 if (res == -ENODEV)
2279 netif_device_detach(tp->netdev); 2286 netif_device_detach(tp->netdev);
2280 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 2287 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
2281 res); 2288 res);
2282 return res;
2283 } 2289 }
2284 2290
2285 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2286 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2287 DUPLEX_FULL);
2288 tp->speed = 0;
2289 netif_carrier_off(netdev);
2290 netif_start_queue(netdev);
2291 set_bit(WORK_ENABLE, &tp->flags);
2292 2291
2293 return res; 2292 return res;
2294} 2293}
@@ -2298,8 +2297,8 @@ static int rtl8152_close(struct net_device *netdev)
2298 struct r8152 *tp = netdev_priv(netdev); 2297 struct r8152 *tp = netdev_priv(netdev);
2299 int res = 0; 2298 int res = 0;
2300 2299
2301 usb_kill_urb(tp->intr_urb);
2302 clear_bit(WORK_ENABLE, &tp->flags); 2300 clear_bit(WORK_ENABLE, &tp->flags);
2301 usb_kill_urb(tp->intr_urb);
2303 cancel_delayed_work_sync(&tp->schedule); 2302 cancel_delayed_work_sync(&tp->schedule);
2304 netif_stop_queue(netdev); 2303 netif_stop_queue(netdev);
2305 tasklet_disable(&tp->tl); 2304 tasklet_disable(&tp->tl);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f20c1a..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
492 */ 492 */
493int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 493int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
494{ 494{
495 /* This check is no longer done by usbnet */
496 if (skb->len < dev->net->hard_header_len)
497 return 0;
498
495 /* peripheral may have batched packets to us... */ 499 /* peripheral may have batched packets to us... */
496 while (likely(skb->len)) { 500 while (likely(skb->len)) {
497 struct rndis_data_hdr *hdr = (void *)skb->data; 501 struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e02dd34..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
2106 2106
2107static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 2107static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2108{ 2108{
2109 /* This check is no longer done by usbnet */
2110 if (skb->len < dev->net->hard_header_len)
2111 return 0;
2112
2109 while (skb->len > 0) { 2113 while (skb->len > 0) {
2110 u32 rx_cmd_a, rx_cmd_b, align_count, size; 2114 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2111 struct sk_buff *ax_skb; 2115 struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0f7b29..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1723 1723
1724static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1724static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1725{ 1725{
1726 /* This check is no longer done by usbnet */
1727 if (skb->len < dev->net->hard_header_len)
1728 return 0;
1729
1726 while (skb->len > 0) { 1730 while (skb->len > 0) {
1727 u32 header, align_count; 1731 u32 header, align_count;
1728 struct sk_buff *ax_skb; 1732 struct sk_buff *ax_skb;
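The ax88179, gl620a, mcs7830, net1080, qmi_wwan, rndis_host, smsc75xx and smsc95xx hunks above all add the same guard: usbnet no longer rejects URBs shorter than hard_header_len, so each rx_fixup() must check the length itself before parsing. A tiny stand-alone sketch of that guard, with a plain buffer and length standing in for the sk_buff:

#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14	/* hard_header_len for an Ethernet usbnet device */

/* Sketch of the guard each rx_fixup() in this series now opens with:
 * refuse to parse a buffer shorter than the link-layer header. */
static int rx_fixup(const unsigned char *buf, size_t len)
{
	(void)buf;
	if (len < ETH_HLEN)
		return 0;	/* drop: too short to contain a header */

	/* ... real drivers would walk the batched packets here ... */
	return 1;		/* accept */
}

int main(void)
{
	unsigned char runt[4] = { 0 };
	unsigned char frame[64] = { 0 };

	printf("runt accepted:  %d\n", rx_fixup(runt, sizeof(runt)));
	printf("frame accepted: %d\n", rx_fixup(frame, sizeof(frame)));
	return 0;
}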
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..b94a0fbb8b3b
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,874 @@
1/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
2 *
3 * Author : Liu Junliang <liujunliang_ljl@163.com>
4 *
5 * Based on asix_common.c, asix_devices.c
6 *
7 * This file is licensed under the terms of the GNU General Public License
8 * version 2. This program is licensed "as is" without any warranty of any
9 * kind, whether express or implied.*
10 */
11
12#include <linux/module.h>
13#include <linux/kmod.h>
14#include <linux/init.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/workqueue.h>
19#include <linux/mii.h>
20#include <linux/usb.h>
21#include <linux/crc32.h>
22#include <linux/usb/usbnet.h>
23#include <linux/slab.h>
24#include <linux/if_vlan.h>
25
26#include "sr9800.h"
27
28static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
29 u16 size, void *data)
30{
31 int err;
32
33 err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
34 data, size);
35 if ((err != size) && (err >= 0))
36 err = -EINVAL;
37
38 return err;
39}
40
41static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
42 u16 size, void *data)
43{
44 int err;
45
46 err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
47 data, size);
48 if ((err != size) && (err >= 0))
49 err = -EINVAL;
50
51 return err;
52}
53
54static void
55sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
56 u16 size, void *data)
57{
58 usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
59 size);
60}
61
62static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
63{
64 int offset = 0;
65
66 /* This check is no longer done by usbnet */
67 if (skb->len < dev->net->hard_header_len)
68 return 0;
69
70 while (offset + sizeof(u32) < skb->len) {
71 struct sk_buff *sr_skb;
72 u16 size;
73 u32 header = get_unaligned_le32(skb->data + offset);
74
75 offset += sizeof(u32);
76 /* get the packet length */
77 size = (u16) (header & 0x7ff);
78 if (size != ((~header >> 16) & 0x07ff)) {
79 netdev_err(dev->net, "%s : Bad Header Length\n",
80 __func__);
81 return 0;
82 }
83
84 if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
85 (size + offset > skb->len)) {
86 netdev_err(dev->net, "%s : Bad RX Length %d\n",
87 __func__, size);
88 return 0;
89 }
90 sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
91 if (!sr_skb)
92 return 0;
93
94 skb_put(sr_skb, size);
95 memcpy(sr_skb->data, skb->data + offset, size);
96 usbnet_skb_return(dev, sr_skb);
97
98 offset += (size + 1) & 0xfffe;
99 }
100
101 if (skb->len != offset) {
102 netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
103 skb->len);
104 return 0;
105 }
106
107 return 1;
108}
109
110static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
111 gfp_t flags)
112{
113 int headroom = skb_headroom(skb);
114 int tailroom = skb_tailroom(skb);
115 u32 padbytes = 0xffff0000;
116 u32 packet_len;
117 int padlen;
118
119 padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
120
121 if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
122 if ((headroom < 4) || (tailroom < padlen)) {
123 skb->data = memmove(skb->head + 4, skb->data,
124 skb->len);
125 skb_set_tail_pointer(skb, skb->len);
126 }
127 } else {
128 struct sk_buff *skb2;
129 skb2 = skb_copy_expand(skb, 4, padlen, flags);
130 dev_kfree_skb_any(skb);
131 skb = skb2;
132 if (!skb)
133 return NULL;
134 }
135
136 skb_push(skb, 4);
137 packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
138 cpu_to_le32s(&packet_len);
139 skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
140
141 if (padlen) {
142 cpu_to_le32s(&padbytes);
143 memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
144 skb_put(skb, sizeof(padbytes));
145 }
146
147 return skb;
148}
149
150static void sr_status(struct usbnet *dev, struct urb *urb)
151{
152 struct sr9800_int_data *event;
153 int link;
154
155 if (urb->actual_length < 8)
156 return;
157
158 event = urb->transfer_buffer;
159 link = event->link & 0x01;
160 if (netif_carrier_ok(dev->net) != link) {
161 usbnet_link_change(dev, link, 1);
162 netdev_dbg(dev->net, "Link Status is: %d\n", link);
163 }
164
165 return;
166}
167
168static inline int sr_set_sw_mii(struct usbnet *dev)
169{
170 int ret;
171
172 ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
173 if (ret < 0)
174 netdev_err(dev->net, "Failed to enable software MII access\n");
175 return ret;
176}
177
178static inline int sr_set_hw_mii(struct usbnet *dev)
179{
180 int ret;
181
182 ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
183 if (ret < 0)
184 netdev_err(dev->net, "Failed to enable hardware MII access\n");
185 return ret;
186}
187
188static inline int sr_get_phy_addr(struct usbnet *dev)
189{
190 u8 buf[2];
191 int ret;
192
193 ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
194 if (ret < 0) {
195 netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
196 __func__, ret);
197 goto out;
198 }
199 netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
200 *((__le16 *)buf));
201
202 ret = buf[1];
203
204out:
205 return ret;
206}
207
208static int sr_sw_reset(struct usbnet *dev, u8 flags)
209{
210 int ret;
211
212 ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
213 if (ret < 0)
214 netdev_err(dev->net, "Failed to send software reset:%02x\n",
215 ret);
216
217 return ret;
218}
219
220static u16 sr_read_rx_ctl(struct usbnet *dev)
221{
222 __le16 v;
223 int ret;
224
225 ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
226 if (ret < 0) {
227 netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
228 ret);
229 goto out;
230 }
231
232 ret = le16_to_cpu(v);
233out:
234 return ret;
235}
236
237static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
238{
239 int ret;
240
241 netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
242 ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
243 if (ret < 0)
244 netdev_err(dev->net,
245 "Failed to write RX_CTL mode to 0x%04x:%02x\n",
246 mode, ret);
247
248 return ret;
249}
250
251static u16 sr_read_medium_status(struct usbnet *dev)
252{
253 __le16 v;
254 int ret;
255
256 ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
257 if (ret < 0) {
258 netdev_err(dev->net,
259 "Error reading Medium Status register:%02x\n", ret);
260 return ret; /* TODO: callers not checking for error ret */
261 }
262
263 return le16_to_cpu(v);
264}
265
266static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
267{
268 int ret;
269
270 netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
271 ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
272 if (ret < 0)
273 netdev_err(dev->net,
274 "Failed to write Medium Mode mode to 0x%04x:%02x\n",
275 mode, ret);
276 return ret;
277}
278
279static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
280{
281 int ret;
282
283 netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
284 ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
285 if (ret < 0)
286 netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
287 value, ret);
288 if (sleep)
289 msleep(sleep);
290
291 return ret;
292}
293
294/* SR9800 have a 16-bit RX_CTL value */
295static void sr_set_multicast(struct net_device *net)
296{
297 struct usbnet *dev = netdev_priv(net);
298 struct sr_data *data = (struct sr_data *)&dev->data;
299 u16 rx_ctl = SR_DEFAULT_RX_CTL;
300
301 if (net->flags & IFF_PROMISC) {
302 rx_ctl |= SR_RX_CTL_PRO;
303 } else if (net->flags & IFF_ALLMULTI ||
304 netdev_mc_count(net) > SR_MAX_MCAST) {
305 rx_ctl |= SR_RX_CTL_AMALL;
306 } else if (netdev_mc_empty(net)) {
307 /* just broadcast and directed */
308 } else {
309 /* We use the 20 byte dev->data
310 * for our 8 byte filter buffer
311 * to avoid allocating memory that
312 * is tricky to free later
313 */
314 struct netdev_hw_addr *ha;
315 u32 crc_bits;
316
317 memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
318
319 /* Build the multicast hash filter. */
320 netdev_for_each_mc_addr(ha, net) {
321 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
322 data->multi_filter[crc_bits >> 3] |=
323 1 << (crc_bits & 7);
324 }
325
326 sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
327 SR_MCAST_FILTER_SIZE, data->multi_filter);
328
329 rx_ctl |= SR_RX_CTL_AM;
330 }
331
332 sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
333}
334
335static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
336{
337 struct usbnet *dev = netdev_priv(net);
338 __le16 res;
339
340 mutex_lock(&dev->phy_mutex);
341 sr_set_sw_mii(dev);
342 sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
343 sr_set_hw_mii(dev);
344 mutex_unlock(&dev->phy_mutex);
345
346 netdev_dbg(dev->net,
347 "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
348 phy_id, loc, le16_to_cpu(res));
349
350 return le16_to_cpu(res);
351}
352
353static void
354sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
355{
356 struct usbnet *dev = netdev_priv(net);
357 __le16 res = cpu_to_le16(val);
358
359 netdev_dbg(dev->net,
360 "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
361 phy_id, loc, val);
362 mutex_lock(&dev->phy_mutex);
363 sr_set_sw_mii(dev);
364 sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
365 sr_set_hw_mii(dev);
366 mutex_unlock(&dev->phy_mutex);
367}
368
369/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
370static u32 sr_get_phyid(struct usbnet *dev)
371{
372 int phy_reg;
373 u32 phy_id;
374 int i;
375
376 /* Poll for the rare case the FW or phy isn't ready yet. */
377 for (i = 0; i < 100; i++) {
378 phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
379 if (phy_reg != 0 && phy_reg != 0xFFFF)
380 break;
381 mdelay(1);
382 }
383
384 if (phy_reg <= 0 || phy_reg == 0xFFFF)
385 return 0;
386
387 phy_id = (phy_reg & 0xffff) << 16;
388
389 phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
390 if (phy_reg < 0)
391 return 0;
392
393 phy_id |= (phy_reg & 0xffff);
394
395 return phy_id;
396}
397
398static void
399sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
400{
401 struct usbnet *dev = netdev_priv(net);
402 u8 opt;
403
404 if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
405 wolinfo->supported = 0;
406 wolinfo->wolopts = 0;
407 return;
408 }
409 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
410 wolinfo->wolopts = 0;
411 if (opt & SR_MONITOR_LINK)
412 wolinfo->wolopts |= WAKE_PHY;
413 if (opt & SR_MONITOR_MAGIC)
414 wolinfo->wolopts |= WAKE_MAGIC;
415}
416
417static int
418sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
419{
420 struct usbnet *dev = netdev_priv(net);
421 u8 opt = 0;
422
423 if (wolinfo->wolopts & WAKE_PHY)
424 opt |= SR_MONITOR_LINK;
425 if (wolinfo->wolopts & WAKE_MAGIC)
426 opt |= SR_MONITOR_MAGIC;
427
428 if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
429 opt, 0, 0, NULL) < 0)
430 return -EINVAL;
431
432 return 0;
433}
434
435static int sr_get_eeprom_len(struct net_device *net)
436{
437 struct usbnet *dev = netdev_priv(net);
438 struct sr_data *data = (struct sr_data *)&dev->data;
439
440 return data->eeprom_len;
441}
442
443static int sr_get_eeprom(struct net_device *net,
444 struct ethtool_eeprom *eeprom, u8 *data)
445{
446 struct usbnet *dev = netdev_priv(net);
447 __le16 *ebuf = (__le16 *)data;
448 int ret;
449 int i;
450
451 /* Crude hack to ensure that we don't overwrite memory
452 * if an odd length is supplied
453 */
454 if (eeprom->len % 2)
455 return -EINVAL;
456
457 eeprom->magic = SR_EEPROM_MAGIC;
458
459 /* sr9800 returns 2 bytes from eeprom on read */
460 for (i = 0; i < eeprom->len / 2; i++) {
461 ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
462 0, 2, &ebuf[i]);
463 if (ret < 0)
464 return -EINVAL;
465 }
466 return 0;
467}
468
469static void sr_get_drvinfo(struct net_device *net,
470 struct ethtool_drvinfo *info)
471{
472 struct usbnet *dev = netdev_priv(net);
473 struct sr_data *data = (struct sr_data *)&dev->data;
474
475 /* Inherit standard device info */
476 usbnet_get_drvinfo(net, info);
477 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
478 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
479 info->eedump_len = data->eeprom_len;
480}
481
482static u32 sr_get_link(struct net_device *net)
483{
484 struct usbnet *dev = netdev_priv(net);
485
486 return mii_link_ok(&dev->mii);
487}
488
489static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
490{
491 struct usbnet *dev = netdev_priv(net);
492
493 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
494}
495
496static int sr_set_mac_address(struct net_device *net, void *p)
497{
498 struct usbnet *dev = netdev_priv(net);
499 struct sr_data *data = (struct sr_data *)&dev->data;
500 struct sockaddr *addr = p;
501
502 if (netif_running(net))
503 return -EBUSY;
504 if (!is_valid_ether_addr(addr->sa_data))
505 return -EADDRNOTAVAIL;
506
507 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
508
509 /* We use the 20 byte dev->data
510 * for our 6 byte mac buffer
511 * to avoid allocating memory that
512 * is tricky to free later
513 */
514 memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
515 sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
516 data->mac_addr);
517
518 return 0;
519}
520
521static const struct ethtool_ops sr9800_ethtool_ops = {
522 .get_drvinfo = sr_get_drvinfo,
523 .get_link = sr_get_link,
524 .get_msglevel = usbnet_get_msglevel,
525 .set_msglevel = usbnet_set_msglevel,
526 .get_wol = sr_get_wol,
527 .set_wol = sr_set_wol,
528 .get_eeprom_len = sr_get_eeprom_len,
529 .get_eeprom = sr_get_eeprom,
530 .get_settings = usbnet_get_settings,
531 .set_settings = usbnet_set_settings,
532 .nway_reset = usbnet_nway_reset,
533};
534
535static int sr9800_link_reset(struct usbnet *dev)
536{
537 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
538 u16 mode;
539
540 mii_check_media(&dev->mii, 1, 1);
541 mii_ethtool_gset(&dev->mii, &ecmd);
542 mode = SR9800_MEDIUM_DEFAULT;
543
544 if (ethtool_cmd_speed(&ecmd) != SPEED_100)
545 mode &= ~SR_MEDIUM_PS;
546
547 if (ecmd.duplex != DUPLEX_FULL)
548 mode &= ~SR_MEDIUM_FD;
549
550 netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
551 __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
552
553 sr_write_medium_mode(dev, mode);
554
555 return 0;
556}
557
558
559static int sr9800_set_default_mode(struct usbnet *dev)
560{
561 u16 rx_ctl;
562 int ret;
563
564 sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
565 sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
566 ADVERTISE_ALL | ADVERTISE_CSMA);
567 mii_nway_restart(&dev->mii);
568
569 ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
570 if (ret < 0)
571 goto out;
572
573 ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
574 SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
575 SR9800_IPG2_DEFAULT, 0, NULL);
576 if (ret < 0) {
577 netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
578 goto out;
579 }
580
581 /* Set RX_CTL to default values with 2k buffer, and enable cactus */
582 ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
583 if (ret < 0)
584 goto out;
585
586 rx_ctl = sr_read_rx_ctl(dev);
587 netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
588 rx_ctl);
589
590 rx_ctl = sr_read_medium_status(dev);
591 netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
592 rx_ctl);
593
594 return 0;
595out:
596 return ret;
597}
598
599static int sr9800_reset(struct usbnet *dev)
600{
601 struct sr_data *data = (struct sr_data *)&dev->data;
602 int ret, embd_phy;
603 u16 rx_ctl;
604
605 ret = sr_write_gpio(dev,
606 SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
607 if (ret < 0)
608 goto out;
609
610 embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
611
612 ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
613 if (ret < 0) {
614 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
615 goto out;
616 }
617
618 ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
619 if (ret < 0)
620 goto out;
621
622 msleep(150);
623
624 ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
625 if (ret < 0)
626 goto out;
627
628 msleep(150);
629
630 if (embd_phy) {
631 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
632 if (ret < 0)
633 goto out;
634 } else {
635 ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
636 if (ret < 0)
637 goto out;
638 }
639
640 msleep(150);
641 rx_ctl = sr_read_rx_ctl(dev);
642 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
643 ret = sr_write_rx_ctl(dev, 0x0000);
644 if (ret < 0)
645 goto out;
646
647 rx_ctl = sr_read_rx_ctl(dev);
648 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
649
650 ret = sr_sw_reset(dev, SR_SWRESET_PRL);
651 if (ret < 0)
652 goto out;
653
654 msleep(150);
655
656 ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
657 if (ret < 0)
658 goto out;
659
660 msleep(150);
661
662 ret = sr9800_set_default_mode(dev);
663 if (ret < 0)
664 goto out;
665
666 /* Rewrite MAC address */
667 memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
668 ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
669 data->mac_addr);
670 if (ret < 0)
671 goto out;
672
673 return 0;
674
675out:
676 return ret;
677}
678
679static const struct net_device_ops sr9800_netdev_ops = {
680 .ndo_open = usbnet_open,
681 .ndo_stop = usbnet_stop,
682 .ndo_start_xmit = usbnet_start_xmit,
683 .ndo_tx_timeout = usbnet_tx_timeout,
684 .ndo_change_mtu = usbnet_change_mtu,
685 .ndo_set_mac_address = sr_set_mac_address,
686 .ndo_validate_addr = eth_validate_addr,
687 .ndo_do_ioctl = sr_ioctl,
688 .ndo_set_rx_mode = sr_set_multicast,
689};
690
691static int sr9800_phy_powerup(struct usbnet *dev)
692{
693 int ret;
694
695 /* set the embedded Ethernet PHY in power-down state */
696 ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
697 if (ret < 0) {
698 netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
699 return ret;
700 }
701 msleep(20);
702
703 /* set the embedded Ethernet PHY in power-up state */
704 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
705 if (ret < 0) {
706 netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
707 return ret;
708 }
709 msleep(600);
710
711 /* set the embedded Ethernet PHY in reset state */
712 ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
713 if (ret < 0) {
714 netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
715 return ret;
716 }
717 msleep(20);
718
719 /* set the embedded Ethernet PHY in power-up state */
720 ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
721 if (ret < 0) {
722 netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
723 return ret;
724 }
725
726 return 0;
727}
728
729static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
730{
731 struct sr_data *data = (struct sr_data *)&dev->data;
732 u16 led01_mux, led23_mux;
733 int ret, embd_phy;
734 u32 phyid;
735 u16 rx_ctl;
736
737 data->eeprom_len = SR9800_EEPROM_LEN;
738
739 usbnet_get_endpoints(dev, intf);
740
741 /* LED Setting Rule :
742 * AABB:CCDD
743 * AA : MFA0(LED0)
744 * BB : MFA1(LED1)
745 * CC : MFA2(LED2), Reserved for SR9800
746 * DD : MFA3(LED3), Reserved for SR9800
747 */
748 led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
749 led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
750 ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
751 if (ret < 0) {
752 netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
753 goto out;
754 }
755
756 /* Get the MAC address */
757 ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
758 dev->net->dev_addr);
759 if (ret < 0) {
760 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
761 return ret;
762 }
763 netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
764
765 /* Initialize MII structure */
766 dev->mii.dev = dev->net;
767 dev->mii.mdio_read = sr_mdio_read;
768 dev->mii.mdio_write = sr_mdio_write;
769 dev->mii.phy_id_mask = 0x1f;
770 dev->mii.reg_num_mask = 0x1f;
771 dev->mii.phy_id = sr_get_phy_addr(dev);
772
773 dev->net->netdev_ops = &sr9800_netdev_ops;
774 dev->net->ethtool_ops = &sr9800_ethtool_ops;
775
776 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
777 /* Reset the PHY to normal operation mode */
778 ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
779 if (ret < 0) {
780 netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
781 return ret;
782 }
783
784 /* Init PHY routine */
785 ret = sr9800_phy_powerup(dev);
786 if (ret < 0)
787 goto out;
788
789 rx_ctl = sr_read_rx_ctl(dev);
790 netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
791 ret = sr_write_rx_ctl(dev, 0x0000);
792 if (ret < 0)
793 goto out;
794
795 rx_ctl = sr_read_rx_ctl(dev);
796 netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
797
798 /* Read PHYID register *AFTER* the PHY was reset properly */
799 phyid = sr_get_phyid(dev);
800 netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
801
802 /* medium mode setting */
803 ret = sr9800_set_default_mode(dev);
804 if (ret < 0)
805 goto out;
806
807 if (dev->udev->speed == USB_SPEED_HIGH) {
808 ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
809 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
810 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
811 0, NULL);
812 if (ret < 0) {
813 netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
814 goto out;
815 }
816 dev->rx_urb_size =
817 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
818 } else {
819 ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
820 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
821 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
822 0, NULL);
823 if (ret < 0) {
824 netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
825 goto out;
826 }
827 dev->rx_urb_size =
828 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
829 }
 830	netdev_dbg(dev->net, "%s : setting rx_urb_size to %zu\n", __func__,
831 dev->rx_urb_size);
832 return 0;
833
834out:
835 return ret;
836}
837
838static const struct driver_info sr9800_driver_info = {
839 .description = "CoreChip SR9800 USB 2.0 Ethernet",
840 .bind = sr9800_bind,
841 .status = sr_status,
842 .link_reset = sr9800_link_reset,
843 .reset = sr9800_reset,
844 .flags = DRIVER_FLAG,
845 .rx_fixup = sr_rx_fixup,
846 .tx_fixup = sr_tx_fixup,
847};
848
849static const struct usb_device_id products[] = {
850 {
851 USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
852 .driver_info = (unsigned long) &sr9800_driver_info,
853 },
854 {}, /* END */
855};
856
857MODULE_DEVICE_TABLE(usb, products);
858
859static struct usb_driver sr_driver = {
860 .name = DRIVER_NAME,
861 .id_table = products,
862 .probe = usbnet_probe,
863 .suspend = usbnet_suspend,
864 .resume = usbnet_resume,
865 .disconnect = usbnet_disconnect,
866 .supports_autosuspend = 1,
867};
868
869module_usb_driver(sr_driver);
870
871MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com");
872MODULE_VERSION(DRIVER_VERSION);
873MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
874MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
1/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
2 *
3 * Author : Liu Junliang <liujunliang_ljl@163.com>
4 *
5 * This file is licensed under the terms of the GNU General Public License
6 * version 2. This program is licensed "as is" without any warranty of any
7 * kind, whether express or implied.
8 */
9
10#ifndef _SR9800_H
11#define _SR9800_H
12
13/* SR9800 spec. command table on Linux Platform */
14
15/* command : Software Station Management Control Reg */
16#define SR_CMD_SET_SW_MII 0x06
17/* command : PHY Read Reg */
18#define SR_CMD_READ_MII_REG 0x07
19/* command : PHY Write Reg */
20#define SR_CMD_WRITE_MII_REG 0x08
21/* command : Hardware Station Management Control Reg */
22#define SR_CMD_SET_HW_MII 0x0a
23/* command : SROM Read Reg */
24#define SR_CMD_READ_EEPROM 0x0b
25/* command : SROM Write Reg */
26#define SR_CMD_WRITE_EEPROM 0x0c
27/* command : SROM Write Enable Reg */
28#define SR_CMD_WRITE_ENABLE 0x0d
29/* command : SROM Write Disable Reg */
30#define SR_CMD_WRITE_DISABLE 0x0e
31/* command : RX Control Read Reg */
32#define SR_CMD_READ_RX_CTL 0x0f
33#define SR_RX_CTL_PRO (1 << 0)
34#define SR_RX_CTL_AMALL (1 << 1)
35#define SR_RX_CTL_SEP (1 << 2)
36#define SR_RX_CTL_AB (1 << 3)
37#define SR_RX_CTL_AM (1 << 4)
38#define SR_RX_CTL_AP (1 << 5)
39#define SR_RX_CTL_ARP (1 << 6)
40#define SR_RX_CTL_SO (1 << 7)
41#define SR_RX_CTL_RH1M (1 << 8)
42#define SR_RX_CTL_RH2M (1 << 9)
43#define SR_RX_CTL_RH3M (1 << 10)
44/* command : RX Control Write Reg */
45#define SR_CMD_WRITE_RX_CTL 0x10
46/* command : IPG0/IPG1/IPG2 Control Read Reg */
47#define SR_CMD_READ_IPG012 0x11
48/* command : IPG0/IPG1/IPG2 Control Write Reg */
49#define SR_CMD_WRITE_IPG012 0x12
50/* command : Node ID Read Reg */
51#define SR_CMD_READ_NODE_ID 0x13
52/* command : Node ID Write Reg */
53#define SR_CMD_WRITE_NODE_ID 0x14
54/* command : Multicast Filter Array Read Reg */
55#define SR_CMD_READ_MULTI_FILTER 0x15
56/* command : Multicast Filter Array Write Reg */
57#define SR_CMD_WRITE_MULTI_FILTER 0x16
58/* command : Eth/HomePNA PHY Address Reg */
59#define SR_CMD_READ_PHY_ID 0x19
60/* command : Medium Status Read Reg */
61#define SR_CMD_READ_MEDIUM_STATUS 0x1a
62#define SR_MONITOR_LINK (1 << 1)
63#define SR_MONITOR_MAGIC (1 << 2)
64#define SR_MONITOR_HSFS (1 << 4)
65/* command : Medium Status Write Reg */
66#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
67#define SR_MEDIUM_GM (1 << 0)
68#define SR_MEDIUM_FD (1 << 1)
69#define SR_MEDIUM_AC (1 << 2)
70#define SR_MEDIUM_ENCK (1 << 3)
71#define SR_MEDIUM_RFC (1 << 4)
72#define SR_MEDIUM_TFC (1 << 5)
73#define SR_MEDIUM_JFE (1 << 6)
74#define SR_MEDIUM_PF (1 << 7)
75#define SR_MEDIUM_RE (1 << 8)
76#define SR_MEDIUM_PS (1 << 9)
77#define SR_MEDIUM_RSV (1 << 10)
78#define SR_MEDIUM_SBP (1 << 11)
79#define SR_MEDIUM_SM (1 << 12)
80/* command : Monitor Mode Status Read Reg */
81#define SR_CMD_READ_MONITOR_MODE 0x1c
82/* command : Monitor Mode Status Write Reg */
83#define SR_CMD_WRITE_MONITOR_MODE 0x1d
84/* command : GPIO Status Read Reg */
85#define SR_CMD_READ_GPIOS 0x1e
86#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
87#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
88#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
89#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
90#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
91#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
92#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
93#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
94/* command : GPIO Status Write Reg */
95#define SR_CMD_WRITE_GPIOS 0x1f
96/* command : Eth PHY Power and Reset Control Reg */
97#define SR_CMD_SW_RESET 0x20
98#define SR_SWRESET_CLEAR 0x00
99#define SR_SWRESET_RR (1 << 0)
100#define SR_SWRESET_RT (1 << 1)
101#define SR_SWRESET_PRTE (1 << 2)
102#define SR_SWRESET_PRL (1 << 3)
103#define SR_SWRESET_BZ (1 << 4)
104#define SR_SWRESET_IPRL (1 << 5)
105#define SR_SWRESET_IPPD (1 << 6)
106/* command : Software Interface Selection Status Read Reg */
107#define SR_CMD_SW_PHY_STATUS 0x21
108/* command : Software Interface Selection Status Write Reg */
109#define SR_CMD_SW_PHY_SELECT 0x22
110/* command : BULK in Buffer Size Reg */
111#define SR_CMD_BULKIN_SIZE 0x2A
112/* command : LED_MUX Control Reg */
113#define SR_CMD_LED_MUX 0x70
114#define SR_LED_MUX_TX_ACTIVE (1 << 0)
115#define SR_LED_MUX_RX_ACTIVE (1 << 1)
116#define SR_LED_MUX_COLLISION (1 << 2)
117#define SR_LED_MUX_DUP_COL (1 << 3)
118#define SR_LED_MUX_DUP (1 << 4)
119#define SR_LED_MUX_SPEED (1 << 5)
120#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
121#define SR_LED_MUX_LINK (1 << 7)
122
123/* Register Access Flags */
124#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
125#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
126
127/* Multicast Filter Array size & Max Number */
128#define SR_MCAST_FILTER_SIZE 8
129#define SR_MAX_MCAST 64
130
131/* IPG0/1/2 Default Value */
132#define SR9800_IPG0_DEFAULT 0x15
133#define SR9800_IPG1_DEFAULT 0x0c
134#define SR9800_IPG2_DEFAULT 0x12
135
136/* Medium Status Default Mode */
137#define SR9800_MEDIUM_DEFAULT \
138 (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
139 SR_MEDIUM_TFC | SR_MEDIUM_PS | \
140 SR_MEDIUM_AC | SR_MEDIUM_RE)
141
142/* RX Control Default Setting */
143#define SR_DEFAULT_RX_CTL \
144 (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
145
146/* EEPROM Magic Number & EEPROM Size */
147#define SR_EEPROM_MAGIC 0xdeadbeef
148#define SR9800_EEPROM_LEN 0xff
149
150/* SR9800 Driver Version and Driver Name */
151#define DRIVER_VERSION "11-Nov-2013"
152#define DRIVER_NAME "CoreChips"
153#define DRIVER_FLAG \
154 (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
155
156/* SR9800 BULKIN Buffer Size */
157#define SR9800_MAX_BULKIN_2K 0
158#define SR9800_MAX_BULKIN_4K 1
159#define SR9800_MAX_BULKIN_6K 2
160#define SR9800_MAX_BULKIN_8K 3
161#define SR9800_MAX_BULKIN_16K 4
162#define SR9800_MAX_BULKIN_20K 5
163#define SR9800_MAX_BULKIN_24K 6
164#define SR9800_MAX_BULKIN_32K 7
165
 166struct { unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
167 /* 2k */
168 {2048, 0x8000, 0x8001},
169 /* 4k */
170 {4096, 0x8100, 0x8147},
171 /* 6k */
172 {6144, 0x8200, 0x81EB},
173 /* 8k */
174 {8192, 0x8300, 0x83D7},
 175	/* 16k */
176 {16384, 0x8400, 0x851E},
177 /* 20k */
178 {20480, 0x8500, 0x8666},
179 /* 24k */
180 {24576, 0x8600, 0x87AE},
181 /* 32k */
182 {32768, 0x8700, 0x8A3D},
183};
184
185/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
186struct sr_data {
187 u8 multi_filter[SR_MCAST_FILTER_SIZE];
188 u8 mac_addr[ETH_ALEN];
189 u8 phymode;
190 u8 ledmode;
191 u8 eeprom_len;
192};
193
194struct sr9800_int_data {
195 __le16 res1;
196 u8 link;
197 __le16 res2;
198 u8 status;
199 __le16 res3;
200} __packed;
201
202#endif /* _SR9800_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da755e7b..dd10d5817d2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
542 } 542 }
543 // else network stack removes extra byte if we forced a short packet 543 // else network stack removes extra byte if we forced a short packet
544 544
545 if (skb->len) { 545 /* all data was already cloned from skb inside the driver */
546 /* all data was already cloned from skb inside the driver */ 546 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
547 if (dev->driver_info->flags & FLAG_MULTI_PACKET) 547 goto done;
548 dev_kfree_skb_any(skb); 548
549 else 549 if (skb->len < ETH_HLEN) {
550 usbnet_skb_return(dev, skb); 550 dev->net->stats.rx_errors++;
551 dev->net->stats.rx_length_errors++;
552 netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
553 } else {
554 usbnet_skb_return(dev, skb);
551 return; 555 return;
552 } 556 }
553 557
554 netif_dbg(dev, rx_err, dev->net, "drop\n");
555 dev->net->stats.rx_errors++;
556done: 558done:
557 skb_queue_tail(&dev->done, skb); 559 skb_queue_tail(&dev->done, skb);
558} 560}
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
574 switch (urb_status) { 576 switch (urb_status) {
575 /* success */ 577 /* success */
576 case 0: 578 case 0:
577 if (skb->len < dev->net->hard_header_len) {
578 state = rx_cleanup;
579 dev->net->stats.rx_errors++;
580 dev->net->stats.rx_length_errors++;
581 netif_dbg(dev, rx_err, dev->net,
582 "rx length %d\n", skb->len);
583 }
584 break; 579 break;
585 580
586 /* stalls need manual reset. this is rare ... except that 581 /* stalls need manual reset. this is rare ... except that
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313c2d2d..b0f705c2378f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
469/* Look up Ethernet address in forwarding table */ 469/* Look up Ethernet address in forwarding table */
470static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 470static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
471 const u8 *mac) 471 const u8 *mac)
472
473{ 472{
474 struct hlist_head *head = vxlan_fdb_head(vxlan, mac); 473 struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
475 struct vxlan_fdb *f; 474 struct vxlan_fdb *f;
@@ -596,10 +595,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
596 NAPI_GRO_CB(p)->same_flow = 0; 595 NAPI_GRO_CB(p)->same_flow = 0;
597 continue; 596 continue;
598 } 597 }
599 goto found;
600 } 598 }
601 599
602found:
603 type = eh->h_proto; 600 type = eh->h_proto;
604 601
605 rcu_read_lock(); 602 rcu_read_lock();
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
71 const void *saddr, unsigned len) 71 const void *saddr, unsigned len)
72{ 72{
73 struct frhdr hdr; 73 struct frhdr hdr;
74 struct dlci_local *dlp;
75 unsigned int hlen; 74 unsigned int hlen;
76 char *dest; 75 char *dest;
77 76
78 dlp = netdev_priv(dev);
79
80 hdr.control = FRAD_I_UI; 77 hdr.control = FRAD_I_UI;
81 switch (type) 78 switch (type)
82 { 79 {
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
107 104
108static void dlci_receive(struct sk_buff *skb, struct net_device *dev) 105static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
109{ 106{
110 struct dlci_local *dlp;
111 struct frhdr *hdr; 107 struct frhdr *hdr;
112 int process, header; 108 int process, header;
113 109
114 dlp = netdev_priv(dev);
115 if (!pskb_may_pull(skb, sizeof(*hdr))) { 110 if (!pskb_may_pull(skb, sizeof(*hdr))) {
116 netdev_notice(dev, "invalid data no header\n"); 111 netdev_notice(dev, "invalid data no header\n");
117 dev->stats.rx_errors++; 112 dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df55e50..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
1764 AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */ 1764 AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
1765 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */ 1765 AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
1766 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */ 1766 AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
1767 AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108 1767 AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
1768 (CyberTAN Technology) */ 1768 (CyberTAN Technology) */
1769 AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */ 1769 AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
1770 AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */ 1770 AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); 110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
111 111
112 if (ah->ah_version == AR5K_AR5210) { 112 if (ah->ah_version == AR5K_AR5210) {
113 srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; 113 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; 114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
115 } else { 115 } else {
116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; 116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cbc07f0..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
5065 break; 5065 break;
5066 } 5066 }
5067 } 5067 }
5068
5069 if (is2GHz && !twiceMaxEdgePower)
5070 twiceMaxEdgePower = 60;
5071
5068 return twiceMaxEdgePower; 5072 return twiceMaxEdgePower;
5069} 5073}
5070 5074
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da3468d1f0..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
262struct ath9k_htc_sta { 262struct ath9k_htc_sta {
263 u8 index; 263 u8 index;
264 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID]; 264 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
265 struct work_struct rc_update_work;
266 struct ath9k_htc_priv *htc_priv;
265}; 267};
266 268
267#define ATH9K_HTC_RXBUF 256 269#define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de20d99c..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444); 34module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 35MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
36 36
37static int ath9k_ps_enable;
38module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
40
37#define CHAN2G(_freq, _idx) { \ 41#define CHAN2G(_freq, _idx) { \
38 .center_freq = (_freq), \ 42 .center_freq = (_freq), \
39 .hw_value = (_idx), \ 43 .hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
725 IEEE80211_HW_SPECTRUM_MGMT | 729 IEEE80211_HW_SPECTRUM_MGMT |
726 IEEE80211_HW_HAS_RATE_CONTROL | 730 IEEE80211_HW_HAS_RATE_CONTROL |
727 IEEE80211_HW_RX_INCLUDES_FCS | 731 IEEE80211_HW_RX_INCLUDES_FCS |
728 IEEE80211_HW_SUPPORTS_PS |
729 IEEE80211_HW_PS_NULLFUNC_STACK | 732 IEEE80211_HW_PS_NULLFUNC_STACK |
730 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 733 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
731 IEEE80211_HW_MFP_CAPABLE | 734 IEEE80211_HW_MFP_CAPABLE |
732 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 735 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
733 736
737 if (ath9k_ps_enable)
738 hw->flags |= IEEE80211_HW_SUPPORTS_PS;
739
734 hw->wiphy->interface_modes = 740 hw->wiphy->interface_modes =
735 BIT(NL80211_IFTYPE_STATION) | 741 BIT(NL80211_IFTYPE_STATION) |
736 BIT(NL80211_IFTYPE_ADHOC) | 742 BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1270 mutex_unlock(&priv->mutex); 1270 mutex_unlock(&priv->mutex);
1271} 1271}
1272 1272
1273static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
1274{
1275 struct ath9k_htc_sta *ista =
1276 container_of(work, struct ath9k_htc_sta, rc_update_work);
1277 struct ieee80211_sta *sta =
1278 container_of((void *)ista, struct ieee80211_sta, drv_priv);
1279 struct ath9k_htc_priv *priv = ista->htc_priv;
1280 struct ath_common *common = ath9k_hw_common(priv->ah);
1281 struct ath9k_htc_target_rate trate;
1282
1283 mutex_lock(&priv->mutex);
1284 ath9k_htc_ps_wakeup(priv);
1285
1286 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
1287 ath9k_htc_setup_rate(priv, sta, &trate);
1288 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1289 ath_dbg(common, CONFIG,
1290 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1291 sta->addr, be32_to_cpu(trate.capflags));
1292 else
1293 ath_dbg(common, CONFIG,
1294 "Unable to update supported rates for sta: %pM\n",
1295 sta->addr);
1296
1297 ath9k_htc_ps_restore(priv);
1298 mutex_unlock(&priv->mutex);
1299}
1300
1273static int ath9k_htc_sta_add(struct ieee80211_hw *hw, 1301static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
1274 struct ieee80211_vif *vif, 1302 struct ieee80211_vif *vif,
1275 struct ieee80211_sta *sta) 1303 struct ieee80211_sta *sta)
1276{ 1304{
1277 struct ath9k_htc_priv *priv = hw->priv; 1305 struct ath9k_htc_priv *priv = hw->priv;
1306 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1278 int ret; 1307 int ret;
1279 1308
1280 mutex_lock(&priv->mutex); 1309 mutex_lock(&priv->mutex);
1281 ath9k_htc_ps_wakeup(priv); 1310 ath9k_htc_ps_wakeup(priv);
1282 ret = ath9k_htc_add_station(priv, vif, sta); 1311 ret = ath9k_htc_add_station(priv, vif, sta);
1283 if (!ret) 1312 if (!ret) {
1313 INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
1314 ista->htc_priv = priv;
1284 ath9k_htc_init_rate(priv, sta); 1315 ath9k_htc_init_rate(priv, sta);
1316 }
1285 ath9k_htc_ps_restore(priv); 1317 ath9k_htc_ps_restore(priv);
1286 mutex_unlock(&priv->mutex); 1318 mutex_unlock(&priv->mutex);
1287 1319
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1293 struct ieee80211_sta *sta) 1325 struct ieee80211_sta *sta)
1294{ 1326{
1295 struct ath9k_htc_priv *priv = hw->priv; 1327 struct ath9k_htc_priv *priv = hw->priv;
1296 struct ath9k_htc_sta *ista; 1328 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1297 int ret; 1329 int ret;
1298 1330
1331 cancel_work_sync(&ista->rc_update_work);
1332
1299 mutex_lock(&priv->mutex); 1333 mutex_lock(&priv->mutex);
1300 ath9k_htc_ps_wakeup(priv); 1334 ath9k_htc_ps_wakeup(priv);
1301 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1302 htc_sta_drain(priv->htc, ista->index); 1335 htc_sta_drain(priv->htc, ista->index);
1303 ret = ath9k_htc_remove_station(priv, vif, sta); 1336 ret = ath9k_htc_remove_station(priv, vif, sta);
1304 ath9k_htc_ps_restore(priv); 1337 ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
1311 struct ieee80211_vif *vif, 1344 struct ieee80211_vif *vif,
1312 struct ieee80211_sta *sta, u32 changed) 1345 struct ieee80211_sta *sta, u32 changed)
1313{ 1346{
1314 struct ath9k_htc_priv *priv = hw->priv; 1347 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
1315 struct ath_common *common = ath9k_hw_common(priv->ah);
1316 struct ath9k_htc_target_rate trate;
1317
1318 mutex_lock(&priv->mutex);
1319 ath9k_htc_ps_wakeup(priv);
1320 1348
1321 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 1349 if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
1322 memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); 1350 return;
1323 ath9k_htc_setup_rate(priv, sta, &trate);
1324 if (!ath9k_htc_send_rate_cmd(priv, &trate))
1325 ath_dbg(common, CONFIG,
1326 "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
1327 sta->addr, be32_to_cpu(trate.capflags));
1328 else
1329 ath_dbg(common, CONFIG,
1330 "Unable to update supported rates for sta: %pM\n",
1331 sta->addr);
1332 }
1333 1351
1334 ath9k_htc_ps_restore(priv); 1352 schedule_work(&ista->rc_update_work);
1335 mutex_unlock(&priv->mutex);
1336} 1353}
1337 1354
1338static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, 1355static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c05713f..11eab9f01fd8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1316 if (AR_SREV_9300_20_OR_LATER(ah)) 1316 if (AR_SREV_9300_20_OR_LATER(ah))
1317 udelay(50); 1317 udelay(50);
1318 else if (AR_SREV_9100(ah)) 1318 else if (AR_SREV_9100(ah))
1319 udelay(10000); 1319 mdelay(10);
1320 else 1320 else
1321 udelay(100); 1321 udelay(100);
1322 1322
@@ -2051,9 +2051,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2051 2051
2052 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, 2052 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2053 AR_RTC_FORCE_WAKE_EN); 2053 AR_RTC_FORCE_WAKE_EN);
2054
2055 if (AR_SREV_9100(ah)) 2054 if (AR_SREV_9100(ah))
2056 udelay(10000); 2055 mdelay(10);
2057 else 2056 else
2058 udelay(50); 2057 udelay(50);
2059 2058
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de303c8f3..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
57module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444); 57module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
58MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity"); 58MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
59 59
60static int ath9k_ps_enable;
61module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
62MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
63
60bool is_ath9k_unloaded; 64bool is_ath9k_unloaded;
61/* We use the hw_value as an index into our private channel structure */ 65/* We use the hw_value as an index into our private channel structure */
62 66
@@ -903,13 +907,15 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
903 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 907 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
904 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 908 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
905 IEEE80211_HW_SIGNAL_DBM | 909 IEEE80211_HW_SIGNAL_DBM |
906 IEEE80211_HW_SUPPORTS_PS |
907 IEEE80211_HW_PS_NULLFUNC_STACK | 910 IEEE80211_HW_PS_NULLFUNC_STACK |
908 IEEE80211_HW_SPECTRUM_MGMT | 911 IEEE80211_HW_SPECTRUM_MGMT |
909 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 912 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
910 IEEE80211_HW_SUPPORTS_RC_TABLE | 913 IEEE80211_HW_SUPPORTS_RC_TABLE |
911 IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 914 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
912 915
916 if (ath9k_ps_enable)
917 hw->flags |= IEEE80211_HW_SUPPORTS_PS;
918
913 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 919 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
914 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 920 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
915 921
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
496 496
497void hostap_remove_proc(local_info_t *local) 497void hostap_remove_proc(local_info_t *local)
498{ 498{
499 remove_proc_subtree(local->ddev->name, hostap_proc); 499 proc_remove(local->proc);
500} 500}
501 501
502 502
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3d55f6..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
696 return ret; 696 return ret;
697} 697}
698 698
699static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
700{
701 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
702 return false;
703 return true;
704}
705
706static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
707{
708 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
709 return false;
710 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
711 return true;
712
713 /* disabled by default */
714 return false;
715}
716
699static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 717static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
700 struct ieee80211_vif *vif, 718 struct ieee80211_vif *vif,
701 enum ieee80211_ampdu_mlme_action action, 719 enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
717 735
718 switch (action) { 736 switch (action) {
719 case IEEE80211_AMPDU_RX_START: 737 case IEEE80211_AMPDU_RX_START:
720 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 738 if (!iwl_enable_rx_ampdu(priv->cfg))
721 break; 739 break;
722 IWL_DEBUG_HT(priv, "start Rx\n"); 740 IWL_DEBUG_HT(priv, "start Rx\n");
723 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); 741 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
729 case IEEE80211_AMPDU_TX_START: 747 case IEEE80211_AMPDU_TX_START:
730 if (!priv->trans->ops->txq_enable) 748 if (!priv->trans->ops->txq_enable)
731 break; 749 break;
732 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 750 if (!iwl_enable_tx_ampdu(priv->cfg))
733 break; 751 break;
734 IWL_DEBUG_HT(priv, "start Tx\n"); 752 IWL_DEBUG_HT(priv, "start Tx\n");
735 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 753 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c3728163be46..75103554cd63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
1286MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 1286MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1287module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); 1287module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
1288MODULE_PARM_DESC(11n_disable, 1288MODULE_PARM_DESC(11n_disable,
 1289	 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1289	 "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8: enable agg TX");
1290module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1290module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1291 int, S_IRUGO); 1291 int, S_IRUGO);
1292MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); 1292MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade7edac..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@ enum iwl_power_level {
79 IWL_POWER_NUM 79 IWL_POWER_NUM
80}; 80};
81 81
82#define IWL_DISABLE_HT_ALL BIT(0) 82enum iwl_disable_11n {
83#define IWL_DISABLE_HT_TXAGG BIT(1) 83 IWL_DISABLE_HT_ALL = BIT(0),
84#define IWL_DISABLE_HT_RXAGG BIT(2) 84 IWL_DISABLE_HT_TXAGG = BIT(1),
85 IWL_DISABLE_HT_RXAGG = BIT(2),
86 IWL_ENABLE_HT_TXAGG = BIT(3),
87};
85 88
86/** 89/**
87 * struct iwl_mod_params 90 * struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
90 * 93 *
91 * @sw_crypto: using hardware encryption, default = 0 94 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0, 95 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants 96 * use IWL_[DIS,EN]ABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 0 97 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 98 * @restart_fw: restart firmware, default = 1
96 * @wd_disable: enable stuck queue check, default = 0 99 * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cbe1317..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
182 182
183 for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) { 183 for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
184 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); 184 ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
185
186 if (ch_idx >= NUM_2GHZ_CHANNELS &&
187 !data->sku_cap_band_52GHz_enable)
188 ch_flags &= ~NVM_CHANNEL_VALID;
189
185 if (!(ch_flags & NVM_CHANNEL_VALID)) { 190 if (!(ch_flags & NVM_CHANNEL_VALID)) {
186 IWL_DEBUG_EEPROM(dev, 191 IWL_DEBUG_EEPROM(dev,
187 "Ch. %d Flags %x [%sGHz] - No traffic\n", 192 "Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7424f2..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
504 * @match_notify: clients waiting for match found notification 504 * @match_notify: clients waiting for match found notification
505 * @pass_match: clients waiting for the results 505 * @pass_match: clients waiting for the results
506 * @active_clients: active clients bitmap - enum scan_framework_client 506 * @active_clients: active clients bitmap - enum scan_framework_client
507 * @any_beacon_notify: clients waiting for match notification without match
507 */ 508 */
508struct iwl_scan_offload_profile_cfg { 509struct iwl_scan_offload_profile_cfg {
509 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; 510 struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
512 u8 match_notify; 513 u8 match_notify;
513 u8 pass_match; 514 u8 pass_match;
514 u8 active_clients; 515 u8 active_clients;
515 u8 reserved[3]; 516 u8 any_beacon_notify;
517 u8 reserved[2];
516} __packed; 518} __packed;
517 519
518/** 520/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b5073c251..c35b8661b395 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
246 else 246 else
247 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 247 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
248 248
249 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { 249 if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
250 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 250 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
251 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 251 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
252 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; 252 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
328 ieee80211_free_txskb(hw, skb); 328 ieee80211_free_txskb(hw, skb);
329} 329}
330 330
331static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
332{
333 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
334 return false;
335 return true;
336}
337
338static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
339{
340 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
341 return false;
342 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
343 return true;
344
345 /* enabled by default */
346 return true;
347}
348
331static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 349static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
332 struct ieee80211_vif *vif, 350 struct ieee80211_vif *vif,
333 enum ieee80211_ampdu_mlme_action action, 351 enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
347 365
348 switch (action) { 366 switch (action) {
349 case IEEE80211_AMPDU_RX_START: 367 case IEEE80211_AMPDU_RX_START:
350 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { 368 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
351 ret = -EINVAL; 369 ret = -EINVAL;
352 break; 370 break;
353 } 371 }
@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
357 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 375 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
358 break; 376 break;
359 case IEEE80211_AMPDU_TX_START: 377 case IEEE80211_AMPDU_TX_START:
360 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { 378 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
361 ret = -EINVAL; 379 ret = -EINVAL;
362 break; 380 break;
363 } 381 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e0007960612..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
344 344
345 iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0); 345 iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
346 346
347 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); 347 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
348 TX_CMD_FLG_BT_DIS);
348 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; 349 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
349 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 350 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
350 cmd->tx_cmd.rate_n_flags = 351 cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
807 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; 808 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
808 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; 809 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
809 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; 810 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
811 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
812 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
810 813
811 for (i = 0; i < req->n_match_sets; i++) { 814 for (i = 0; i < req->n_match_sets; i++) {
812 profile = &profile_cfg->profiles[i]; 815 profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec1812133235..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
652{ 652{
653 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 653 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
654 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 654 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
655 static const u8 *baddr = _baddr; 655 const u8 *baddr = _baddr;
656 656
657 lockdep_assert_held(&mvm->mutex); 657 lockdep_assert_held(&mvm->mutex);
658 658
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c217bc7..4df12fa9d336 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
659 rcu_read_lock(); 659 rcu_read_lock();
660 660
661 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 661 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
662 /*
663 * sta can't be NULL otherwise it'd mean that the sta has been freed in
664 * the firmware while we still have packets for it in the Tx queues.
665 */
666 if (WARN_ON_ONCE(!sta))
667 goto out;
662 668
663 if (!IS_ERR_OR_NULL(sta)) { 669 if (!IS_ERR(sta)) {
664 mvmsta = iwl_mvm_sta_from_mac80211(sta); 670 mvmsta = iwl_mvm_sta_from_mac80211(sta);
665 671
666 if (tid != IWL_TID_NON_QOS) { 672 if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
675 spin_unlock_bh(&mvmsta->lock); 681 spin_unlock_bh(&mvmsta->lock);
676 } 682 }
677 } else { 683 } else {
678 sta = NULL;
679 mvmsta = NULL; 684 mvmsta = NULL;
680 } 685 }
681 686
@@ -683,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
683 * If the txq is not an AMPDU queue, there is no chance we freed 688 * If the txq is not an AMPDU queue, there is no chance we freed
684 * several skbs. Check that out... 689 * several skbs. Check that out...
685 */ 690 */
686 if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) && 691 if (txq_id >= mvm->first_agg_queue)
687 atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { 692 goto out;
688 if (mvmsta) { 693
689 /* 694 /* We can't free more than one frame at once on a shared queue */
690 * If there are no pending frames for this STA, notify 695 WARN_ON(skb_freed > 1);
691 * mac80211 that this station can go to sleep in its 696
692 * STA table. 697 /* If we have still frames from this STA nothing to do here */
693 */ 698 if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
694 if (mvmsta->vif->type == NL80211_IFTYPE_AP) 699 goto out;
695 ieee80211_sta_block_awake(mvm->hw, sta, false); 700
696 /* 701 if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
697 * We might very well have taken mvmsta pointer while 702 /*
698 * the station was being removed. The remove flow might 703 * If there are no pending frames for this STA, notify
699 * have seen a pending_frame (because we didn't take 704 * mac80211 that this station can go to sleep in its
700 * the lock) even if now the queues are drained. So make 705 * STA table.
701 * really sure now that this the station is not being 706 * If mvmsta is not NULL, sta is valid.
702 * removed. If it is, run the drain worker to remove it. 707 */
703 */ 708 ieee80211_sta_block_awake(mvm->hw, sta, false);
704 spin_lock_bh(&mvmsta->lock); 709 }
705 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 710
706 if (!sta || PTR_ERR(sta) == -EBUSY) { 711 if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
707 /* 712 /*
708 * Station disappeared in the meantime: 713 * We are draining and this was the last packet - pre_rcu_remove
709 * so we are draining. 714 * has been called already. We might be after the
710 */ 715 * synchronize_net already.
711 set_bit(sta_id, mvm->sta_drained); 716 * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
712 schedule_work(&mvm->sta_drained_wk); 717 */
713 } 718 set_bit(sta_id, mvm->sta_drained);
714 spin_unlock_bh(&mvmsta->lock); 719 schedule_work(&mvm->sta_drained_wk);
715 } else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
716 /* Tx response without STA, so we are draining */
717 set_bit(sta_id, mvm->sta_drained);
718 schedule_work(&mvm->sta_drained_wk);
719 }
720 } 720 }
721 721
722out:
722 rcu_read_unlock(); 723 rcu_read_unlock();
723} 724}
724 725
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25623c3..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
411 mvm->status, table.valid); 411 mvm->status, table.valid);
412 } 412 }
413 413
414 IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
415
414 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, 416 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
415 table.data1, table.data2, table.data3, 417 table.data1, table.data2, table.data3,
416 table.blink1, table.blink2, table.ilink1, 418 table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924f5f3c..f47bcbe2945a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,25 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
359/* 7265 Series */ 359/* 7265 Series */
360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 360 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, 361 {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 365 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, 366 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
365 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
366 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
367 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
371 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
373 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 376 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 383 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761b9c87..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
748 748
749static u16 749static u16
750mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, 750mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
751 void *accel_priv) 751 void *accel_priv, select_queue_fallback_t fallback)
752{ 752{
753 skb->priority = cfg80211_classify8021d(skb, NULL); 753 skb->priority = cfg80211_classify8021d(skb, NULL);
754 return mwifiex_1d_to_wmm_queue[skb->priority]; 754 return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56f29fe..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1877 EEPROM_MAC_ADDR_0)); 1877 EEPROM_MAC_ADDR_0));
1878 1878
1879 /* 1879 /*
1880 * Disable powersaving as default.
1881 */
1882 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1883
1884 /*
1880 * Initialize hw_mode information. 1885 * Initialize hw_mode information.
1881 */ 1886 */
1882 spec->supported_bands = SUPPORT_BAND_2GHZ; 1887 spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824cd1bc..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1706 IEEE80211_HW_SUPPORTS_PS | 1706 IEEE80211_HW_SUPPORTS_PS |
1707 IEEE80211_HW_PS_NULLFUNC_STACK; 1707 IEEE80211_HW_PS_NULLFUNC_STACK;
1708 1708
1709 /*
1710 * Disable powersaving as default.
1711 */
1712 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1713
1709 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 1714 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
1710 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1715 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
1711 rt2x00_eeprom_addr(rt2x00dev, 1716 rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06006c4..7f8b5d156c8c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7458,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
7458 u32 reg; 7458 u32 reg;
7459 7459
7460 /* 7460 /*
7461 * Disable powersaving as default on PCI devices. 7461 * Disable powersaving as default.
7462 */ 7462 */
7463 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 7463 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
7464 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
7465 7464
7466 /* 7465 /*
7467 * Initialize all hw fields. 7466 * Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aad0e52..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
107 struct rtl8180_priv *priv = dev->priv; 107 struct rtl8180_priv *priv = dev->priv;
108 unsigned int count = 32; 108 unsigned int count = 32;
109 u8 signal, agc, sq; 109 u8 signal, agc, sq;
110 dma_addr_t mapping;
110 111
111 while (count--) { 112 while (count--) {
112 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 113 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
128 if (unlikely(!new_skb)) 129 if (unlikely(!new_skb))
129 goto done; 130 goto done;
130 131
132 mapping = pci_map_single(priv->pdev,
133 skb_tail_pointer(new_skb),
134 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
135
136 if (pci_dma_mapping_error(priv->pdev, mapping)) {
137 kfree_skb(new_skb);
138 dev_err(&priv->pdev->dev, "RX DMA map error\n");
139
140 goto done;
141 }
142
131 pci_unmap_single(priv->pdev, 143 pci_unmap_single(priv->pdev,
132 *((dma_addr_t *)skb->cb), 144 *((dma_addr_t *)skb->cb),
133 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 145 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
158 170
159 skb = new_skb; 171 skb = new_skb;
160 priv->rx_buf[priv->rx_idx] = skb; 172 priv->rx_buf[priv->rx_idx] = skb;
161 *((dma_addr_t *) skb->cb) = 173 *((dma_addr_t *) skb->cb) = mapping;
162 pci_map_single(priv->pdev, skb_tail_pointer(skb),
163 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
164 } 174 }
165 175
166 done: 176 done:
@@ -266,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
266 mapping = pci_map_single(priv->pdev, skb->data, 276 mapping = pci_map_single(priv->pdev, skb->data,
267 skb->len, PCI_DMA_TODEVICE); 277 skb->len, PCI_DMA_TODEVICE);
268 278
279 if (pci_dma_mapping_error(priv->pdev, mapping)) {
280 kfree_skb(skb);
281 dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
282 return;
283
284 }
285
269 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS | 286 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
270 RTL818X_TX_DESC_FLAG_LS | 287 RTL818X_TX_DESC_FLAG_LS |
271 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) | 288 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
15#ifndef RTL8187_H 15#ifndef RTL8187_H
16#define RTL8187_H 16#define RTL8187_H
17 17
18#include <linux/cache.h>
19
18#include "rtl818x.h" 20#include "rtl818x.h"
19#include "leds.h" 21#include "leds.h"
20 22
@@ -139,7 +141,10 @@ struct rtl8187_priv {
139 u8 aifsn[4]; 141 u8 aifsn[4];
140 u8 rfkill_mask; 142 u8 rfkill_mask;
141 struct { 143 struct {
142 __le64 buf; 144 union {
145 __le64 buf;
146 u8 dummy1[L1_CACHE_BYTES];
147 } ____cacheline_aligned;
143 struct sk_buff_head queue; 148 struct sk_buff_head queue;
144 } b_tx_status; /* This queue is used by both -b and non-b devices */ 149 } b_tx_status; /* This queue is used by both -b and non-b devices */
145 struct mutex io_mutex; 150 struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
147 u8 bits8; 152 u8 bits8;
148 __le16 bits16; 153 __le16 bits16;
149 __le32 bits32; 154 __le32 bits32;
150 } *io_dmabuf; 155 u8 dummy2[L1_CACHE_BYTES];
156 } *io_dmabuf ____cacheline_aligned;
151 bool rfkill_off; 157 bool rfkill_off;
152 u16 seqno; 158 u16 seqno;
153}; 159};
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3c5449..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
48 48
49 /*<2> Enable Adapter */ 49 /*<2> Enable Adapter */
50 if (rtlpriv->cfg->ops->hw_init(hw)) 50 if (rtlpriv->cfg->ops->hw_init(hw))
51 return 1; 51 return false;
52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
53 53
54 /*<3> Enable Interrupt */ 54 /*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
937 bool is92c; 937 bool is92c;
938 int err; 938 int err;
939 u8 tmp_u1b; 939 u8 tmp_u1b;
940 unsigned long flags;
940 941
941 rtlpci->being_init_adapter = true; 942 rtlpci->being_init_adapter = true;
943
944 /* Since this function can take a very long time (up to 350 ms)
945 * and can be called with irqs disabled, reenable the irqs
946 * to let the other devices continue being serviced.
947 *
 948 * It is safe to do so since our own interrupts will only be enabled
949 * in a subsequent step.
950 */
951 local_save_flags(flags);
952 local_irq_enable();
953
942 rtlpriv->intf_ops->disable_aspm(hw); 954 rtlpriv->intf_ops->disable_aspm(hw);
943 rtstatus = _rtl92ce_init_mac(hw); 955 rtstatus = _rtl92ce_init_mac(hw);
944 if (!rtstatus) { 956 if (!rtstatus) {
945 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 957 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
946 err = 1; 958 err = 1;
947 return err; 959 goto exit;
948 } 960 }
949 961
950 err = rtl92c_download_fw(hw); 962 err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
952 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 964 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
953 "Failed to download FW. Init HW without FW now..\n"); 965 "Failed to download FW. Init HW without FW now..\n");
954 err = 1; 966 err = 1;
955 return err; 967 goto exit;
956 } 968 }
957 969
958 rtlhal->last_hmeboxnum = 0; 970 rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
1032 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); 1044 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
1033 } 1045 }
1034 rtl92c_dm_init(hw); 1046 rtl92c_dm_init(hw);
1047exit:
1048 local_irq_restore(flags);
1035 rtlpci->being_init_adapter = false; 1049 rtlpci->being_init_adapter = false;
1036 return err; 1050 return err;
1037} 1051}
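The comment in the hunk above explains why the long-running hw_init path briefly turns interrupts back on. A minimal sketch of the save/enable/restore pattern, assuming the function's own interrupt sources stay masked until a later step (illustrative only):

	unsigned long flags;

	local_save_flags(flags);	/* remember the caller's IRQ state */
	local_irq_enable();		/* let other devices be serviced */
	/* ... long-running initialisation, own IRQs still masked ... */
	local_irq_restore(flags);	/* put the caller's state back */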
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
143 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */ 143 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
144 struct xen_netif_rx_back_ring rx; 144 struct xen_netif_rx_back_ring rx;
145 struct sk_buff_head rx_queue; 145 struct sk_buff_head rx_queue;
146 bool rx_queue_stopped; 146 RING_IDX rx_last_skb_slots;
147 /* Set when the RX interrupt is triggered by the frontend.
148 * The worker thread may need to wake the queue.
149 */
150 bool rx_event;
151 147
 152 /* This array is allocated separately as it is large */ 148 /* This array is allocated separately as it is large */
153 struct gnttab_copy *grant_copy_op; 149 struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
100{ 100{
101 struct xenvif *vif = dev_id; 101 struct xenvif *vif = dev_id;
102 102
103 vif->rx_event = true;
104 xenvif_kick_thread(vif); 103 xenvif_kick_thread(vif);
105 104
106 return IRQ_HANDLED; 105 return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
476 unsigned long offset; 476 unsigned long offset;
477 struct skb_cb_overlay *sco; 477 struct skb_cb_overlay *sco;
478 bool need_to_notify = false; 478 bool need_to_notify = false;
479 bool ring_full = false;
480 479
481 struct netrx_pending_operations npo = { 480 struct netrx_pending_operations npo = {
482 .copy = vif->grant_copy_op, 481 .copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
486 skb_queue_head_init(&rxq); 485 skb_queue_head_init(&rxq);
487 486
488 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) { 487 while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
489 int max_slots_needed; 488 RING_IDX max_slots_needed;
490 int i; 489 int i;
491 490
 492 /* We need a cheap worst-case estimate for the number of 491 /* We need a cheap worst-case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
509 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) { 508 if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
510 skb_queue_head(&vif->rx_queue, skb); 509 skb_queue_head(&vif->rx_queue, skb);
511 need_to_notify = true; 510 need_to_notify = true;
512 ring_full = true; 511 vif->rx_last_skb_slots = max_slots_needed;
513 break; 512 break;
514 } 513 } else
514 vif->rx_last_skb_slots = 0;
515 515
516 sco = (struct skb_cb_overlay *)skb->cb; 516 sco = (struct skb_cb_overlay *)skb->cb;
517 sco->meta_slots_used = xenvif_gop_skb(skb, &npo); 517 sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)
522 522
523 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta)); 523 BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
524 524
525 vif->rx_queue_stopped = !npo.copy_prod && ring_full;
526
527 if (!npo.copy_prod) 525 if (!npo.copy_prod)
528 goto done; 526 goto done;
529 527
@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1473 1471
1474static inline int rx_work_todo(struct xenvif *vif) 1472static inline int rx_work_todo(struct xenvif *vif)
1475{ 1473{
1476 return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) || 1474 return !skb_queue_empty(&vif->rx_queue) &&
1477 vif->rx_event; 1475 xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
1478} 1476}
1479 1477
1480static inline int tx_work_todo(struct xenvif *vif) 1478static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
1560 if (!skb_queue_empty(&vif->rx_queue)) 1558 if (!skb_queue_empty(&vif->rx_queue))
1561 xenvif_rx_action(vif); 1559 xenvif_rx_action(vif);
1562 1560
1563 vif->rx_event = false;
1564
1565 if (skb_queue_empty(&vif->rx_queue) && 1561 if (skb_queue_empty(&vif->rx_queue) &&
1566 netif_queue_stopped(vif->dev)) 1562 netif_queue_stopped(vif->dev))
1567 xenvif_start_queue(vif); 1563 xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff04d4f95baa..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1832,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
1832 case XenbusStateReconfiguring: 1832 case XenbusStateReconfiguring:
1833 case XenbusStateReconfigured: 1833 case XenbusStateReconfigured:
1834 case XenbusStateUnknown: 1834 case XenbusStateUnknown:
1835 case XenbusStateClosed:
1836 break; 1835 break;
1837 1836
1838 case XenbusStateInitWait: 1837 case XenbusStateInitWait:
@@ -1847,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
1847 netdev_notify_peers(netdev); 1846 netdev_notify_peers(netdev);
1848 break; 1847 break;
1849 1848
1849 case XenbusStateClosed:
1850 if (dev->state == XenbusStateClosed)
1851 break;
1852 /* Missed the backend's CLOSING state -- fallthrough */
1850 case XenbusStateClosing: 1853 case XenbusStateClosing:
1851 xenbus_frontend_closed(dev); 1854 xenbus_frontend_closed(dev);
1852 break; 1855 break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
99static int of_bus_pci_match(struct device_node *np) 99static int of_bus_pci_match(struct device_node *np)
100{ 100{
101 /* 101 /*
102 * "pciex" is PCI Express
102 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs 103 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
103 * "ht" is hypertransport 104 * "ht" is hypertransport
104 */ 105 */
105 return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") || 106 return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
106 !strcmp(np->type, "ht"); 107 !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
107} 108}
108 109
109static void of_bus_pci_count_cells(struct device_node *np, 110static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ff85450d5683..89e888a78899 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -342,27 +342,72 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
342} 342}
343EXPORT_SYMBOL(of_get_cpu_node); 343EXPORT_SYMBOL(of_get_cpu_node);
344 344
345/** Checks if the given "compat" string matches one of the strings in 345/**
346 * the device's "compatible" property 346 * __of_device_is_compatible() - Check if the node matches given constraints
347 * @device: pointer to node
348 * @compat: required compatible string, NULL or "" for any match
349 * @type: required device_type value, NULL or "" for any match
350 * @name: required node name, NULL or "" for any match
351 *
352 * Checks if the given @compat, @type and @name strings match the
 353 * properties of the given @device. A constraint can be skipped by
354 * passing NULL or an empty string as the constraint.
355 *
356 * Returns 0 for no match, and a positive integer on match. The return
357 * value is a relative score with larger values indicating better
358 * matches. The score is weighted for the most specific compatible value
359 * to get the highest score. Matching type is next, followed by matching
360 * name. Practically speaking, this results in the following priority
361 * order for matches:
362 *
363 * 1. specific compatible && type && name
364 * 2. specific compatible && type
365 * 3. specific compatible && name
366 * 4. specific compatible
367 * 5. general compatible && type && name
368 * 6. general compatible && type
369 * 7. general compatible && name
370 * 8. general compatible
371 * 9. type && name
372 * 10. type
373 * 11. name
347 */ 374 */
348static int __of_device_is_compatible(const struct device_node *device, 375static int __of_device_is_compatible(const struct device_node *device,
349 const char *compat) 376 const char *compat, const char *type, const char *name)
350{ 377{
351 const char* cp; 378 struct property *prop;
352 int cplen, l; 379 const char *cp;
380 int index = 0, score = 0;
381
382 /* Compatible match has highest priority */
383 if (compat && compat[0]) {
384 prop = __of_find_property(device, "compatible", NULL);
385 for (cp = of_prop_next_string(prop, NULL); cp;
386 cp = of_prop_next_string(prop, cp), index++) {
387 if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
388 score = INT_MAX/2 - (index << 2);
389 break;
390 }
391 }
392 if (!score)
393 return 0;
394 }
353 395
354 cp = __of_get_property(device, "compatible", &cplen); 396 /* Matching type is better than matching name */
355 if (cp == NULL) 397 if (type && type[0]) {
356 return 0; 398 if (!device->type || of_node_cmp(type, device->type))
357 while (cplen > 0) { 399 return 0;
358 if (of_compat_cmp(cp, compat, strlen(compat)) == 0) 400 score += 2;
359 return 1;
360 l = strlen(cp) + 1;
361 cp += l;
362 cplen -= l;
363 } 401 }
364 402
365 return 0; 403 /* Matching name is a bit better than not */
404 if (name && name[0]) {
405 if (!device->name || of_node_cmp(name, device->name))
406 return 0;
407 score++;
408 }
409
410 return score;
366} 411}
367 412
368/** Checks if the given "compat" string matches one of the strings in 413/** Checks if the given "compat" string matches one of the strings in
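The kernel-doc in the hunk above defines the new scoring order; in practice a more specific compatible string on the node now wins over a more generic one regardless of table order. A hedged example with hypothetical names (not taken from the patch):

	/* Node:  compatible = "acme,widget-v2", "acme,widget"; */
	static const struct of_device_id widget_of_match[] = {
		{ .compatible = "acme,widget" },	/* generic entry  */
		{ .compatible = "acme,widget-v2" },	/* specific entry */
		{ /* sentinel */ }
	};

	/* of_match_node(widget_of_match, np) returns the "acme,widget-v2"
	 * entry: the earlier a string appears in the node's "compatible"
	 * list, the higher its base score (INT_MAX/2 - (index << 2)),
	 * while type and name matches only add small tie-breakers. */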
@@ -375,7 +420,7 @@ int of_device_is_compatible(const struct device_node *device,
375 int res; 420 int res;
376 421
377 raw_spin_lock_irqsave(&devtree_lock, flags); 422 raw_spin_lock_irqsave(&devtree_lock, flags);
378 res = __of_device_is_compatible(device, compat); 423 res = __of_device_is_compatible(device, compat, NULL, NULL);
379 raw_spin_unlock_irqrestore(&devtree_lock, flags); 424 raw_spin_unlock_irqrestore(&devtree_lock, flags);
380 return res; 425 return res;
381} 426}
@@ -681,10 +726,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
681 raw_spin_lock_irqsave(&devtree_lock, flags); 726 raw_spin_lock_irqsave(&devtree_lock, flags);
682 np = from ? from->allnext : of_allnodes; 727 np = from ? from->allnext : of_allnodes;
683 for (; np; np = np->allnext) { 728 for (; np; np = np->allnext) {
684 if (type 729 if (__of_device_is_compatible(np, compatible, type, NULL) &&
685 && !(np->type && (of_node_cmp(np->type, type) == 0)))
686 continue;
687 if (__of_device_is_compatible(np, compatible) &&
688 of_node_get(np)) 730 of_node_get(np))
689 break; 731 break;
690 } 732 }
@@ -734,43 +776,22 @@ static
734const struct of_device_id *__of_match_node(const struct of_device_id *matches, 776const struct of_device_id *__of_match_node(const struct of_device_id *matches,
735 const struct device_node *node) 777 const struct device_node *node)
736{ 778{
737 const char *cp; 779 const struct of_device_id *best_match = NULL;
738 int cplen, l; 780 int score, best_score = 0;
739 781
740 if (!matches) 782 if (!matches)
741 return NULL; 783 return NULL;
742 784
743 cp = __of_get_property(node, "compatible", &cplen); 785 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
744 do { 786 score = __of_device_is_compatible(node, matches->compatible,
745 const struct of_device_id *m = matches; 787 matches->type, matches->name);
746 788 if (score > best_score) {
747 /* Check against matches with current compatible string */ 789 best_match = matches;
748 while (m->name[0] || m->type[0] || m->compatible[0]) { 790 best_score = score;
749 int match = 1;
750 if (m->name[0])
751 match &= node->name
752 && !strcmp(m->name, node->name);
753 if (m->type[0])
754 match &= node->type
755 && !strcmp(m->type, node->type);
756 if (m->compatible[0])
757 match &= cp
758 && !of_compat_cmp(m->compatible, cp,
759 strlen(m->compatible));
760 if (match)
761 return m;
762 m++;
763 }
764
765 /* Get node's next compatible string */
766 if (cp) {
767 l = strlen(cp) + 1;
768 cp += l;
769 cplen -= l;
770 } 791 }
771 } while (cp && (cplen > 0)); 792 }
772 793
773 return NULL; 794 return best_match;
774} 795}
775 796
776/** 797/**
@@ -778,10 +799,7 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
778 * @matches: array of of device match structures to search in 799 * @matches: array of of device match structures to search in
779 * @node: the of device structure to match against 800 * @node: the of device structure to match against
780 * 801 *
781 * Low level utility function used by device matching. Matching order 802 * Low level utility function used by device matching.
782 * is to compare each of the node's compatibles with all given matches
783 * first. This implies node's compatible is sorted from specific to
784 * generic while matches can be in any order.
785 */ 803 */
786const struct of_device_id *of_match_node(const struct of_device_id *matches, 804const struct of_device_id *of_match_node(const struct of_device_id *matches,
787 const struct device_node *node) 805 const struct device_node *node)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6f0d2a..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
24 24
25static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) 25static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
26{ 26{
27 phydev->supported |= PHY_DEFAULT_FEATURES; 27 /* The default values for phydev->supported are provided by the PHY
 28 * driver "features" member; we want to reset to sane defaults first
29 * before supporting higher speeds.
30 */
31 phydev->supported &= PHY_DEFAULT_FEATURES;
28 32
29 switch (max_speed) { 33 switch (max_speed) {
30 default: 34 default:
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
44{ 48{
45 struct phy_device *phy; 49 struct phy_device *phy;
46 bool is_c45; 50 bool is_c45;
47 int rc, prev_irq; 51 int rc;
48 u32 max_speed = 0; 52 u32 max_speed = 0;
49 53
50 is_c45 = of_device_is_compatible(child, 54 is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
54 if (!phy || IS_ERR(phy)) 58 if (!phy || IS_ERR(phy))
55 return 1; 59 return 1;
56 60
57 if (mdio->irq) { 61 rc = irq_of_parse_and_map(child, 0);
58 prev_irq = mdio->irq[addr]; 62 if (rc > 0) {
59 mdio->irq[addr] = 63 phy->irq = rc;
60 irq_of_parse_and_map(child, 0); 64 if (mdio->irq)
61 if (!mdio->irq[addr]) 65 mdio->irq[addr] = rc;
62 mdio->irq[addr] = prev_irq; 66 } else {
67 if (mdio->irq)
68 phy->irq = mdio->irq[addr];
63 } 69 }
64 70
65 /* Associate the OF node with the device structure so it 71 /* Associate the OF node with the device structure so it
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index e21012bde639..6643d1920985 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -300,6 +300,72 @@ static void __init of_selftest_parse_interrupts_extended(void)
300 of_node_put(np); 300 of_node_put(np);
301} 301}
302 302
303static struct of_device_id match_node_table[] = {
304 { .data = "A", .name = "name0", }, /* Name alone is lowest priority */
305 { .data = "B", .type = "type1", }, /* followed by type alone */
306
307 { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
308 { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
309 { .data = "Cc", .name = "name2", .type = "type2", },
310
311 { .data = "E", .compatible = "compat3" },
312 { .data = "G", .compatible = "compat2", },
313 { .data = "H", .compatible = "compat2", .name = "name5", },
314 { .data = "I", .compatible = "compat2", .type = "type1", },
315 { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
316 { .data = "K", .compatible = "compat2", .name = "name9", },
317 {}
318};
319
320static struct {
321 const char *path;
322 const char *data;
323} match_node_tests[] = {
324 { .path = "/testcase-data/match-node/name0", .data = "A", },
325 { .path = "/testcase-data/match-node/name1", .data = "B", },
326 { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
327 { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
328 { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
329 { .path = "/testcase-data/match-node/name3", .data = "E", },
330 { .path = "/testcase-data/match-node/name4", .data = "G", },
331 { .path = "/testcase-data/match-node/name5", .data = "H", },
332 { .path = "/testcase-data/match-node/name6", .data = "G", },
333 { .path = "/testcase-data/match-node/name7", .data = "I", },
334 { .path = "/testcase-data/match-node/name8", .data = "J", },
335 { .path = "/testcase-data/match-node/name9", .data = "K", },
336};
337
338static void __init of_selftest_match_node(void)
339{
340 struct device_node *np;
341 const struct of_device_id *match;
342 int i;
343
344 for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
345 np = of_find_node_by_path(match_node_tests[i].path);
346 if (!np) {
347 selftest(0, "missing testcase node %s\n",
348 match_node_tests[i].path);
349 continue;
350 }
351
352 match = of_match_node(match_node_table, np);
353 if (!match) {
354 selftest(0, "%s didn't match anything\n",
355 match_node_tests[i].path);
356 continue;
357 }
358
359 if (strcmp(match->data, match_node_tests[i].data) != 0) {
360 selftest(0, "%s got wrong match. expected %s, got %s\n",
361 match_node_tests[i].path, match_node_tests[i].data,
362 (const char *)match->data);
363 continue;
364 }
365 selftest(1, "passed");
366 }
367}
368
303static int __init of_selftest(void) 369static int __init of_selftest(void)
304{ 370{
305 struct device_node *np; 371 struct device_node *np;
@@ -316,6 +382,7 @@ static int __init of_selftest(void)
316 of_selftest_property_match_string(); 382 of_selftest_property_match_string();
317 of_selftest_parse_interrupts(); 383 of_selftest_parse_interrupts();
318 of_selftest_parse_interrupts_extended(); 384 of_selftest_parse_interrupts_extended();
385 of_selftest_match_node();
319 pr_info("end of selftest - %i passed, %i failed\n", 386 pr_info("end of selftest - %i passed, %i failed\n",
320 selftest_results.passed, selftest_results.failed); 387 selftest_results.passed, selftest_results.failed);
321 return 0; 388 return 0;
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644
index 000000000000..3a5b75a8e4d7
--- /dev/null
+++ b/drivers/of/testcase-data/testcases.dtsi
@@ -0,0 +1,3 @@
1#include "tests-phandle.dtsi"
2#include "tests-interrupts.dtsi"
3#include "tests-match.dtsi"
diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
index c843720bd3e5..c843720bd3e5 100644
--- a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/testcase-data/tests-match.dtsi
new file mode 100644
index 000000000000..c9e541129534
--- /dev/null
+++ b/drivers/of/testcase-data/tests-match.dtsi
@@ -0,0 +1,19 @@
1
2/ {
3 testcase-data {
4 match-node {
5 name0 { };
6 name1 { device_type = "type1"; };
7 a { name2 { device_type = "type1"; }; };
8 b { name2 { }; };
9 c { name2 { device_type = "type2"; }; };
10 name3 { compatible = "compat3"; };
11 name4 { compatible = "compat2", "compat3"; };
12 name5 { compatible = "compat2", "compat3"; };
13 name6 { compatible = "compat1", "compat2", "compat3"; };
14 name7 { compatible = "compat2"; device_type = "type1"; };
15 name8 { compatible = "compat2"; device_type = "type1"; };
16 name9 { compatible = "compat2"; };
17 };
18 };
19};
diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
index 0007d3cd7dc2..0007d3cd7dc2 100644
--- a/arch/arm/boot/dts/testcases/tests-phandle.dtsi
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13478ecd4113..0e79665afd44 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -60,14 +60,6 @@
60#define PCIE_DEBUG_CTRL 0x1a60 60#define PCIE_DEBUG_CTRL 0x1a60
61#define PCIE_DEBUG_SOFT_RESET BIT(20) 61#define PCIE_DEBUG_SOFT_RESET BIT(20)
62 62
63/*
64 * This product ID is registered by Marvell, and used when the Marvell
65 * SoC is not the root complex, but an endpoint on the PCIe bus. It is
66 * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
67 * bridge.
68 */
69#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
70
71/* PCI configuration space of a PCI-to-PCI bridge */ 63/* PCI configuration space of a PCI-to-PCI bridge */
72struct mvebu_sw_pci_bridge { 64struct mvebu_sw_pci_bridge {
73 u16 vendor; 65 u16 vendor;
@@ -388,7 +380,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
388 380
389 bridge->class = PCI_CLASS_BRIDGE_PCI; 381 bridge->class = PCI_CLASS_BRIDGE_PCI;
390 bridge->vendor = PCI_VENDOR_ID_MARVELL; 382 bridge->vendor = PCI_VENDOR_ID_MARVELL;
391 bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID; 383 bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
384 bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
392 bridge->header_type = PCI_HEADER_TYPE_BRIDGE; 385 bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
393 bridge->cache_line_size = 0x10; 386 bridge->cache_line_size = 0x10;
394 387
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e2a783fdb98f..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -730,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
730 return (unsigned int)sta; 730 return (unsigned int)sta;
731} 731}
732 732
733static inline bool device_status_valid(unsigned int sta)
734{
735 /*
736 * ACPI spec says that _STA may return bit 0 clear with bit 3 set
737 * if the device is valid but does not require a device driver to be
738 * loaded (Section 6.3.7 of ACPI 5.0A).
739 */
740 unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
741 return (sta & mask) == mask;
742}
743
733/** 744/**
734 * trim_stale_devices - remove PCI devices that are not responding. 745 * trim_stale_devices - remove PCI devices that are not responding.
735 * @dev: PCI device to start walking the hierarchy from. 746 * @dev: PCI device to start walking the hierarchy from.
@@ -745,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev)
745 unsigned long long sta; 756 unsigned long long sta;
746 757
747 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 758 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
748 alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) 759 alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
749 || acpiphp_no_hotplug(handle); 760 || acpiphp_no_hotplug(handle);
750 } 761 }
751 if (!alive) { 762 if (!alive) {
@@ -792,7 +803,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
792 mutex_lock(&slot->crit_sect); 803 mutex_lock(&slot->crit_sect);
793 if (slot_no_hotplug(slot)) { 804 if (slot_no_hotplug(slot)) {
794 ; /* do nothing */ 805 ; /* do nothing */
795 } else if (get_slot_status(slot) == ACPI_STA_ALL) { 806 } else if (device_status_valid(get_slot_status(slot))) {
796 /* remove stale devices if any */ 807 /* remove stale devices if any */
797 list_for_each_entry_safe_reverse(dev, tmp, 808 list_for_each_entry_safe_reverse(dev, tmp,
798 &bus->devices, bus_list) 809 &bus->devices, bus_list)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a0fec6ce571..955ab7990c5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -545,9 +545,15 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
545 return -ENOMEM; 545 return -ENOMEM;
546 list_for_each_entry(entry, &pdev->msi_list, list) { 546 list_for_each_entry(entry, &pdev->msi_list, list) {
547 char *name = kmalloc(20, GFP_KERNEL); 547 char *name = kmalloc(20, GFP_KERNEL);
548 if (!name)
549 goto error_attrs;
550
548 msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 551 msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
549 if (!msi_dev_attr) 552 if (!msi_dev_attr) {
553 kfree(name);
550 goto error_attrs; 554 goto error_attrs;
555 }
556
551 sprintf(name, "%d", entry->irq); 557 sprintf(name, "%d", entry->irq);
552 sysfs_attr_init(&msi_dev_attr->attr); 558 sysfs_attr_init(&msi_dev_attr->attr);
553 msi_dev_attr->attr.name = name; 559 msi_dev_attr->attr.name = name;
@@ -589,6 +595,7 @@ error_attrs:
589 ++count; 595 ++count;
590 msi_attr = msi_attrs[count]; 596 msi_attr = msi_attrs[count];
591 } 597 }
598 kfree(msi_attrs);
592 return ret; 599 return ret;
593} 600}
594 601
@@ -959,7 +966,6 @@ EXPORT_SYMBOL(pci_disable_msi);
959/** 966/**
960 * pci_msix_vec_count - return the number of device's MSI-X table entries 967 * pci_msix_vec_count - return the number of device's MSI-X table entries
961 * @dev: pointer to the pci_dev data structure of MSI-X device function 968 * @dev: pointer to the pci_dev data structure of MSI-X device function
962
963 * This function returns the number of device's MSI-X table entries and 969 * This function returns the number of device's MSI-X table entries and
964 * therefore the number of MSI-X vectors device is capable of sending. 970 * therefore the number of MSI-X vectors device is capable of sending.
965 * It returns a negative errno if the device is not capable of sending MSI-X 971 * It returns a negative errno if the device is not capable of sending MSI-X
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1febe90831b4..6b05f6134b68 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1181,6 +1181,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1181static int do_pci_enable_device(struct pci_dev *dev, int bars) 1181static int do_pci_enable_device(struct pci_dev *dev, int bars)
1182{ 1182{
1183 int err; 1183 int err;
1184 u16 cmd;
1185 u8 pin;
1184 1186
1185 err = pci_set_power_state(dev, PCI_D0); 1187 err = pci_set_power_state(dev, PCI_D0);
1186 if (err < 0 && err != -EIO) 1188 if (err < 0 && err != -EIO)
@@ -1190,6 +1192,14 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
1190 return err; 1192 return err;
1191 pci_fixup_device(pci_fixup_enable, dev); 1193 pci_fixup_device(pci_fixup_enable, dev);
1192 1194
1195 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1196 if (pin) {
1197 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1198 if (cmd & PCI_COMMAND_INTX_DISABLE)
1199 pci_write_config_word(dev, PCI_COMMAND,
1200 cmd & ~PCI_COMMAND_INTX_DISABLE);
1201 }
1202
1193 return 0; 1203 return 0;
1194} 1204}
1195 1205
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index afa2354f6600..c7a551c2d5f1 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,7 +5,7 @@
5menu "PHY Subsystem" 5menu "PHY Subsystem"
6 6
7config GENERIC_PHY 7config GENERIC_PHY
8 tristate "PHY Core" 8 bool "PHY Core"
9 help 9 help
10 Generic PHY support. 10 Generic PHY support.
11 11
@@ -61,6 +61,7 @@ config PHY_EXYNOS_DP_VIDEO
61config BCM_KONA_USB2_PHY 61config BCM_KONA_USB2_PHY
62 tristate "Broadcom Kona USB2 PHY Driver" 62 tristate "Broadcom Kona USB2 PHY Driver"
63 depends on GENERIC_PHY 63 depends on GENERIC_PHY
64 depends on HAS_IOMEM
64 help 65 help
65 Enable this to support the Broadcom Kona USB 2.0 PHY. 66 Enable this to support the Broadcom Kona USB 2.0 PHY.
66 67
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867c1257..6c738376daff 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy)
162{ 162{
163 int ret; 163 int ret;
164 164
165 if (!phy)
166 return 0;
167
165 ret = phy_pm_runtime_get_sync(phy); 168 ret = phy_pm_runtime_get_sync(phy);
166 if (ret < 0 && ret != -ENOTSUPP) 169 if (ret < 0 && ret != -ENOTSUPP)
167 return ret; 170 return ret;
@@ -173,6 +176,8 @@ int phy_init(struct phy *phy)
173 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 176 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
174 goto out; 177 goto out;
175 } 178 }
179 } else {
180 ret = 0; /* Override possible ret == -ENOTSUPP */
176 } 181 }
177 ++phy->init_count; 182 ++phy->init_count;
178 183
@@ -187,6 +192,9 @@ int phy_exit(struct phy *phy)
187{ 192{
188 int ret; 193 int ret;
189 194
195 if (!phy)
196 return 0;
197
190 ret = phy_pm_runtime_get_sync(phy); 198 ret = phy_pm_runtime_get_sync(phy);
191 if (ret < 0 && ret != -ENOTSUPP) 199 if (ret < 0 && ret != -ENOTSUPP)
192 return ret; 200 return ret;
@@ -212,6 +220,9 @@ int phy_power_on(struct phy *phy)
212{ 220{
213 int ret; 221 int ret;
214 222
223 if (!phy)
224 return 0;
225
215 ret = phy_pm_runtime_get_sync(phy); 226 ret = phy_pm_runtime_get_sync(phy);
216 if (ret < 0 && ret != -ENOTSUPP) 227 if (ret < 0 && ret != -ENOTSUPP)
217 return ret; 228 return ret;
@@ -223,6 +234,8 @@ int phy_power_on(struct phy *phy)
223 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 234 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
224 goto out; 235 goto out;
225 } 236 }
237 } else {
238 ret = 0; /* Override possible ret == -ENOTSUPP */
226 } 239 }
227 ++phy->power_count; 240 ++phy->power_count;
228 mutex_unlock(&phy->mutex); 241 mutex_unlock(&phy->mutex);
@@ -240,6 +253,9 @@ int phy_power_off(struct phy *phy)
240{ 253{
241 int ret; 254 int ret;
242 255
256 if (!phy)
257 return 0;
258
243 mutex_lock(&phy->mutex); 259 mutex_lock(&phy->mutex);
244 if (phy->power_count == 1 && phy->ops->power_off) { 260 if (phy->power_count == 1 && phy->ops->power_off) {
245 ret = phy->ops->power_off(phy); 261 ret = phy->ops->power_off(phy);
@@ -308,7 +324,7 @@ err0:
308 */ 324 */
309void phy_put(struct phy *phy) 325void phy_put(struct phy *phy)
310{ 326{
311 if (IS_ERR(phy)) 327 if (!phy || IS_ERR(phy))
312 return; 328 return;
313 329
314 module_put(phy->ops->owner); 330 module_put(phy->ops->owner);
@@ -328,6 +344,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
328{ 344{
329 int r; 345 int r;
330 346
347 if (!phy)
348 return;
349
331 r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); 350 r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
332 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); 351 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
333} 352}
@@ -389,17 +408,11 @@ struct phy *phy_get(struct device *dev, const char *string)
389 index = of_property_match_string(dev->of_node, "phy-names", 408 index = of_property_match_string(dev->of_node, "phy-names",
390 string); 409 string);
391 phy = of_phy_get(dev, index); 410 phy = of_phy_get(dev, index);
392 if (IS_ERR(phy)) {
393 dev_err(dev, "unable to find phy\n");
394 return phy;
395 }
396 } else { 411 } else {
397 phy = phy_lookup(dev, string); 412 phy = phy_lookup(dev, string);
398 if (IS_ERR(phy)) {
399 dev_err(dev, "unable to find phy\n");
400 return phy;
401 }
402 } 413 }
414 if (IS_ERR(phy))
415 return phy;
403 416
404 if (!try_module_get(phy->ops->owner)) 417 if (!try_module_get(phy->ops->owner))
405 return ERR_PTR(-EPROBE_DEFER); 418 return ERR_PTR(-EPROBE_DEFER);
@@ -411,6 +424,27 @@ struct phy *phy_get(struct device *dev, const char *string)
411EXPORT_SYMBOL_GPL(phy_get); 424EXPORT_SYMBOL_GPL(phy_get);
412 425
413/** 426/**
427 * phy_optional_get() - lookup and obtain a reference to an optional phy.
428 * @dev: device that requests this phy
429 * @string: the phy name as given in the dt data or the name of the controller
430 * port for non-dt case
431 *
432 * Returns the phy driver, after getting a refcount to it; or
433 * NULL if there is no such phy. The caller is responsible for
434 * calling phy_put() to release that count.
435 */
436struct phy *phy_optional_get(struct device *dev, const char *string)
437{
438 struct phy *phy = phy_get(dev, string);
439
440 if (PTR_ERR(phy) == -ENODEV)
441 phy = NULL;
442
443 return phy;
444}
445EXPORT_SYMBOL_GPL(phy_optional_get);
446
447/**
414 * devm_phy_get() - lookup and obtain a reference to a phy. 448 * devm_phy_get() - lookup and obtain a reference to a phy.
415 * @dev: device that requests this phy 449 * @dev: device that requests this phy
416 * @string: the phy name as given in the dt data or phy device name 450 * @string: the phy name as given in the dt data or phy device name
@@ -441,6 +475,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
441EXPORT_SYMBOL_GPL(devm_phy_get); 475EXPORT_SYMBOL_GPL(devm_phy_get);
442 476
443/** 477/**
478 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
479 * @dev: device that requests this phy
480 * @string: the phy name as given in the dt data or phy device name
481 * for non-dt case
482 *
483 * Gets the phy using phy_get(), and associates a device with it using
484 * devres. On driver detach, release function is invoked on the devres
 485 * data, then the devres data is freed. This differs from devm_phy_get() in
486 * that if the phy does not exist, it is not considered an error and
487 * -ENODEV will not be returned. Instead the NULL phy is returned,
488 * which can be passed to all other phy consumer calls.
489 */
490struct phy *devm_phy_optional_get(struct device *dev, const char *string)
491{
492 struct phy *phy = devm_phy_get(dev, string);
493
494 if (PTR_ERR(phy) == -ENODEV)
495 phy = NULL;
496
497 return phy;
498}
499EXPORT_SYMBOL_GPL(devm_phy_optional_get);
500
501/**
444 * phy_create() - create a new phy 502 * phy_create() - create a new phy
445 * @dev: device that is creating the new phy 503 * @dev: device that is creating the new phy
446 * @ops: function pointers for performing phy operations 504 * @ops: function pointers for performing phy operations
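Together with the NULL checks added to phy_init()/phy_power_on()/phy_power_off() earlier in this file, the new *_optional_get() helpers let a consumer treat a missing PHY as a no-op. A hedged consumer sketch ('dev' and the "usb" name are hypothetical driver code, not part of the patch):

	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "usb");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real error, e.g. -EPROBE_DEFER */

	ret = phy_init(phy);		/* returns 0 when phy == NULL */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* also a no-op for a NULL phy */
	if (ret)
		return ret;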
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 1dbe6ce7b2ce..0786fef842e7 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -76,10 +76,6 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
76 if (IS_ERR(state->regs)) 76 if (IS_ERR(state->regs))
77 return PTR_ERR(state->regs); 77 return PTR_ERR(state->regs);
78 78
79 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
80 if (IS_ERR(phy_provider))
81 return PTR_ERR(phy_provider);
82
83 phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL); 79 phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
84 if (IS_ERR(phy)) { 80 if (IS_ERR(phy)) {
85 dev_err(dev, "failed to create Display Port PHY\n"); 81 dev_err(dev, "failed to create Display Port PHY\n");
@@ -87,6 +83,10 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
87 } 83 }
88 phy_set_drvdata(phy, state); 84 phy_set_drvdata(phy, state);
89 85
86 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
87 if (IS_ERR(phy_provider))
88 return PTR_ERR(phy_provider);
89
90 return 0; 90 return 0;
91} 91}
92 92
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 0c5efab11af1..7f139326a642 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -134,11 +134,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
134 dev_set_drvdata(dev, state); 134 dev_set_drvdata(dev, state);
135 spin_lock_init(&state->slock); 135 spin_lock_init(&state->slock);
136 136
137 phy_provider = devm_of_phy_provider_register(dev,
138 exynos_mipi_video_phy_xlate);
139 if (IS_ERR(phy_provider))
140 return PTR_ERR(phy_provider);
141
142 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 137 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
143 struct phy *phy = devm_phy_create(dev, 138 struct phy *phy = devm_phy_create(dev,
144 &exynos_mipi_video_phy_ops, NULL); 139 &exynos_mipi_video_phy_ops, NULL);
@@ -152,6 +147,11 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
152 phy_set_drvdata(phy, &state->phys[i]); 147 phy_set_drvdata(phy, &state->phys[i]);
153 } 148 }
154 149
150 phy_provider = devm_of_phy_provider_register(dev,
151 exynos_mipi_video_phy_xlate);
152 if (IS_ERR(phy_provider))
153 return PTR_ERR(phy_provider);
154
155 return 0; 155 return 0;
156} 156}
157 157
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d43786f62437..d70ecd6a1b3f 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -99,17 +99,17 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
99 if (IS_ERR(priv->clk)) 99 if (IS_ERR(priv->clk))
100 return PTR_ERR(priv->clk); 100 return PTR_ERR(priv->clk);
101 101
102 phy_provider = devm_of_phy_provider_register(&pdev->dev,
103 of_phy_simple_xlate);
104 if (IS_ERR(phy_provider))
105 return PTR_ERR(phy_provider);
106
107 phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL); 102 phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
108 if (IS_ERR(phy)) 103 if (IS_ERR(phy))
109 return PTR_ERR(phy); 104 return PTR_ERR(phy);
110 105
111 phy_set_drvdata(phy, priv); 106 phy_set_drvdata(phy, priv);
112 107
108 phy_provider = devm_of_phy_provider_register(&pdev->dev,
109 of_phy_simple_xlate);
110 if (IS_ERR(phy_provider))
111 return PTR_ERR(phy_provider);
112
 113 /* The boot loader may have left it on. Turn it off. */ 113 /* The boot loader may have left it on. Turn it off. */
114 phy_mvebu_sata_power_off(phy); 114 phy_mvebu_sata_power_off(phy);
115 115
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index bfc5c337f99a..7699752fba11 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -177,11 +177,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
177 phy->phy.otg = otg; 177 phy->phy.otg = otg;
178 phy->phy.type = USB_PHY_TYPE_USB2; 178 phy->phy.type = USB_PHY_TYPE_USB2;
179 179
180 phy_provider = devm_of_phy_provider_register(phy->dev,
181 of_phy_simple_xlate);
182 if (IS_ERR(phy_provider))
183 return PTR_ERR(phy_provider);
184
185 control_node = of_parse_phandle(node, "ctrl-module", 0); 180 control_node = of_parse_phandle(node, "ctrl-module", 0);
186 if (!control_node) { 181 if (!control_node) {
187 dev_err(&pdev->dev, "Failed to get control device phandle\n"); 182 dev_err(&pdev->dev, "Failed to get control device phandle\n");
@@ -214,6 +209,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
214 209
215 phy_set_drvdata(generic_phy, phy); 210 phy_set_drvdata(generic_phy, phy);
216 211
212 phy_provider = devm_of_phy_provider_register(phy->dev,
213 of_phy_simple_xlate);
214 if (IS_ERR(phy_provider))
215 return PTR_ERR(phy_provider);
216
217 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); 217 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
218 if (IS_ERR(phy->wkupclk)) { 218 if (IS_ERR(phy->wkupclk)) {
219 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); 219 dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index daf65e68aaab..c3ace1db8136 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -695,11 +695,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
695 otg->set_host = twl4030_set_host; 695 otg->set_host = twl4030_set_host;
696 otg->set_peripheral = twl4030_set_peripheral; 696 otg->set_peripheral = twl4030_set_peripheral;
697 697
698 phy_provider = devm_of_phy_provider_register(twl->dev,
699 of_phy_simple_xlate);
700 if (IS_ERR(phy_provider))
701 return PTR_ERR(phy_provider);
702
703 phy = devm_phy_create(twl->dev, &ops, init_data); 698 phy = devm_phy_create(twl->dev, &ops, init_data);
704 if (IS_ERR(phy)) { 699 if (IS_ERR(phy)) {
705 dev_dbg(&pdev->dev, "Failed to create PHY\n"); 700 dev_dbg(&pdev->dev, "Failed to create PHY\n");
@@ -708,6 +703,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
708 703
709 phy_set_drvdata(phy, twl); 704 phy_set_drvdata(phy, twl);
710 705
706 phy_provider = devm_of_phy_provider_register(twl->dev,
707 of_phy_simple_xlate);
708 if (IS_ERR(phy_provider))
709 return PTR_ERR(phy_provider);
710
711 /* init spinlock for workqueue */ 711 /* init spinlock for workqueue */
712 spin_lock_init(&twl->lock); 712 spin_lock_init(&twl->lock);
713 713
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
192 192
193 /* 193 /*
194 * Voltage is measured in units of 1.22mV. The voltage is stored as 194 * Voltage is measured in units of 1.22mV. The voltage is stored as
195 * a 10-bit number plus sign, in the upper bits of a 16-bit register 195 * a 12-bit number plus sign, in the upper bits of a 16-bit register
196 */ 196 */
197 err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); 197 err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
198 if (err) 198 if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
444 ret = PTR_ERR(isp->phy); 444 ret = PTR_ERR(isp->phy);
445 goto fail0; 445 goto fail0;
446 } 446 }
447 if (!isp->phy)
448 goto fail0;
449 447
450 isp->dev = &pdev->dev; 448 isp->dev = &pdev->dev;
451 platform_set_drvdata(pdev, isp); 449 platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
148{ 148{
149 struct max17040_chip *chip = i2c_get_clientdata(client); 149 struct max17040_chip *chip = i2c_get_clientdata(client);
150 150
151 if (chip->pdata->battery_online) 151 if (chip->pdata && chip->pdata->battery_online)
152 chip->online = chip->pdata->battery_online(); 152 chip->online = chip->pdata->battery_online();
153 else 153 else
154 chip->online = 1; 154 chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
158{ 158{
159 struct max17040_chip *chip = i2c_get_clientdata(client); 159 struct max17040_chip *chip = i2c_get_clientdata(client);
160 160
161 if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { 161 if (!chip->pdata || !chip->pdata->charger_online
162 || !chip->pdata->charger_enable) {
162 chip->status = POWER_SUPPLY_STATUS_UNKNOWN; 163 chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
163 return; 164 return;
164 } 165 }
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 16a309e5c024..d1ac4caaf1b0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1359,7 +1359,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1359 goto found; 1359 goto found;
1360 /* Don't log an error when called from regulator_get_optional() */ 1360 /* Don't log an error when called from regulator_get_optional() */
1361 } else if (!have_full_constraints() || exclusive) { 1361 } else if (!have_full_constraints() || exclusive) {
1362 dev_err(dev, "dummy supplies not allowed\n"); 1362 dev_warn(dev, "dummy supplies not allowed\n");
1363 } 1363 }
1364 1364
1365 mutex_unlock(&regulator_list_mutex); 1365 mutex_unlock(&regulator_list_mutex);
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
576 /* Only LDO 5 and 6 has got the over current interrupt */ 576 /* Only LDO 5 and 6 has got the over current interrupt */
577 if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { 577 if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
578 irq = platform_get_irq_byname(pdev, "REGULATOR"); 578 irq = platform_get_irq_byname(pdev, "REGULATOR");
579 irq = regmap_irq_get_virq(da9055->irq_data, irq); 579 if (irq < 0)
580 return irq;
581
580 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 582 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
581 da9055_ldo5_6_oc_irq, 583 da9055_ldo5_6_oc_irq,
582 IRQF_TRIGGER_HIGH | 584 IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 56727eb745df..91e99a2c8dc1 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,3 +1,4 @@
1
1/* 2/*
2 * Regulator driver for DA9063 PMIC series 3 * Regulator driver for DA9063 PMIC series
3 * 4 *
@@ -60,7 +61,8 @@ struct da9063_regulator_info {
60 .desc.ops = &da9063_ldo_ops, \ 61 .desc.ops = &da9063_ldo_ops, \
61 .desc.min_uV = (min_mV) * 1000, \ 62 .desc.min_uV = (min_mV) * 1000, \
62 .desc.uV_step = (step_mV) * 1000, \ 63 .desc.uV_step = (step_mV) * 1000, \
63 .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \ 64 .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
65 + (DA9063_V##regl_name##_BIAS)), \
64 .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ 66 .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
65 .desc.enable_mask = DA9063_LDO_EN, \ 67 .desc.enable_mask = DA9063_LDO_EN, \
66 .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ 68 .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3f393..e0619526708c 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -166,12 +166,14 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
166 166
167 ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, 167 ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
168 MAX14577_REG_MAX); 168 MAX14577_REG_MAX);
169 if (ret < 0) { 169 if (ret < 0)
170 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); 170 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
171 return ret; 171 else
172 } 172 ret = 0;
173 173
174 return 0; 174 of_node_put(np);
175
176 return ret;
175} 177}
176 178
177static inline struct regulator_init_data *match_init_data(int index) 179static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d7164bb75d3e..d958dfa05125 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -535,7 +535,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
535 return -ENODEV; 535 return -ENODEV;
536 } 536 }
537 537
538 regulators_np = of_find_node_by_name(pmic_np, "regulators"); 538 regulators_np = of_get_child_by_name(pmic_np, "regulators");
539 if (!regulators_np) { 539 if (!regulators_np) {
540 dev_err(iodev->dev, "could not find regulators sub-node\n"); 540 dev_err(iodev->dev, "could not find regulators sub-node\n");
541 return -EINVAL; 541 return -EINVAL;
@@ -591,6 +591,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
591 rmode++; 591 rmode++;
592 } 592 }
593 593
594 of_node_put(regulators_np);
595
594 if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) { 596 if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
595 pdata->buck2_gpiodvs = true; 597 pdata->buck2_gpiodvs = true;
596 598
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f6b9188c5af5..9f0ea6cb6922 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -610,6 +610,7 @@ void chsc_chp_online(struct chp_id chpid)
610 css_wait_for_slow_path(); 610 css_wait_for_slow_path();
611 for_each_subchannel_staged(__s390_process_res_acc, NULL, 611 for_each_subchannel_staged(__s390_process_res_acc, NULL,
612 &link); 612 &link);
613 css_schedule_reprobe();
613 } 614 }
614} 615}
615 616
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
342 */ 342 */
343int cio_commit_config(struct subchannel *sch) 343int cio_commit_config(struct subchannel *sch)
344{ 344{
345 struct schib schib;
346 int ccode, retry, ret = 0; 345 int ccode, retry, ret = 0;
346 struct schib schib;
347 struct irb irb;
347 348
348 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 349 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
349 return -ENODEV; 350 return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
367 ret = -EAGAIN; 368 ret = -EAGAIN;
368 break; 369 break;
369 case 1: /* status pending */ 370 case 1: /* status pending */
370 return -EBUSY; 371 ret = -EBUSY;
372 if (tsch(sch->schid, &irb))
373 return ret;
374 break;
371 case 2: /* busy */ 375 case 2: /* busy */
372 udelay(100); /* allow for recovery */ 376 udelay(100); /* allow for recovery */
373 ret = -EBUSY; 377 ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
403 */ 407 */
404int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 408int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
405{ 409{
406 int retry;
407 int ret; 410 int ret;
408 411
409 CIO_TRACE_EVENT(2, "ensch"); 412 CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
418 sch->config.isc = sch->isc; 421 sch->config.isc = sch->isc;
419 sch->config.intparm = intparm; 422 sch->config.intparm = intparm;
420 423
421 for (retry = 0; retry < 3; retry++) { 424 ret = cio_commit_config(sch);
425 if (ret == -EIO) {
426 /*
427 * Got a program check in msch. Try without
428 * the concurrent sense bit the next time.
429 */
430 sch->config.csense = 0;
422 ret = cio_commit_config(sch); 431 ret = cio_commit_config(sch);
423 if (ret == -EIO) {
424 /*
425 * Got a program check in msch. Try without
426 * the concurrent sense bit the next time.
427 */
428 sch->config.csense = 0;
429 } else if (ret == -EBUSY) {
430 struct irb irb;
431 if (tsch(sch->schid, &irb) != 0)
432 break;
433 } else
434 break;
435 } 432 }
436 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 433 CIO_HEX_EVENT(2, &ret, sizeof(ret));
437 return ret; 434 return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
444 */ 441 */
445int cio_disable_subchannel(struct subchannel *sch) 442int cio_disable_subchannel(struct subchannel *sch)
446{ 443{
447 int retry;
448 int ret; 444 int ret;
449 445
450 CIO_TRACE_EVENT(2, "dissch"); 446 CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
456 return -ENODEV; 452 return -ENODEV;
457 453
458 sch->config.ena = 0; 454 sch->config.ena = 0;
455 ret = cio_commit_config(sch);
459 456
460 for (retry = 0; retry < 3; retry++) {
461 ret = cio_commit_config(sch);
462 if (ret == -EBUSY) {
463 struct irb irb;
464 if (tsch(sch->schid, &irb) != 0)
465 break;
466 } else
467 break;
468 }
469 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 457 CIO_HEX_EVENT(2, &ret, sizeof(ret));
470 return ret; 458 return ret;
471} 459}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
359#define need_siga_sync_out_after_pci(q) \ 359#define need_siga_sync_out_after_pci(q) \
360 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci)) 360 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
361 361
362#define for_each_input_queue(irq_ptr, q, i) \ 362#define for_each_input_queue(irq_ptr, q, i) \
363 for (i = 0, q = irq_ptr->input_qs[0]; \ 363 for (i = 0; i < irq_ptr->nr_input_qs && \
364 i < irq_ptr->nr_input_qs; \ 364 ({ q = irq_ptr->input_qs[i]; 1; }); i++)
365 q = irq_ptr->input_qs[++i]) 365#define for_each_output_queue(irq_ptr, q, i) \
366#define for_each_output_queue(irq_ptr, q, i) \ 366 for (i = 0; i < irq_ptr->nr_output_qs && \
367 for (i = 0, q = irq_ptr->output_qs[0]; \ 367 ({ q = irq_ptr->output_qs[i]; 1; }); i++)
368 i < irq_ptr->nr_output_qs; \
369 q = irq_ptr->output_qs[++i])
370 368
371#define prev_buf(bufnr) \ 369#define prev_buf(bufnr) \
372 ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) 370 ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
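The rewritten iterators above check the bound before loading the queue pointer: the old form evaluated input_qs[++i] in the loop's step expression, so on the final pass q was loaded from the slot one past the last configured queue before the condition stopped the loop. The new form only assigns q after i has passed the bound check, using a GCC statement expression. A hedged sketch of the same bound-check-before-load idea with generic names (shown here with a plain comma expression):

	/* illustrative only; 'arr', 'n', 'p' and 'i' are placeholders */
	#define for_each_item(arr, n, p, i) \
		for ((i) = 0; (i) < (n) && (((p) = (arr)[(i)]), 1); (i)++)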
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
996 } 996 }
997 } 997 }
998 998
999 if (!pci_out_supported(q)) 999 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
1000 return; 1000 return;
1001 1001
1002 for_each_output_queue(irq_ptr, q, i) { 1002 for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index dc542e0a3055..0bc91e46395a 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -311,7 +311,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
311 } __packed * msg = ap_msg->message; 311 } __packed * msg = ap_msg->message;
312 312
313 int rcblen = CEIL4(xcRB->request_control_blk_length); 313 int rcblen = CEIL4(xcRB->request_control_blk_length);
314 int replylen; 314 int replylen, req_sumlen, resp_sumlen;
315 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; 315 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
316 char *function_code; 316 char *function_code;
317 317
@@ -321,12 +321,34 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
321 xcRB->request_data_length; 321 xcRB->request_data_length;
322 if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE) 322 if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
323 return -EINVAL; 323 return -EINVAL;
324
325 /* Overflow check
326 sum must be greater (or equal) than the largest operand */
327 req_sumlen = CEIL4(xcRB->request_control_blk_length) +
328 xcRB->request_data_length;
329 if ((CEIL4(xcRB->request_control_blk_length) <=
330 xcRB->request_data_length) ?
331 (req_sumlen < xcRB->request_data_length) :
332 (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
333 return -EINVAL;
334 }
335
324 replylen = sizeof(struct type86_fmt2_msg) + 336 replylen = sizeof(struct type86_fmt2_msg) +
325 CEIL4(xcRB->reply_control_blk_length) + 337 CEIL4(xcRB->reply_control_blk_length) +
326 xcRB->reply_data_length; 338 xcRB->reply_data_length;
327 if (replylen > MSGTYPE06_MAX_MSG_SIZE) 339 if (replylen > MSGTYPE06_MAX_MSG_SIZE)
328 return -EINVAL; 340 return -EINVAL;
329 341
342 /* Overflow check
343 sum must be greater (or equal) than the largest operand */
344 resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
345 xcRB->reply_data_length;
346 if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
347 (resp_sumlen < xcRB->reply_data_length) :
348 (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
349 return -EINVAL;
350 }
351
330 /* prepare type6 header */ 352 /* prepare type6 header */
331 msg->hdr = static_type6_hdrX; 353 msg->hdr = static_type6_hdrX;
332 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); 354 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
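
The two blocks added above guard the 32-bit additions of CEIL4(control block length) and data length: if the sum wrapped, the later MSGTYPE06_MAX_MSG_SIZE comparison would pass even though the real request is far larger. The test relies on the fact that, for non-negative operands, a wrapped sum is smaller than the larger operand. A stand-alone sketch of that check (names are illustrative, not from the driver):

#include <stdbool.h>
#include <stdio.h>

static bool add_overflows(unsigned int a, unsigned int b)
{
	unsigned int sum = a + b;

	/* Unsigned addition wrapped exactly when the sum ends up
	 * smaller than the larger of the two operands. */
	return sum < (a > b ? a : b);
}

int main(void)
{
	printf("%d\n", add_overflows(0xfffffff0u, 0x20u));	/* 1: wrapped */
	printf("%d\n", add_overflows(100u, 200u));		/* 0: fine */
	return 0;
}
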
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6b4678a7900a..4ccb5d869389 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -507,7 +507,6 @@ static int jsflash_init(void)
507 } 507 }
508 508
509 /* Let us be really paranoid for modifications to probing code. */ 509 /* Let us be really paranoid for modifications to probing code. */
510 /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */
511 if (sparc_cpu_model != sun4m) { 510 if (sparc_cpu_model != sun4m) {
512 /* We must be on sun4m because we use MMU Bypass ASI. */ 511 /* We must be on sun4m because we use MMU Bypass ASI. */
513 return -ENXIO; 512 return -ENXIO;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9e80d61e5a3a..2eb97d7e8d12 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2595,8 +2595,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2595 return -ENOMEM; 2595 return -ENOMEM;
2596 } 2596 }
2597 2597
2598 INIT_LIST_HEAD(&cmd->cmd_list);
2599
2600 memcpy(&cmd->atio, atio, sizeof(*atio)); 2598 memcpy(&cmd->atio, atio, sizeof(*atio));
2601 cmd->state = QLA_TGT_STATE_NEW; 2599 cmd->state = QLA_TGT_STATE_NEW;
2602 cmd->tgt = vha->vha_tgt.qla_tgt; 2600 cmd->tgt = vha->vha_tgt.qla_tgt;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1d10eecad499..66e755cdde57 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
855 uint16_t loop_id; /* to save extra sess dereferences */ 855 uint16_t loop_id; /* to save extra sess dereferences */
856 struct qla_tgt *tgt; /* to save extra sess dereferences */ 856 struct qla_tgt *tgt; /* to save extra sess dereferences */
857 struct scsi_qla_host *vha; 857 struct scsi_qla_host *vha;
858 struct list_head cmd_list;
859 858
860 struct atio_from_isp atio; 859 struct atio_from_isp atio;
861}; 860};
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1684 1684
1685 host_dev = scsi_get_device(shost); 1685 host_dev = scsi_get_device(shost);
1686 if (host_dev && host_dev->dma_mask) 1686 if (host_dev && host_dev->dma_mask)
1687 bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; 1687 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
1688 1688
1689 return bounce_limit; 1689 return bounce_limit;
1690} 1690}
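
The (u64) cast above is the whole fix: dma_max_pfn() yields an unsigned long, and on a 32-bit build shifting it left by PAGE_SHIFT overflows before the result reaches the u64 bounce_limit. Widening the operand first keeps every bit. A small stand-alone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_pfn = 0x100000;	/* illustrative PFN limit (4 GiB / 4 KiB pages) */
	unsigned int shift = 12;	/* PAGE_SHIFT on most configurations */

	uint32_t truncated = max_pfn << shift;		/* wraps to 0 in 32 bits */
	uint64_t widened = (uint64_t)max_pfn << shift;	/* full value preserved */

	printf("truncated: 0x%x\n", truncated);
	printf("widened:   0x%llx\n", (unsigned long long)widened);
	return 0;
}
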
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310bc9acb..581ee2a8856b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI
376 def_tristate SPI_PXA2XX && PCI 376 def_tristate SPI_PXA2XX && PCI
377 377
378config SPI_RSPI 378config SPI_RSPI
379 tristate "Renesas RSPI controller" 379 tristate "Renesas RSPI/QSPI controller"
380 depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE 380 depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
381 help 381 help
382 SPI driver for Renesas RSPI blocks. 382 SPI driver for Renesas RSPI and QSPI blocks.
383 383
384config SPI_S3C24XX 384config SPI_S3C24XX
385 tristate "Samsung S3C24XX series SPI" 385 tristate "Samsung S3C24XX series SPI"
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 50406306bc20..bae97ffec4b9 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
361 init_completion(&hw->done); 361 init_completion(&hw->done);
362 362
363 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 363 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
364 if (hw->pdata->lsb)
365 master->mode_bits |= SPI_LSB_FIRST;
364 master->num_chipselect = hw->pdata->num_cs; 366 master->num_chipselect = hw->pdata->num_cs;
365 master->bus_num = hw->pdata->bus_num; 367 master->bus_num = hw->pdata->bus_num;
366 hw->bitbang.master = hw->master; 368 hw->bitbang.master = hw->master;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..d0b28bba38be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work)
755 ret = master->transfer_one_message(master, master->cur_msg); 755 ret = master->transfer_one_message(master, master->cur_msg);
756 if (ret) { 756 if (ret) {
757 dev_err(&master->dev, 757 dev_err(&master->dev,
758 "failed to transfer one message from queue: %d\n", ret); 758 "failed to transfer one message from queue\n");
759 master->cur_msg->status = ret;
760 spi_finalize_current_message(master);
761 return; 759 return;
762 } 760 }
763} 761}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
295 295
296 /* If size is not set, or set to 0, always return EOF. */ 296 /* If size is not set, or set to 0, always return EOF. */
297 if (asma->size == 0) 297 if (asma->size == 0)
298 goto out; 298 goto out_unlock;
299 299
300 if (!asma->file) { 300 if (!asma->file) {
301 ret = -EBADF; 301 ret = -EBADF;
302 goto out; 302 goto out_unlock;
303 } 303 }
304 304
305 ret = asma->file->f_op->read(asma->file, buf, len, pos); 305 mutex_unlock(&ashmem_mutex);
306 if (ret < 0)
307 goto out;
308 306
309 /** Update backing file pos, since f_ops->read() doesn't */ 307 /*
310 asma->file->f_pos = *pos; 308 * asma and asma->file are used outside the lock here. We assume
309 * once asma->file is set it will never be changed, and will not
310 * be destroyed until all references to the file are dropped and
311 * ashmem_release is called.
312 */
313 ret = asma->file->f_op->read(asma->file, buf, len, pos);
314 if (ret >= 0) {
315 /** Update backing file pos, since f_ops->read() doesn't */
316 asma->file->f_pos = *pos;
317 }
318 return ret;
311 319
312out: 320out_unlock:
313 mutex_unlock(&ashmem_mutex); 321 mutex_unlock(&ashmem_mutex);
314 return ret; 322 return ret;
315} 323}
@@ -498,6 +506,7 @@ out:
498 506
499static int set_name(struct ashmem_area *asma, void __user *name) 507static int set_name(struct ashmem_area *asma, void __user *name)
500{ 508{
509 int len;
501 int ret = 0; 510 int ret = 0;
502 char local_name[ASHMEM_NAME_LEN]; 511 char local_name[ASHMEM_NAME_LEN];
503 512
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
510 * variable that does not need protection and later copy the local 519 * variable that does not need protection and later copy the local
511 * variable to the structure member with lock held. 520 * variable to the structure member with lock held.
512 */ 521 */
513 if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) 522 len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
514 return -EFAULT; 523 if (len < 0)
515 524 return len;
525 if (len == ASHMEM_NAME_LEN)
526 local_name[ASHMEM_NAME_LEN - 1] = '\0';
516 mutex_lock(&ashmem_mutex); 527 mutex_lock(&ashmem_mutex);
517 /* cannot change an existing mapping's name */ 528 /* cannot change an existing mapping's name */
518 if (unlikely(asma->file)) { 529 if (unlikely(asma->file))
519 ret = -EINVAL; 530 ret = -EINVAL;
520 goto out; 531 else
521 } 532 strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
522 memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
523 local_name, ASHMEM_NAME_LEN);
524 asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
525out:
526 mutex_unlock(&ashmem_mutex);
527 533
534 mutex_unlock(&ashmem_mutex);
528 return ret; 535 return ret;
529} 536}
530 537
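
Two separate points in the ashmem hunks above: ashmem_read() now drops ashmem_mutex before calling into the backing file's read method, so a blocking read no longer holds the driver lock, and set_name() switches from copy_from_user() to strncpy_from_user(), which stops at the user string's NUL and reports faults and truncation explicitly. A kernel-style sketch (not stand-alone code) of that string-copy pattern, with hypothetical names:

/*
 * Copy a user-supplied name into a bounded local buffer before taking
 * any lock. strncpy_from_user() returns the string length, or the full
 * buffer size when the string was truncated (in which case it is not
 * NUL-terminated), or -EFAULT on a bad pointer.
 */
static int fetch_user_name(char *dst, const char __user *src, long n)
{
	long len = strncpy_from_user(dst, src, n);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == n)
		dst[n - 1] = '\0';	/* truncated: force termination */
	return 0;
}
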
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index eaec1dab7fe4..1432d956769c 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2904,7 +2904,7 @@ static int binder_node_release(struct binder_node *node, int refs)
2904 refs++; 2904 refs++;
2905 2905
2906 if (!ref->death) 2906 if (!ref->death)
2907 goto out; 2907 continue;
2908 2908
2909 death++; 2909 death++;
2910 2910
@@ -2917,7 +2917,6 @@ static int binder_node_release(struct binder_node *node, int refs)
2917 BUG(); 2917 BUG();
2918 } 2918 }
2919 2919
2920out:
2921 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2920 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2922 "node %d now dead, refs %d, death %d\n", 2921 "node %d now dead, refs %d, death %d\n",
2923 node->debug_id, refs, death); 2922 node->debug_id, refs, death);
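
The binder change above is small but easy to misread: the old goto jumped past the rest of the loop as soon as one reference had no death notification, so later references in the list were never examined at all; continue keeps walking the list. The distinction in miniature (values made up):

#include <stdio.h>

int main(void)
{
	int vals[5] = { 1, 0, 3, 0, 5 };
	int handled = 0, i;

	/* Skipping one entry must use continue; breaking (or a goto past
	 * the loop) would leave the remaining entries unprocessed. */
	for (i = 0; i < 5; i++) {
		if (!vals[i])
			continue;
		handled++;
	}
	printf("handled %d entries\n", handled);	/* 3 */
	return 0;
}
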
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
35 compat_ulong_t arg; 35 compat_ulong_t arg;
36}; 36};
37 37
38struct compat_ion_handle_data {
39 compat_int_t handle;
40};
41
38#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ 42#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
39 struct compat_ion_allocation_data) 43 struct compat_ion_allocation_data)
40#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) 44#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
45 struct compat_ion_handle_data)
41#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ 46#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
42 struct compat_ion_custom_data) 47 struct compat_ion_custom_data)
43 48
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
64 return err; 69 return err;
65} 70}
66 71
72static int compat_get_ion_handle_data(
73 struct compat_ion_handle_data __user *data32,
74 struct ion_handle_data __user *data)
75{
76 compat_int_t i;
77 int err;
78
79 err = get_user(i, &data32->handle);
80 err |= put_user(i, &data->handle);
81
82 return err;
83}
84
67static int compat_put_ion_allocation_data( 85static int compat_put_ion_allocation_data(
68 struct compat_ion_allocation_data __user *data32, 86 struct compat_ion_allocation_data __user *data32,
69 struct ion_allocation_data __user *data) 87 struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
132 } 150 }
133 case COMPAT_ION_IOC_FREE: 151 case COMPAT_ION_IOC_FREE:
134 { 152 {
135 struct compat_ion_allocation_data __user *data32; 153 struct compat_ion_handle_data __user *data32;
136 struct ion_allocation_data __user *data; 154 struct ion_handle_data __user *data;
137 int err; 155 int err;
138 156
139 data32 = compat_ptr(arg); 157 data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
141 if (data == NULL) 159 if (data == NULL)
142 return -EFAULT; 160 return -EFAULT;
143 161
144 err = compat_get_ion_allocation_data(data32, data); 162 err = compat_get_ion_handle_data(data32, data);
145 if (err) 163 if (err)
146 return err; 164 return err;
147 165
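
The COMPAT_ION_IOC_FREE path above previously reused the allocation-data conversion for what is really a handle argument; the fix introduces compat_ion_handle_data and a helper that moves the one field across with get_user()/put_user(). A kernel-style sketch of that per-field conversion pattern, using hypothetical structures:

struct compat_foo_args {
	compat_int_t fd;
	compat_ulong_t flags;
};

struct foo_args {
	int fd;
	unsigned long flags;
};

/*
 * Field-by-field copy between the 32-bit layout and the native layout;
 * moving each member with get_user()/put_user() means differences in
 * field size or padding between the two ABIs can never corrupt the result.
 */
static int compat_get_foo_args(struct compat_foo_args __user *a32,
			       struct foo_args __user *a)
{
	compat_int_t fd;
	compat_ulong_t flags;
	int err;

	err = get_user(fd, &a32->fd);
	err |= get_user(flags, &a32->flags);
	err |= put_user(fd, &a->fd);
	err |= put_user(flags, &a->flags);
	return err;
}
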
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..01cdc8aee898 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/init.h>
20#include <linux/bootmem.h> 21#include <linux/bootmem.h>
21#include <linux/memblock.h> 22#include <linux/memblock.h>
22#include <linux/sizes.h> 23#include <linux/sizes.h>
24#include <linux/io.h>
23#include "ion.h" 25#include "ion.h"
24#include "ion_priv.h" 26#include "ion_priv.h"
25 27
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = {
57}; 59};
58 60
59struct ion_platform_data dummy_ion_pdata = { 61struct ion_platform_data dummy_ion_pdata = {
60 .nr = 4, 62 .nr = ARRAY_SIZE(dummy_heaps),
61 .heaps = dummy_heaps, 63 .heaps = dummy_heaps,
62}; 64};
63 65
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
69 heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, 71 heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
70 GFP_KERNEL); 72 GFP_KERNEL);
71 if (!heaps) 73 if (!heaps)
72 return PTR_ERR(heaps); 74 return -ENOMEM;
73 75
74 76
75 /* Allocate a dummy carveout heap */ 77 /* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
128 } 130 }
129 return err; 131 return err;
130} 132}
133device_initcall(ion_dummy_init);
131 134
132static void __exit ion_dummy_exit(void) 135static void __exit ion_dummy_exit(void)
133{ 136{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
152 155
153 return; 156 return;
154} 157}
155 158__exitcall(ion_dummy_exit);
156module_init(ion_dummy_init);
157module_exit(ion_dummy_exit);
158
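
Two correctness points in the ion_dummy_driver hunks above: the heap count now comes from ARRAY_SIZE(dummy_heaps), so it cannot drift from the table, and the failed kzalloc() now returns -ENOMEM, since PTR_ERR(NULL) evaluates to 0 and would have reported success. A user-space sketch of both, with made-up names:

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct heap { const char *name; };

/* Sizing everything from the table keeps counts in sync when entries
 * are added or removed later. */
static struct heap heaps[] = { { "system" }, { "carveout" } };

int main(void)
{
	struct heap **ptrs = calloc(ARRAY_SIZE(heaps), sizeof(*ptrs));

	/* calloc/kzalloc report failure with NULL, not an ERR_PTR value,
	 * so the failure path must produce a real error code itself. */
	if (!ptrs)
		return 1;	/* kernel code would return -ENOMEM */

	printf("%zu heaps\n", ARRAY_SIZE(heaps));
	free(ptrs);
	return 0;
}
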
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..37e64d51394c 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
243 init_waitqueue_head(&heap->waitqueue); 243 init_waitqueue_head(&heap->waitqueue);
244 heap->task = kthread_run(ion_heap_deferred_free, heap, 244 heap->task = kthread_run(ion_heap_deferred_free, heap,
245 "%s", heap->name); 245 "%s", heap->name);
246 sched_setscheduler(heap->task, SCHED_IDLE, &param);
247 if (IS_ERR(heap->task)) { 246 if (IS_ERR(heap->task)) {
248 pr_err("%s: creating thread for deferred free failed\n", 247 pr_err("%s: creating thread for deferred free failed\n",
249 __func__); 248 __func__);
250 return PTR_RET(heap->task); 249 return PTR_RET(heap->task);
251 } 250 }
251 sched_setscheduler(heap->task, SCHED_IDLE, &param);
252 return 0; 252 return 0;
253} 253}
254 254
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..fc2e4fccf69d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
17#ifndef _ION_PRIV_H 17#ifndef _ION_PRIV_H
18#define _ION_PRIV_H 18#define _ION_PRIV_H
19 19
20#include <linux/device.h>
20#include <linux/dma-direction.h> 21#include <linux/dma-direction.h>
21#include <linux/kref.h> 22#include <linux/kref.h>
22#include <linux/mm_types.h> 23#include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..9849f3963e75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
124 124
125 info->page = page; 125 info->page = page;
126 info->order = orders[i]; 126 info->order = orders[i];
127 INIT_LIST_HEAD(&info->list);
127 return info; 128 return info;
128 } 129 }
129 kfree(info); 130 kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
145 struct list_head pages; 146 struct list_head pages;
146 struct page_info *info, *tmp_info; 147 struct page_info *info, *tmp_info;
147 int i = 0; 148 int i = 0;
148 long size_remaining = PAGE_ALIGN(size); 149 unsigned long size_remaining = PAGE_ALIGN(size);
149 unsigned int max_order = orders[0]; 150 unsigned int max_order = orders[0];
150 151
151 if (align > PAGE_SIZE) 152 if (align > PAGE_SIZE)
152 return -EINVAL; 153 return -EINVAL;
153 154
155 if (size / PAGE_SIZE > totalram_pages / 2)
156 return -ENOMEM;
157
154 INIT_LIST_HEAD(&pages); 158 INIT_LIST_HEAD(&pages);
155 while (size_remaining > 0) { 159 while (size_remaining > 0) {
156 info = alloc_largest_available(sys_heap, buffer, size_remaining, 160 info = alloc_largest_available(sys_heap, buffer, size_remaining,
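
Besides making size_remaining unsigned, the hunk above refuses any allocation larger than half of system RAM before a single page is committed, instead of failing slowly partway through. A rough user-space analogue of that up-front check, with sysconf() standing in for totalram_pages:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	unsigned long total_pages = (unsigned long)sysconf(_SC_PHYS_PAGES);
	unsigned long request = 64UL * 1024 * 1024;	/* 64 MiB, illustrative */

	/* Reject obviously unsatisfiable requests before doing any work. */
	if (request / page_size > total_pages / 2) {
		fprintf(stderr, "request too large\n");
		return 1;
	}
	printf("request accepted\n");
	return 0;
}
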
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
35 u32 value; 35 u32 value;
36}; 36};
37 37
38#if IS_ENABLED(CONFIG_SW_SYNC)
38struct sw_sync_timeline *sw_sync_timeline_create(const char *name); 39struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
39void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); 40void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
40 41
41struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); 42struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
43#else
44static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
45{
46 return NULL;
47}
48
49static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
50{
51}
52
53static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
54 u32 value)
55{
56 return NULL;
57}
58#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
42 59
43#endif /* __KERNEL __ */ 60#endif /* __KERNEL __ */
44 61
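
The #if IS_ENABLED(CONFIG_SW_SYNC) block above is the usual way to let callers build whether or not a feature is configured: real prototypes when it is, static inline no-ops when it is not, and no #ifdef at the call sites. The same shape for a hypothetical CONFIG_FOO option:

struct foo_dev;

#if IS_ENABLED(CONFIG_FOO)
int foo_start(struct foo_dev *dev);
void foo_stop(struct foo_dev *dev);
#else
/* Compiled-out variant: callers link against these no-ops instead of
 * being wrapped in #ifdef CONFIG_FOO themselves. */
static inline int foo_start(struct foo_dev *dev) { return 0; }
static inline void foo_stop(struct foo_dev *dev) { }
#endif
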
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
79 container_of(kref, struct sync_timeline, kref); 79 container_of(kref, struct sync_timeline, kref);
80 unsigned long flags; 80 unsigned long flags;
81 81
82 if (obj->ops->release_obj)
83 obj->ops->release_obj(obj);
84
85 spin_lock_irqsave(&sync_timeline_list_lock, flags); 82 spin_lock_irqsave(&sync_timeline_list_lock, flags);
86 list_del(&obj->sync_timeline_list); 83 list_del(&obj->sync_timeline_list);
87 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 84 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
88 85
86 if (obj->ops->release_obj)
87 obj->ops->release_obj(obj);
88
89 kfree(obj); 89 kfree(obj);
90} 90}
91 91
92void sync_timeline_destroy(struct sync_timeline *obj) 92void sync_timeline_destroy(struct sync_timeline *obj)
93{ 93{
94 obj->destroyed = true; 94 obj->destroyed = true;
95 smp_wmb();
95 96
96 /* 97 /*
97 * If this is not the last reference, signal any children 98 * signal any children that their parent is going away.
98 * that their parent is going away.
99 */ 99 */
100 sync_timeline_signal(obj);
100 101
101 if (!kref_put(&obj->kref, sync_timeline_free)) 102 kref_put(&obj->kref, sync_timeline_free);
102 sync_timeline_signal(obj);
103} 103}
104EXPORT_SYMBOL(sync_timeline_destroy); 104EXPORT_SYMBOL(sync_timeline_destroy);
105 105
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
40} 40}
41 41
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, 42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
43 void *accel_priv) 43 void *accel_priv, select_queue_fallback_t fallback)
44{ 44{
45 return ClassifyPacket(netdev_priv(dev), skb); 45 return ClassifyPacket(netdev_priv(dev), skb);
46} 46}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device,
616 ret = driver->auto_attach(dev, context); 616 ret = driver->auto_attach(dev, context);
617 if (ret >= 0) 617 if (ret >= 0)
618 ret = comedi_device_postconfig(dev); 618 ret = comedi_device_postconfig(dev);
619 if (ret < 0)
620 comedi_device_detach(dev);
621 mutex_unlock(&dev->mutex); 619 mutex_unlock(&dev->mutex);
622 620
623 if (ret < 0) { 621 if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
494 struct comedi_insn *insn, unsigned int *data) 494 struct comedi_insn *insn, unsigned int *data)
495{ 495{
496 struct pci1710_private *devpriv = dev->private; 496 struct pci1710_private *devpriv = dev->private;
497 unsigned int val;
497 int n, chan, range, ofs; 498 int n, chan, range, ofs;
498 499
499 chan = CR_CHAN(insn->chanspec); 500 chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
509 outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); 510 outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
510 ofs = PCI171x_DA1; 511 ofs = PCI171x_DA1;
511 } 512 }
513 val = devpriv->ao_data[chan];
512 514
513 for (n = 0; n < insn->n; n++) 515 for (n = 0; n < insn->n; n++) {
514 outw(data[n], dev->iobase + ofs); 516 val = data[n];
517 outw(val, dev->iobase + ofs);
518 }
515 519
516 devpriv->ao_data[chan] = data[n]; 520 devpriv->ao_data[chan] = val;
517 521
518 return n; 522 return n;
519 523
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
679 struct comedi_insn *insn, unsigned int *data) 683 struct comedi_insn *insn, unsigned int *data)
680{ 684{
681 struct pci1710_private *devpriv = dev->private; 685 struct pci1710_private *devpriv = dev->private;
686 unsigned int val;
682 int n, rangereg, chan; 687 int n, rangereg, chan;
683 688
684 chan = CR_CHAN(insn->chanspec); 689 chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
688 outb(rangereg, dev->iobase + PCI1720_RANGE); 693 outb(rangereg, dev->iobase + PCI1720_RANGE);
689 devpriv->da_ranges = rangereg; 694 devpriv->da_ranges = rangereg;
690 } 695 }
696 val = devpriv->ao_data[chan];
691 697
692 for (n = 0; n < insn->n; n++) { 698 for (n = 0; n < insn->n; n++) {
693 outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1)); 699 val = data[n];
700 outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
694 outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ 701 outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
695 } 702 }
696 703
697 devpriv->ao_data[chan] = data[n]; 704 devpriv->ao_data[chan] = val;
698 705
699 return n; 706 return n;
700} 707}
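
Both write_ao hunks above fix the same off-by-one: when the loop finishes, n equals insn->n, so the old devpriv->ao_data[chan] = data[n] indexed one element past the last value supplied. Caching the most recent value in a local sidesteps that, and the previously cached register value is kept when no samples are written at all. A trimmed illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int data[4] = { 1, 2, 3, 4 };
	unsigned int last = 0;
	int n;

	/* Keep the latest value in a local; after the loop n equals the
	 * element count, so data[n] would index past the end. */
	for (n = 0; n < 4; n++)
		last = data[n];

	printf("last value written: %u\n", last);
	return 0;
}
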
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
48#include <linux/usb.h> 48#include <linux/usb.h>
49#include <linux/fcntl.h> 49#include <linux/fcntl.h>
50#include <linux/compiler.h> 50#include <linux/compiler.h>
51#include <asm/unaligned.h>
51 52
52#include "comedi_fc.h" 53#include "comedi_fc.h"
53#include "../comedidev.h" 54#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
792 } 793 }
793 794
794 /* 32 bits big endian from the A/D converter */ 795 /* 32 bits big endian from the A/D converter */
795 val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1))); 796 val = be32_to_cpu(get_unaligned((uint32_t
797 *)(devpriv->insn_buf + 1)));
796 val &= 0x00ffffff; /* strip status byte */ 798 val &= 0x00ffffff; /* strip status byte */
797 val ^= 0x00800000; /* convert to unsigned */ 799 val ^= 0x00800000; /* convert to unsigned */
798 800
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
1357 return ret; 1359 return ret;
1358 1360
1359 /* 32 bits big endian from the A/D converter */ 1361 /* 32 bits big endian from the A/D converter */
1360 val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1))); 1362 val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
1361 val &= 0x00ffffff; /* strip status byte */ 1363 val &= 0x00ffffff; /* strip status byte */
1362 val ^= 0x00800000; /* convert to unsigned */ 1364 val ^= 0x00800000; /* convert to unsigned */
1363 1365
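
The two reads above now go through get_unaligned() because the 32-bit big-endian sample starts one byte into insn_buf: dereferencing a uint32_t pointer at that offset is an unaligned access, which traps on some architectures and is slow on others. A portable user-space sketch of an unaligned big-endian load (buffer contents are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy the bytes out instead of dereferencing a misaligned pointer,
 * then assemble the big-endian value explicitly. */
static uint32_t load_be32_unaligned(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, 4);
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	/* A status byte followed by a big-endian sample, as in the URB data. */
	uint8_t buf[5] = { 0x00, 0x00, 0x80, 0x12, 0x34 };
	uint32_t val = load_be32_unaligned(buf + 1) & 0x00ffffff;

	printf("0x%06x\n", val ^ 0x00800000);	/* 0x001234 */
	return 0;
}
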
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
2232 return rtn; 2232 return rtn;
2233} 2233}
2234 2234
2235/*
2236 * Common Packet Handling code
2237 */
2238
2239static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
2240 long dlen, long plen, int n1, u8 *dbuf)
2241{
2242 char *error;
2243 long n;
2244 long remain;
2245 u8 *buf;
2246 u8 *b;
2247
2248 remain = nd->nd_remain;
2249 nd->nd_tx_work = 1;
2250
2251 /*
2252 * Otherwise data should appear only when we are
2253 * in the CS_READY state.
2254 */
2255
2256 if (ch->ch_state < CS_READY) {
2257 error = "Data received before RWIN established";
2258 nd->nd_remain = 0;
2259 nd->nd_state = NS_SEND_ERROR;
2260 nd->nd_error = error;
2261 }
2262
2263 /*
2264 * Assure that the data received is within the
2265 * allowable window.
2266 */
2267
2268 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2269
2270 if (dlen > n) {
2271 error = "Receive data overrun";
2272 nd->nd_remain = 0;
2273 nd->nd_state = NS_SEND_ERROR;
2274 nd->nd_error = error;
2275 }
2276
2277 /*
2278 * If we received 3 or less characters,
2279 * assume it is a human typing, and set RTIME
2280 * to 10 milliseconds.
2281 *
2282 * If we receive 10 or more characters,
2283 * assume its not a human typing, and set RTIME
2284 * to 100 milliseconds.
2285 */
2286
2287 if (ch->ch_edelay != DGRP_RTIME) {
2288 if (ch->ch_rtime != ch->ch_edelay) {
2289 ch->ch_rtime = ch->ch_edelay;
2290 ch->ch_flag |= CH_PARAM;
2291 }
2292 } else if (dlen <= 3) {
2293 if (ch->ch_rtime != 10) {
2294 ch->ch_rtime = 10;
2295 ch->ch_flag |= CH_PARAM;
2296 }
2297 } else {
2298 if (ch->ch_rtime != DGRP_RTIME) {
2299 ch->ch_rtime = DGRP_RTIME;
2300 ch->ch_flag |= CH_PARAM;
2301 }
2302 }
2303
2304 /*
2305 * If a portion of the packet is outside the
2306 * buffer, shorten the effective length of the
2307 * data packet to be the amount of data received.
2308 */
2309
2310 if (remain < plen)
2311 dlen -= plen - remain;
2312
2313 /*
2314 * Detect if receive flush is now complete.
2315 */
2316
2317 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2318 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2319 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2320 ch->ch_flag &= ~CH_RX_FLUSH;
2321 }
2322
2323 /*
2324 * If we are ready to receive, move the data into
2325 * the receive buffer.
2326 */
2327
2328 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2329
2330 if (ch->ch_state == CS_READY &&
2331 (ch->ch_tun.un_open_count != 0) &&
2332 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2333 (ch->ch_cflag & CF_CREAD) != 0 &&
2334 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2335 (ch->ch_send & RR_RX_FLUSH) == 0) {
2336
2337 if (ch->ch_rin + dlen >= RBUF_MAX) {
2338 n = RBUF_MAX - ch->ch_rin;
2339
2340 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2341
2342 ch->ch_rin = 0;
2343 dbuf += n;
2344 dlen -= n;
2345 }
2346
2347 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2348
2349 ch->ch_rin += dlen;
2350
2351
2352 /*
2353 * If we are not in fastcook mode, or
2354 * if there is a fastcook thread
2355 * waiting for data, send the data to
2356 * the line discipline.
2357 */
2358
2359 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2360 ch->ch_inwait != 0) {
2361 dgrp_input(ch);
2362 }
2363
2364 /*
2365 * If there is a read thread waiting
2366 * in select, and we are in fastcook
2367 * mode, wake him up.
2368 */
2369
2370 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2371 (ch->ch_flag & CH_FAST_READ) != 0)
2372 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2373
2374 /*
2375 * Wake any thread waiting in the
2376 * fastcook loop.
2377 */
2378
2379 if ((ch->ch_flag & CH_INPUT) != 0) {
2380 ch->ch_flag &= ~CH_INPUT;
2381 wake_up_interruptible(&ch->ch_flag_wait);
2382 }
2383 }
2384
2385 /*
2386 * Fabricate and insert a data packet header to
2387 * preced the remaining data when it comes in.
2388 */
2389
2390 if (remain < plen) {
2391 dlen = plen - remain;
2392 b = buf;
2393
2394 b[0] = 0x90 + n1;
2395 put_unaligned_be16(dlen, b + 1);
2396
2397 remain = 3;
2398 if (remain > 0 && b != buf)
2399 memcpy(buf, b, remain);
2400
2401 nd->nd_remain = remain;
2402 return;
2403 }
2404}
2405
2406/** 2235/**
2407 * dgrp_receive() -- decode data packets received from the remote PortServer. 2236 * dgrp_receive() -- decode data packets received from the remote PortServer.
2408 * @nd: pointer to a node structure 2237 * @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
2477 plen = dlen + 1; 2306 plen = dlen + 1;
2478 2307
2479 dbuf = b + 1; 2308 dbuf = b + 1;
2480 handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); 2309 goto data;
2481 break;
2482 2310
2483 /* 2311 /*
2484 * Process 2-byte header data packet. 2312 * Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
2492 plen = dlen + 2; 2320 plen = dlen + 2;
2493 2321
2494 dbuf = b + 2; 2322 dbuf = b + 2;
2495 handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); 2323 goto data;
2496 break;
2497 2324
2498 /* 2325 /*
2499 * Process 3-byte header data packet. 2326 * Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
2508 2335
2509 dbuf = b + 3; 2336 dbuf = b + 3;
2510 2337
2338 /*
2339 * Common packet handling code.
2340 */
2341
2342data:
2343 nd->nd_tx_work = 1;
2344
2345 /*
2346 * Otherwise data should appear only when we are
2347 * in the CS_READY state.
2348 */
2349
2350 if (ch->ch_state < CS_READY) {
2351 error = "Data received before RWIN established";
2352 goto prot_error;
2353 }
2354
2355 /*
2356 * Assure that the data received is within the
2357 * allowable window.
2358 */
2359
2360 n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
2361
2362 if (dlen > n) {
2363 error = "Receive data overrun";
2364 goto prot_error;
2365 }
2366
2367 /*
2368 * If we received 3 or less characters,
2369 * assume it is a human typing, and set RTIME
2370 * to 10 milliseconds.
2371 *
2372 * If we receive 10 or more characters,
2373 * assume its not a human typing, and set RTIME
2374 * to 100 milliseconds.
2375 */
2376
2377 if (ch->ch_edelay != DGRP_RTIME) {
2378 if (ch->ch_rtime != ch->ch_edelay) {
2379 ch->ch_rtime = ch->ch_edelay;
2380 ch->ch_flag |= CH_PARAM;
2381 }
2382 } else if (dlen <= 3) {
2383 if (ch->ch_rtime != 10) {
2384 ch->ch_rtime = 10;
2385 ch->ch_flag |= CH_PARAM;
2386 }
2387 } else {
2388 if (ch->ch_rtime != DGRP_RTIME) {
2389 ch->ch_rtime = DGRP_RTIME;
2390 ch->ch_flag |= CH_PARAM;
2391 }
2392 }
2393
2394 /*
2395 * If a portion of the packet is outside the
2396 * buffer, shorten the effective length of the
2397 * data packet to be the amount of data received.
2398 */
2399
2400 if (remain < plen)
2401 dlen -= plen - remain;
2402
2403 /*
2404 * Detect if receive flush is now complete.
2405 */
2406
2407 if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
2408 ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
2409 ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
2410 ch->ch_flag &= ~CH_RX_FLUSH;
2411 }
2412
2413 /*
2414 * If we are ready to receive, move the data into
2415 * the receive buffer.
2416 */
2417
2418 ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
2419
2420 if (ch->ch_state == CS_READY &&
2421 (ch->ch_tun.un_open_count != 0) &&
2422 (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
2423 (ch->ch_cflag & CF_CREAD) != 0 &&
2424 (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
2425 (ch->ch_send & RR_RX_FLUSH) == 0) {
2426
2427 if (ch->ch_rin + dlen >= RBUF_MAX) {
2428 n = RBUF_MAX - ch->ch_rin;
2429
2430 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
2431
2432 ch->ch_rin = 0;
2433 dbuf += n;
2434 dlen -= n;
2435 }
2436
2437 memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
2438
2439 ch->ch_rin += dlen;
2440
2441
2442 /*
2443 * If we are not in fastcook mode, or
2444 * if there is a fastcook thread
2445 * waiting for data, send the data to
2446 * the line discipline.
2447 */
2448
2449 if ((ch->ch_flag & CH_FAST_READ) == 0 ||
2450 ch->ch_inwait != 0) {
2451 dgrp_input(ch);
2452 }
2453
2454 /*
2455 * If there is a read thread waiting
2456 * in select, and we are in fastcook
2457 * mode, wake him up.
2458 */
2459
2460 if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
2461 (ch->ch_flag & CH_FAST_READ) != 0)
2462 wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
2463
2464 /*
2465 * Wake any thread waiting in the
2466 * fastcook loop.
2467 */
2468
2469 if ((ch->ch_flag & CH_INPUT) != 0) {
2470 ch->ch_flag &= ~CH_INPUT;
2471
2472 wake_up_interruptible(&ch->ch_flag_wait);
2473 }
2474 }
2475
2476 /*
2477 * Fabricate and insert a data packet header to
2478 * preced the remaining data when it comes in.
2479 */
2480
2481 if (remain < plen) {
2482 dlen = plen - remain;
2483 b = buf;
2484
2485 b[0] = 0x90 + n1;
2486 put_unaligned_be16(dlen, b + 1);
2487
2488 remain = 3;
2489 goto done;
2490 }
2511 break; 2491 break;
2512 2492
2513 /* 2493 /*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
635#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ 635#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
636 636
637 ret = register_wimax_device(phy_dev, &intf->dev); 637 ret = register_wimax_device(phy_dev, &intf->dev);
638 if (ret)
639 release_usb(udev);
638 640
639out: 641out:
640 if (ret) { 642 if (ret) {
641 kfree(phy_dev); 643 kfree(phy_dev);
642 kfree(udev); 644 kfree(udev);
645 usb_put_dev(usbdev);
643 } else { 646 } else {
644 usb_set_intfdata(intf, phy_dev); 647 usb_set_intfdata(intf, phy_dev);
645 } 648 }
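
The gdm_usb_probe() changes above plug two leaks on the failure path: the USB resources set up earlier are released when register_wimax_device() fails, and usb_put_dev() drops the reference taken earlier in probe so the struct usb_device is not pinned forever. The general discipline is to undo acquisitions in reverse order on every exit that follows them; a generic sketch with made-up resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *ref = malloc(32);		/* stand-in for the device reference */
	char *reg;			/* stand-in for the registration */

	if (!ref)
		return -1;

	reg = malloc(32);
	if (!reg)
		goto err_put_ref;	/* a later step failed: undo the earlier one */

	printf("probe succeeded\n");
	free(reg);
	free(ref);
	return 0;

err_put_ref:
	free(ref);
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
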
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
77 uint64_t mask; 77 uint64_t mask;
78 unsigned be; 78 unsigned be;
79 unsigned is_signed; 79 unsigned is_signed;
80 unsigned enabled;
81 unsigned location; 80 unsigned location;
82}; 81};
83 82
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
335 while (ent = readdir(dp), ent != NULL) { 334 while (ent = readdir(dp), ent != NULL) {
336 if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), 335 if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
337 "_en") == 0) { 336 "_en") == 0) {
337 int current_enabled = 0;
338 current = &(*ci_array)[count++]; 338 current = &(*ci_array)[count++];
339 ret = asprintf(&filename, 339 ret = asprintf(&filename,
340 "%s/%s", scan_el_dir, ent->d_name); 340 "%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
350 ret = -errno; 350 ret = -errno;
351 goto error_cleanup_array; 351 goto error_cleanup_array;
352 } 352 }
353 fscanf(sysfsfp, "%u", &current->enabled); 353 fscanf(sysfsfp, "%u", &current_enabled);
354 fclose(sysfsfp); 354 fclose(sysfsfp);
355 355
356 if (!current->enabled) { 356 if (!current_enabled) {
357 free(filename); 357 free(filename);
358 count--; 358 count--;
359 continue; 359 continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
393 }, { 393 }, {
394 .type = IIO_EV_TYPE_THRESH, 394 .type = IIO_EV_TYPE_THRESH,
395 .dir = IIO_EV_DIR_FALLING, 395 .dir = IIO_EV_DIR_FALLING,
396 .mask_separate = BIT(IIO_EV_INFO_VALUE), 396 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
397 BIT(IIO_EV_INFO_ENABLE), 397 BIT(IIO_EV_INFO_ENABLE),
398 }, { 398 }, {
399 .type = IIO_EV_TYPE_THRESH, 399 .type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
409 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ 409 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
410 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ 410 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
411 .scan_index = (_index), \ 411 .scan_index = (_index), \
412 .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \ 412 .scan_type = { \
413 .sign = 'u', \
414 .realbits = (_realbits), \
415 .storagebits = 16, \
416 .shift = 12 - (_realbits), \
417 .endianness = IIO_BE, \
418 }, \
413 .event_spec = _ev_spec, \ 419 .event_spec = _ev_spec, \
414 .num_event_specs = _num_ev_spec, \ 420 .num_event_specs = _num_ev_spec, \
415} 421}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
588 return 0; 594 return 0;
589 595
590error_free_irq: 596error_free_irq:
591 free_irq(client->irq, indio_dev); 597 if (client->irq > 0)
598 free_irq(client->irq, indio_dev);
592error_cleanup_ring: 599error_cleanup_ring:
593 ad799x_ring_cleanup(indio_dev); 600 ad799x_ring_cleanup(indio_dev);
594error_disable_reg: 601error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..7fc66a6a6e36 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
1035SHOW_SCALE_AVAILABLE_ATTR(5); 1035SHOW_SCALE_AVAILABLE_ATTR(5);
1036SHOW_SCALE_AVAILABLE_ATTR(6); 1036SHOW_SCALE_AVAILABLE_ATTR(6);
1037SHOW_SCALE_AVAILABLE_ATTR(7); 1037SHOW_SCALE_AVAILABLE_ATTR(7);
1038SHOW_SCALE_AVAILABLE_ATTR(8);
1039SHOW_SCALE_AVAILABLE_ATTR(9);
1040SHOW_SCALE_AVAILABLE_ATTR(10); 1038SHOW_SCALE_AVAILABLE_ATTR(10);
1041SHOW_SCALE_AVAILABLE_ATTR(11); 1039SHOW_SCALE_AVAILABLE_ATTR(11);
1042SHOW_SCALE_AVAILABLE_ATTR(12); 1040SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = {
1053 &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, 1051 &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
1054 &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, 1052 &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
1055 &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, 1053 &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
1056 &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
1057 &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
1058 &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, 1054 &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
1059 &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, 1055 &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
1060 &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, 1056 &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1613 * of the array. 1609 * of the array.
1614 */ 1610 */
1615 scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> 1611 scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
1616 (iio->channels[i].scan_type.realbits - s); 1612 (LRADC_RESOLUTION - s);
1617 lradc->scale_avail[i][s].nano = 1613 lradc->scale_avail[i][s].nano =
1618 do_div(scale_uv, 100000000) * 10; 1614 do_div(scale_uv, 100000000) * 10;
1619 lradc->scale_avail[i][s].integer = scale_uv; 1615 lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
629 struct iio_buffer *buffer; 629 struct iio_buffer *buffer;
630 630
631 buffer = iio_kfifo_allocate(indio_dev); 631 buffer = iio_kfifo_allocate(indio_dev);
632 if (buffer) 632 if (!buffer)
633 return -ENOMEM; 633 return -ENOMEM;
634 634
635 iio_device_attach_buffer(indio_dev, buffer); 635 iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
88 88
89 imx_drm_device_put(); 89 imx_drm_device_put();
90 90
91 drm_vblank_cleanup(imxdrm->drm); 91 drm_vblank_cleanup(drm);
92 drm_kms_helper_poll_fini(imxdrm->drm); 92 drm_kms_helper_poll_fini(drm);
93 drm_mode_config_cleanup(imxdrm->drm); 93 drm_mode_config_cleanup(drm);
94 94
95 return 0; 95 return 0;
96} 96}
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
142 142
143int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) 143int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
144{ 144{
145 return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); 145 return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
146} 146}
147EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); 147EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
148 148
149void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) 149void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
150{ 150{
151 drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); 151 drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
152} 152}
153EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); 153EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
154 154
155void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) 155void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
156{ 156{
157 drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); 157 drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
158} 158}
159EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); 159EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
160 160
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister(
370} 370}
371 371
372/* 372/*
373 * register a crtc to the drm core
374 */
375static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
376{
377 struct imx_drm_device *imxdrm = __imx_drm_device();
378 int ret;
379
380 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
381 if (ret)
382 return ret;
383
384 drm_crtc_helper_add(imx_drm_crtc->crtc,
385 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
386
387 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
388 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
389
390 drm_mode_group_reinit(imxdrm->drm);
391
392 return 0;
393}
394
395/*
396 * Called by the CRTC driver when all CRTCs are registered. This 373 * Called by the CRTC driver when all CRTCs are registered. This
397 * puts all the pieces together and initializes the driver. 374 * puts all the pieces together and initializes the driver.
398 * Once this is called no more CRTCs can be registered since 375 * Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
424 401
425 mutex_lock(&imxdrm->mutex); 402 mutex_lock(&imxdrm->mutex);
426 403
427 drm_kms_helper_poll_init(imxdrm->drm); 404 drm_kms_helper_poll_init(drm);
428 405
429 /* setup the grouping for the legacy output */ 406 /* setup the grouping for the legacy output */
430 ret = drm_mode_group_init_legacy_group(imxdrm->drm, 407 ret = drm_mode_group_init_legacy_group(drm,
431 &imxdrm->drm->primary->mode_group); 408 &drm->primary->mode_group);
432 if (ret) 409 if (ret)
433 goto err_kms; 410 goto err_kms;
434 411
435 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); 412 ret = drm_vblank_init(drm, MAX_CRTC);
436 if (ret) 413 if (ret)
437 goto err_kms; 414 goto err_kms;
438 415
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
441 * by drm timer once a current process gives up ownership of 418 * by drm timer once a current process gives up ownership of
442 * vblank event.(after drm_vblank_put function is called) 419 * vblank event.(after drm_vblank_put function is called)
443 */ 420 */
444 imxdrm->drm->vblank_disable_allowed = true; 421 drm->vblank_disable_allowed = true;
445 422
446 if (!imx_drm_device_get()) { 423 if (!imx_drm_device_get()) {
447 ret = -EINVAL; 424 ret = -EINVAL;
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
536 513
537 *new_crtc = imx_drm_crtc; 514 *new_crtc = imx_drm_crtc;
538 515
539 ret = imx_drm_crtc_register(imx_drm_crtc); 516 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
540 if (ret) 517 if (ret)
541 goto err_register; 518 goto err_register;
542 519
520 drm_crtc_helper_add(crtc,
521 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
522
523 drm_crtc_init(imxdrm->drm, crtc,
524 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
525
526 drm_mode_group_reinit(imxdrm->drm);
527
543 imx_drm_update_possible_crtcs(); 528 imx_drm_update_possible_crtcs();
544 529
545 mutex_unlock(&imxdrm->mutex); 530 mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..62ce0e86f14b 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/hdmi.h>
19#include <linux/regmap.h> 20#include <linux/regmap.h>
20#include <linux/mfd/syscon.h> 21#include <linux/mfd/syscon.h>
21#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 22#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@ enum hdmi_datamap {
52 YCbCr422_12B = 0x12, 53 YCbCr422_12B = 0x12,
53}; 54};
54 55
55enum hdmi_colorimetry {
56 ITU601,
57 ITU709,
58};
59
60enum imx_hdmi_devtype { 56enum imx_hdmi_devtype {
61 IMX6Q_HDMI, 57 IMX6Q_HDMI,
62 IMX6DL_HDMI, 58 IMX6DL_HDMI,
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
489 485
490 if (is_color_space_conversion(hdmi)) { 486 if (is_color_space_conversion(hdmi)) {
491 if (hdmi->hdmi_data.enc_out_format == RGB) { 487 if (hdmi->hdmi_data.enc_out_format == RGB) {
492 if (hdmi->hdmi_data.colorimetry == ITU601) 488 if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
493 csc_coeff = &csc_coeff_rgb_out_eitu601; 489 csc_coeff = &csc_coeff_rgb_out_eitu601;
494 else 490 else
495 csc_coeff = &csc_coeff_rgb_out_eitu709; 491 csc_coeff = &csc_coeff_rgb_out_eitu709;
496 } else if (hdmi->hdmi_data.enc_in_format == RGB) { 492 } else if (hdmi->hdmi_data.enc_in_format == RGB) {
497 if (hdmi->hdmi_data.colorimetry == ITU601) 493 if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
498 csc_coeff = &csc_coeff_rgb_in_eitu601; 494 csc_coeff = &csc_coeff_rgb_in_eitu601;
499 else 495 else
500 csc_coeff = &csc_coeff_rgb_in_eitu709; 496 csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
1140 /* Set up colorimetry */ 1136 /* Set up colorimetry */
1141 if (hdmi->hdmi_data.enc_out_format == XVYCC444) { 1137 if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
1142 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; 1138 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
1143 if (hdmi->hdmi_data.colorimetry == ITU601) 1139 if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
1144 ext_colorimetry = 1140 ext_colorimetry =
1145 HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; 1141 HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
1146 else /* hdmi->hdmi_data.colorimetry == ITU709 */ 1142 else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
1147 ext_colorimetry = 1143 ext_colorimetry =
1148 HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; 1144 HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
1149 } else if (hdmi->hdmi_data.enc_out_format != RGB) { 1145 } else if (hdmi->hdmi_data.enc_out_format != RGB) {
1150 if (hdmi->hdmi_data.colorimetry == ITU601) 1146 if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
1151 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; 1147 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
1152 else /* hdmi->hdmi_data.colorimetry == ITU709 */ 1148 else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
1153 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; 1149 colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
1154 ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; 1150 ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
1155 } else { /* Carries no data */ 1151 } else { /* Carries no data */
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
1379 (hdmi->vic == 21) || (hdmi->vic == 22) || 1375 (hdmi->vic == 21) || (hdmi->vic == 22) ||
1380 (hdmi->vic == 2) || (hdmi->vic == 3) || 1376 (hdmi->vic == 2) || (hdmi->vic == 3) ||
1381 (hdmi->vic == 17) || (hdmi->vic == 18)) 1377 (hdmi->vic == 17) || (hdmi->vic == 18))
1382 hdmi->hdmi_data.colorimetry = ITU601; 1378 hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
1383 else 1379 else
1384 hdmi->hdmi_data.colorimetry = ITU709; 1380 hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
1385 1381
1386 if ((hdmi->vic == 10) || (hdmi->vic == 11) || 1382 if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
1387 (hdmi->vic == 12) || (hdmi->vic == 13) || 1383 (hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
9* Other minor misc cleanups... 9* Other minor misc cleanups...
10 10
11Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger 11Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
12<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing 12<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
13hpdd-discuss <hpdd-discuss@lists.01.org> would be great too. 13Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
14would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
61 __u16 kuc_msglen; /* Including header */ 61 __u16 kuc_msglen; /* Including header */
62} __attribute__((aligned(sizeof(__u64)))); 62} __attribute__((aligned(sizeof(__u64))));
63 63
64#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
65
64#define KUC_MAGIC 0x191C /*Lustre9etLinC */ 66#define KUC_MAGIC 0x191C /*Lustre9etLinC */
65#define KUC_FL_BLOCK 0x01 /* Wait for send */ 67#define KUC_FL_BLOCK 0x01 /* Wait for send */
66 68
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
120do { \ 120do { \
121 LASSERT(!in_interrupt() || \ 121 LASSERT(!in_interrupt() || \
122 ((size) <= LIBCFS_VMALLOC_SIZE && \ 122 ((size) <= LIBCFS_VMALLOC_SIZE && \
123 ((mask) & GFP_ATOMIC)) != 0); \ 123 ((mask) & __GFP_WAIT) == 0)); \
124} while (0) 124} while (0)
125 125
126#define LIBCFS_ALLOC_POST(ptr, size) \ 126#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
529{ 529{
530 struct page *page; 530 struct page *page;
531 531
532 if (is_vmalloc_addr(vaddr)) { 532 if (is_vmalloc_addr((void *)vaddr)) {
533 page = vmalloc_to_page ((void *)vaddr); 533 page = vmalloc_to_page ((void *)vaddr);
534 LASSERT (page != NULL); 534 LASSERT (page != NULL);
535 return page; 535 return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
924int 924int
925ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) 925ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
926{ 926{
927 int mpflag = 0; 927 int mpflag = 1;
928 int type = lntmsg->msg_type; 928 int type = lntmsg->msg_type;
929 lnet_process_id_t target = lntmsg->msg_target; 929 lnet_process_id_t target = lntmsg->msg_target;
930 unsigned int payload_niov = lntmsg->msg_niov; 930 unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
993 993
994 /* The first fragment will be set later in pro_pack */ 994 /* The first fragment will be set later in pro_pack */
995 rc = ksocknal_launch_packet(ni, tx, target); 995 rc = ksocknal_launch_packet(ni, tx, target);
996 if (lntmsg->msg_vmflush) 996 if (!mpflag)
997 cfs_memory_pressure_restore(mpflag); 997 cfs_memory_pressure_restore(mpflag);
998
998 if (rc == 0) 999 if (rc == 0)
999 return (0); 1000 return (0);
1000 1001
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
760 *flags |= (error << CLF_HSM_ERR_L); 760 *flags |= (error << CLF_HSM_ERR_L);
761} 761}
762 762
763#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec)) 763#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
764 sizeof(struct changelog_ext_rec))
764 765
765struct changelog_rec { 766struct changelog_rec {
766 __u16 cr_namelen; 767 __u16 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
1086 break; 1086 break;
1087 case Q_GETQUOTA: 1087 case Q_GETQUOTA:
1088 if (((type == USRQUOTA && 1088 if (((type == USRQUOTA &&
1089 uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || 1089 !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
1090 (type == GRPQUOTA && 1090 (type == GRPQUOTA &&
1091 !in_egroup_p(make_kgid(&init_user_ns, id)))) && 1091 !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
1092 (!cfs_capable(CFS_CAP_SYS_ADMIN) || 1092 (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
1430{ 1430{
1431 struct kuc_hdr *lh = (struct kuc_hdr *)buf; 1431 struct kuc_hdr *lh = (struct kuc_hdr *)buf;
1432 1432
1433 LASSERT(len <= CR_MAXSIZE); 1433 LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
1434 1434
1435 lh->kuc_magic = KUC_MAGIC; 1435 lh->kuc_magic = KUC_MAGIC;
1436 lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; 1436 lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
1503 CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", 1503 CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
1504 cs->cs_fp, cs->cs_startrec); 1504 cs->cs_fp, cs->cs_startrec);
1505 1505
1506 OBD_ALLOC(cs->cs_buf, CR_MAXSIZE); 1506 OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
1507 if (cs->cs_buf == NULL) 1507 if (cs->cs_buf == NULL)
1508 GOTO(out, rc = -ENOMEM); 1508 GOTO(out, rc = -ENOMEM);
1509 1509
@@ -1540,7 +1540,7 @@ out:
1540 if (ctxt) 1540 if (ctxt)
1541 llog_ctxt_put(ctxt); 1541 llog_ctxt_put(ctxt);
1542 if (cs->cs_buf) 1542 if (cs->cs_buf)
1543 OBD_FREE(cs->cs_buf, CR_MAXSIZE); 1543 OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
1544 OBD_FREE_PTR(cs); 1544 OBD_FREE_PTR(cs);
1545 return rc; 1545 return rc;
1546} 1546}
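
The three hunks above move the changelog send path from CR_MAXSIZE to KUC_CHANGELOG_MSG_MAXSIZE in the length assertion, the allocation and the free, so every user of the buffer is sized by the same definition. A minimal userspace sketch of that discipline (the record header and MAX_NAME below are stand-ins, not the Lustre types):

    #include <assert.h>
    #include <stdlib.h>
    #include <stdio.h>

    #define MAX_NAME 255
    struct rec_hdr { unsigned short namelen; };              /* stand-in record header */
    #define MSG_MAXSIZE (sizeof(struct rec_hdr) + MAX_NAME)  /* single source of truth */

    int main(void)
    {
        char *buf = malloc(MSG_MAXSIZE);                     /* allocation ...         */
        if (!buf)
            return 1;

        size_t reclen = sizeof(struct rec_hdr) + 5;
        assert(reclen <= MSG_MAXSIZE);                       /* ... length check ...   */

        free(buf);                                           /* ... and release, all   */
        printf("max msg size: %zu\n", (size_t)MSG_MAXSIZE);  /* sized by one macro     */
        return 0;
    }
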
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..31b269a5fff7 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
307} 307}
308 308
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, 309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
310 void *accel_priv) 310 void *accel_priv, select_queue_fallback_t fallback)
311{ 311{
312 return (u16)smp_processor_id(); 312 return (u16)smp_processor_id();
313} 313}
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
892 priv->mii_bus->write = xlr_mii_write; 892 priv->mii_bus->write = xlr_mii_write;
893 priv->mii_bus->parent = &pdev->dev; 893 priv->mii_bus->parent = &pdev->dev;
894 priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 894 priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
895 if (priv->mii_bus->irq == NULL) {
896 pr_err("irq alloc failed\n");
897 mdiobus_free(priv->mii_bus);
898 return -ENOMEM;
899 }
895 priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; 900 priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
896 901
897 /* Scan only the enabled address */ 902 /* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
275 */ 275 */
276#define MAX_TRANSFER_PACKETS ((1<<10)-1) 276#define MAX_TRANSFER_PACKETS ((1<<10)-1)
277 277
278enum {
279 USB_CLOCK_TYPE_REF_12,
280 USB_CLOCK_TYPE_REF_24,
281 USB_CLOCK_TYPE_REF_48,
282 USB_CLOCK_TYPE_CRYSTAL_12,
283};
284
285/** 278/**
286 * Logical transactions may take numerous low level 279 * Logical transactions may take numerous low level
287 * transactions, especially when splits are concerned. This 280 * transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
471/* Returns the IO address to push/pop stuff data from the FIFOs */ 464/* Returns the IO address to push/pop stuff data from the FIFOs */
472#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) 465#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
473 466
474static int octeon_usb_get_clock_type(void)
475{
476 switch (cvmx_sysinfo_get()->board_type) {
477 case CVMX_BOARD_TYPE_BBGW_REF:
478 case CVMX_BOARD_TYPE_LANAI2_A:
479 case CVMX_BOARD_TYPE_LANAI2_U:
480 case CVMX_BOARD_TYPE_LANAI2_G:
481 case CVMX_BOARD_TYPE_UBNT_E100:
482 return USB_CLOCK_TYPE_CRYSTAL_12;
483 }
484 return USB_CLOCK_TYPE_REF_48;
485}
486
487/** 467/**
488 * Read a USB 32bit CSR. It performs the necessary address swizzle 468 * Read a USB 32bit CSR. It performs the necessary address swizzle
489 * for 32bit CSRs and logs the value in a readable format if 469 * for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
582 return 0; /* Data0 */ 562 return 0; /* Data0 */
583} 563}
584 564
585
586/**
587 * Return the number of USB ports supported by this Octeon
588 * chip. If the chip doesn't support USB, or is not supported
589 * by this API, a zero will be returned. Most Octeon chips
590 * support one usb port, but some support two ports.
591 * cvmx_usb_initialize() must be called on independent
592 * struct cvmx_usb_state.
593 *
594 * Returns: Number of port, zero if usb isn't supported
595 */
596static int cvmx_usb_get_num_ports(void)
597{
598 int arch_ports = 0;
599
600 if (OCTEON_IS_MODEL(OCTEON_CN56XX))
601 arch_ports = 1;
602 else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
603 arch_ports = 2;
604 else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
605 arch_ports = 1;
606 else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
607 arch_ports = 1;
608 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
609 arch_ports = 1;
610 else
611 arch_ports = 0;
612
613 return arch_ports;
614}
615
616/** 565/**
617 * Initialize a USB port for use. This must be called before any 566 * Initialize a USB port for use. This must be called before any
618 * other access to the Octeon USB port is made. The port starts 567 * other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
628 * Returns: 0 or a negative error code. 577 * Returns: 0 or a negative error code.
629 */ 578 */
630static int cvmx_usb_initialize(struct cvmx_usb_state *usb, 579static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
631 int usb_port_number) 580 int usb_port_number,
581 enum cvmx_usb_initialize_flags flags)
632{ 582{
633 union cvmx_usbnx_clk_ctl usbn_clk_ctl; 583 union cvmx_usbnx_clk_ctl usbn_clk_ctl;
634 union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; 584 union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
635 enum cvmx_usb_initialize_flags flags = 0;
636 int i; 585 int i;
637 586
638 /* At first allow 0-1 for the usb port number */ 587 /* At first allow 0-1 for the usb port number */
639 if ((usb_port_number < 0) || (usb_port_number > 1)) 588 if ((usb_port_number < 0) || (usb_port_number > 1))
640 return -EINVAL; 589 return -EINVAL;
641 /* For all chips except 52XX there is only one port */
642 if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
643 return -EINVAL;
644 /* Try to determine clock type automatically */
645 if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
646 /* Only 12 MHZ crystals are supported */
647 flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
648 } else {
649 flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
650
651 switch (octeon_usb_get_clock_type()) {
652 case USB_CLOCK_TYPE_REF_12:
653 flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
654 break;
655 case USB_CLOCK_TYPE_REF_24:
656 flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
657 break;
658 case USB_CLOCK_TYPE_REF_48:
659 flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
660 break;
661 default:
662 return -EINVAL;
663 break;
664 }
665 }
666 590
667 memset(usb, 0, sizeof(*usb)); 591 memset(usb, 0, sizeof(*usb));
668 usb->init_flags = flags; 592 usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
3431 return 0; 3355 return 0;
3432} 3356}
3433 3357
3434
3435static const struct hc_driver octeon_hc_driver = { 3358static const struct hc_driver octeon_hc_driver = {
3436 .description = "Octeon USB", 3359 .description = "Octeon USB",
3437 .product_desc = "Octeon Host Controller", 3360 .product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
3448 .hub_control = octeon_usb_hub_control, 3371 .hub_control = octeon_usb_hub_control,
3449}; 3372};
3450 3373
3451 3374static int octeon_usb_probe(struct platform_device *pdev)
3452static int octeon_usb_driver_probe(struct device *dev)
3453{ 3375{
3454 int status; 3376 int status;
3455 int usb_num = to_platform_device(dev)->id; 3377 int initialize_flags;
3456 int irq = platform_get_irq(to_platform_device(dev), 0); 3378 int usb_num;
3379 struct resource *res_mem;
3380 struct device_node *usbn_node;
3381 int irq = platform_get_irq(pdev, 0);
3382 struct device *dev = &pdev->dev;
3457 struct octeon_hcd *priv; 3383 struct octeon_hcd *priv;
3458 struct usb_hcd *hcd; 3384 struct usb_hcd *hcd;
3459 unsigned long flags; 3385 unsigned long flags;
3386 u32 clock_rate = 48000000;
3387 bool is_crystal_clock = false;
3388 const char *clock_type;
3389 int i;
3390
3391 if (dev->of_node == NULL) {
3392 dev_err(dev, "Error: empty of_node\n");
3393 return -ENXIO;
3394 }
3395 usbn_node = dev->of_node->parent;
3396
3397 i = of_property_read_u32(usbn_node,
3398 "refclk-frequency", &clock_rate);
3399 if (i) {
3400 dev_err(dev, "No USBN \"refclk-frequency\"\n");
3401 return -ENXIO;
3402 }
3403 switch (clock_rate) {
3404 case 12000000:
3405 initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
3406 break;
3407 case 24000000:
3408 initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
3409 break;
3410 case 48000000:
3411 initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
3412 break;
3413 default:
 3414 dev_err(dev, "Illegal USBN \"refclk-frequency\" %u\n", clock_rate);
3415 return -ENXIO;
3416
3417 }
3418
3419 i = of_property_read_string(usbn_node,
3420 "refclk-type", &clock_type);
3421
3422 if (!i && strcmp("crystal", clock_type) == 0)
3423 is_crystal_clock = true;
3424
3425 if (is_crystal_clock)
3426 initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
3427 else
3428 initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
3429
3430 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3431 if (res_mem == NULL) {
3432 dev_err(dev, "found no memory resource\n");
3433 return -ENXIO;
3434 }
3435 usb_num = (res_mem->start >> 44) & 1;
3436
3437 if (irq < 0) {
3438 /* Defective device tree, but we know how to fix it. */
3439 irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
3440 irq = irq_create_mapping(NULL, hwirq);
3441 }
3460 3442
3461 /* 3443 /*
3462 * Set the DMA mask to 64bits so we get buffers already translated for 3444 * Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
3465 dev->coherent_dma_mask = ~0; 3447 dev->coherent_dma_mask = ~0;
3466 dev->dma_mask = &dev->coherent_dma_mask; 3448 dev->dma_mask = &dev->coherent_dma_mask;
3467 3449
3450 /*
3451 * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
3452 * IOB priority registers. Under heavy network load USB
3453 * hardware can be starved by the IOB causing a crash. Give
3454 * it a priority boost if it has been waiting more than 400
3455 * cycles to avoid this situation.
3456 *
3457 * Testing indicates that a cnt_val of 8192 is not sufficient,
3458 * but no failures are seen with 4096. We choose a value of
3459 * 400 to give a safety factor of 10.
3460 */
3461 if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
3462 union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
3463
3464 pri_cnt.u64 = 0;
3465 pri_cnt.s.cnt_enb = 1;
3466 pri_cnt.s.cnt_val = 400;
3467 cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
3468 }
3469
3468 hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); 3470 hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
3469 if (!hcd) { 3471 if (!hcd) {
3470 dev_dbg(dev, "Failed to allocate memory for HCD\n"); 3472 dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
3478 tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); 3480 tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
3479 INIT_LIST_HEAD(&priv->dequeue_list); 3481 INIT_LIST_HEAD(&priv->dequeue_list);
3480 3482
3481 status = cvmx_usb_initialize(&priv->usb, usb_num); 3483 status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
3482 if (status) { 3484 if (status) {
3483 dev_dbg(dev, "USB initialization failed with %d\n", status); 3485 dev_dbg(dev, "USB initialization failed with %d\n", status);
3484 kfree(hcd); 3486 kfree(hcd);
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev)
3492 cvmx_usb_poll(&priv->usb); 3494 cvmx_usb_poll(&priv->usb);
3493 spin_unlock_irqrestore(&priv->lock, flags); 3495 spin_unlock_irqrestore(&priv->lock, flags);
3494 3496
3495 status = usb_add_hcd(hcd, irq, IRQF_SHARED); 3497 status = usb_add_hcd(hcd, irq, 0);
3496 if (status) { 3498 if (status) {
3497 dev_dbg(dev, "USB add HCD failed with %d\n", status); 3499 dev_dbg(dev, "USB add HCD failed with %d\n", status);
3498 kfree(hcd); 3500 kfree(hcd);
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev)
3500 } 3502 }
3501 device_wakeup_enable(hcd->self.controller); 3503 device_wakeup_enable(hcd->self.controller);
3502 3504
3503 dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); 3505 dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
3504 3506
3505 return 0; 3507 return 0;
3506} 3508}
3507 3509
3508static int octeon_usb_driver_remove(struct device *dev) 3510static int octeon_usb_remove(struct platform_device *pdev)
3509{ 3511{
3510 int status; 3512 int status;
3513 struct device *dev = &pdev->dev;
3511 struct usb_hcd *hcd = dev_get_drvdata(dev); 3514 struct usb_hcd *hcd = dev_get_drvdata(dev);
3512 struct octeon_hcd *priv = hcd_to_octeon(hcd); 3515 struct octeon_hcd *priv = hcd_to_octeon(hcd);
3513 unsigned long flags; 3516 unsigned long flags;
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
3525 return 0; 3528 return 0;
3526} 3529}
3527 3530
3528static struct device_driver octeon_usb_driver = { 3531static struct of_device_id octeon_usb_match[] = {
3529 .name = "OcteonUSB", 3532 {
3530 .bus = &platform_bus_type, 3533 .compatible = "cavium,octeon-5750-usbc",
3531 .probe = octeon_usb_driver_probe, 3534 },
3532 .remove = octeon_usb_driver_remove, 3535 {},
3533}; 3536};
3534 3537
3538static struct platform_driver octeon_usb_driver = {
3539 .driver = {
3540 .name = "OcteonUSB",
3541 .owner = THIS_MODULE,
3542 .of_match_table = octeon_usb_match,
3543 },
3544 .probe = octeon_usb_probe,
3545 .remove = octeon_usb_remove,
3546};
3535 3547
3536#define MAX_USB_PORTS 10 3548static int __init octeon_usb_driver_init(void)
3537static struct platform_device *pdev_glob[MAX_USB_PORTS];
3538static int octeon_usb_registered;
3539static int __init octeon_usb_module_init(void)
3540{ 3549{
3541 int num_devices = cvmx_usb_get_num_ports(); 3550 if (usb_disabled())
3542 int device; 3551 return 0;
3543
3544 if (usb_disabled() || num_devices == 0)
3545 return -ENODEV;
3546
3547 if (driver_register(&octeon_usb_driver))
3548 return -ENOMEM;
3549
3550 octeon_usb_registered = 1;
3551
3552 /*
3553 * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
3554 * IOB priority registers. Under heavy network load USB
3555 * hardware can be starved by the IOB causing a crash. Give
3556 * it a priority boost if it has been waiting more than 400
3557 * cycles to avoid this situation.
3558 *
3559 * Testing indicates that a cnt_val of 8192 is not sufficient,
3560 * but no failures are seen with 4096. We choose a value of
3561 * 400 to give a safety factor of 10.
3562 */
3563 if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
3564 union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
3565
3566 pri_cnt.u64 = 0;
3567 pri_cnt.s.cnt_enb = 1;
3568 pri_cnt.s.cnt_val = 400;
3569 cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
3570 }
3571
3572 for (device = 0; device < num_devices; device++) {
3573 struct resource irq_resource;
3574 struct platform_device *pdev;
3575 memset(&irq_resource, 0, sizeof(irq_resource));
3576 irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
3577 irq_resource.end = irq_resource.start;
3578 irq_resource.flags = IORESOURCE_IRQ;
3579 pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1);
3580 if (IS_ERR(pdev)) {
3581 driver_unregister(&octeon_usb_driver);
3582 octeon_usb_registered = 0;
3583 return PTR_ERR(pdev);
3584 }
3585 if (device < MAX_USB_PORTS)
3586 pdev_glob[device] = pdev;
3587 3552
3588 } 3553 return platform_driver_register(&octeon_usb_driver);
3589 return 0;
3590} 3554}
3555module_init(octeon_usb_driver_init);
3591 3556
3592static void __exit octeon_usb_module_cleanup(void) 3557static void __exit octeon_usb_driver_exit(void)
3593{ 3558{
3594 int i; 3559 if (usb_disabled())
3560 return;
3595 3561
3596 for (i = 0; i < MAX_USB_PORTS; i++) 3562 platform_driver_unregister(&octeon_usb_driver);
3597 if (pdev_glob[i]) {
3598 platform_device_unregister(pdev_glob[i]);
3599 pdev_glob[i] = NULL;
3600 }
3601 if (octeon_usb_registered)
3602 driver_unregister(&octeon_usb_driver);
3603} 3563}
3564module_exit(octeon_usb_driver_exit);
3604 3565
3605MODULE_LICENSE("GPL"); 3566MODULE_LICENSE("GPL");
3606MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); 3567MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
3607MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver."); 3568MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
3608module_init(octeon_usb_module_init);
3609module_exit(octeon_usb_module_cleanup);
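
The new probe above reads "refclk-frequency" and "refclk-type" from the device tree and turns them into initialization flags, replacing the board-type lookup removed earlier in this file. A hedged sketch of that mapping in plain C (the flag names and values here are illustrative, not the driver's cvmx_usb enums):

    #include <stdio.h>
    #include <string.h>

    enum { CLK_12MHZ = 1 << 0, CLK_24MHZ = 1 << 1, CLK_48MHZ = 1 << 2,
           CLK_XO_XI = 1 << 3, CLK_XO_GND = 1 << 4 };   /* illustrative flag bits */

    static int clock_flags(unsigned rate, const char *type, int *flags)
    {
        switch (rate) {
        case 12000000: *flags = CLK_12MHZ; break;
        case 24000000: *flags = CLK_24MHZ; break;
        case 48000000: *flags = CLK_48MHZ; break;
        default:       return -1;                       /* unsupported reference clock */
        }
        /* a crystal reference uses the XO/XI pins, anything else grounds XO */
        *flags |= (type && strcmp(type, "crystal") == 0) ? CLK_XO_XI : CLK_XO_GND;
        return 0;
    }

    int main(void)
    {
        int flags;

        if (clock_flags(12000000, "crystal", &flags) == 0)
            printf("flags = %#x\n", flags);
        return 0;
    }
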
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
668 if (binding) { 668 if (binding) {
669 binding->ptype.type = __constant_htons(OZ_ETHERTYPE); 669 binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
670 binding->ptype.func = oz_pkt_recv; 670 binding->ptype.func = oz_pkt_recv;
671 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
672 if (net_dev && *net_dev) { 671 if (net_dev && *net_dev) {
672 memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
673 oz_dbg(ON, "Adding binding: %s\n", net_dev); 673 oz_dbg(ON, "Adding binding: %s\n", net_dev);
674 binding->ptype.dev = 674 binding->ptype.dev =
675 dev_get_by_name(&init_net, net_dev); 675 dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
680 } 680 }
681 } else { 681 } else {
682 oz_dbg(ON, "Binding to all netcards\n"); 682 oz_dbg(ON, "Binding to all netcards\n");
683 memset(binding->name, 0, OZ_MAX_BINDING_LEN);
683 binding->ptype.dev = NULL; 684 binding->ptype.dev = NULL;
684 } 685 }
685 if (binding) { 686 if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
912 unsigned char *pbuf; 912 unsigned char *pbuf;
913 u32 wpa_ielen = 0; 913 u32 wpa_ielen = 0;
914 u8 *pbssid = GetAddr3Ptr(pframe); 914 u8 *pbssid = GetAddr3Ptr(pframe);
915 u32 hidden_ssid = 0;
916 struct HT_info_element *pht_info = NULL; 915 struct HT_info_element *pht_info = NULL;
917 struct rtw_ieee80211_ht_cap *pht_cap = NULL; 916 struct rtw_ieee80211_ht_cap *pht_cap = NULL;
918 u32 bcn_channel; 917 u32 bcn_channel;
919 unsigned short ht_cap_info; 918 unsigned short ht_cap_info;
920 unsigned char ht_info_infos_0; 919 unsigned char ht_info_infos_0;
920 int ssid_len;
921 921
922 if (is_client_associated_to_ap(Adapter) == false) 922 if (is_client_associated_to_ap(Adapter) == false)
923 return true; 923 return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
999 } 999 }
1000 1000
1001 /* checking SSID */ 1001 /* checking SSID */
1002 ssid_len = 0;
1002 p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_); 1003 p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
1003 if (p == NULL) { 1004 if (p) {
1004 DBG_88E("%s marc: cannot find SSID for survey event\n", __func__); 1005 ssid_len = *(p + 1);
1005 hidden_ssid = true; 1006 if (ssid_len > NDIS_802_11_LENGTH_SSID)
1006 } else { 1007 ssid_len = 0;
1007 hidden_ssid = false;
1008 }
1009
1010 if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
1011 memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
1012 bssid->Ssid.SsidLength = *(p + 1);
1013 } else {
1014 bssid->Ssid.SsidLength = 0;
1015 bssid->Ssid.Ssid[0] = '\0';
1016 } 1008 }
1009 memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
1010 bssid->Ssid.SsidLength = ssid_len;
1017 1011
1018 RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d " 1012 RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
1019 "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid, 1013 "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
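
The rewritten SSID check above reads the element length, clamps anything larger than the maximum SSID length to zero, and only then copies, so a malformed beacon can no longer overrun Ssid[]. A small standalone sketch of the same pattern (the 32-byte limit mirrors NDIS_802_11_LENGTH_SSID; the element bytes are invented):

    #include <stdio.h>
    #include <string.h>

    #define SSID_MAX 32

    int main(void)
    {
        /* information element: tag 0x00 (SSID), length 4, payload "test" */
        unsigned char ie[] = { 0x00, 0x04, 't', 'e', 's', 't' };
        char ssid[SSID_MAX + 1] = { 0 };
        size_t ssid_len = 0;

        if (ie[0] == 0x00) {             /* found the SSID element           */
            ssid_len = ie[1];
            if (ssid_len > SSID_MAX)     /* clamp a bogus length to zero ... */
                ssid_len = 0;
        }
        memcpy(ssid, &ie[2], ssid_len);  /* ... so the copy cannot overrun   */
        printf("ssid_len=%zu ssid=%s\n", ssid_len, ssid);
        return 0;
    }
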
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
2500 ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n", 2500 ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
2501 poidparam->subcode, poidparam->len, len)); 2501 poidparam->subcode, poidparam->len, len));
2502 2502
2503 if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) { 2503 if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
2504 RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n")); 2504 RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
2505 ret = -EINVAL; 2505 ret = -EINVAL;
2506 goto _rtw_mp_ioctl_hdl_exit; 2506 goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
3164 u8 *p2pie; 3164 u8 *p2pie;
3165 uint p2pielen = 0, attr_contentlen = 0; 3165 uint p2pielen = 0, attr_contentlen = 0;
3166 u8 attr_content[100] = {0x00}; 3166 u8 attr_content[100] = {0x00};
3167 3167 u8 go_devadd_str[17 + 12] = {};
3168 u8 go_devadd_str[17 + 10] = {0x00};
3169 /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
3170 3168
3171 /* Commented by Albert 20121209 */ 3169 /* Commented by Albert 20121209 */
3172 /* The input data is the GO's interface address which the application wants to know its device address. */ 3170 /* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
3223 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3221 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3224 3222
3225 if (!blnMatch) 3223 if (!blnMatch)
3226 sprintf(go_devadd_str, "\n\ndev_add = NULL"); 3224 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
3227 else 3225 else
3228 sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", 3226 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
3229 attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); 3227 attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
3230 3228
3231 if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17)) 3229 if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
3232 return -EFAULT; 3230 return -EFAULT;
3233 return ret; 3231 return ret;
3234} 3232}
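
Two hardenings above are independent: the ioctl subcode is now bounded by ARRAY_SIZE() of the handler table itself, and the reply string is built with snprintf() bounded by sizeof the buffer, which is also the size handed to copy_to_user. A compact userspace sketch of both idioms (the table entries and address bytes are made up):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *handlers[] = { "read", "write", "reset" };

    int main(void)
    {
        unsigned subcode = 2;
        unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        char out[17 + 12] = { 0 };

        if (subcode >= ARRAY_SIZE(handlers))    /* bound tracks the table size */
            return 1;

        /* snprintf never writes past sizeof(out), unlike the old sprintf */
        snprintf(out, sizeof(out), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        printf("handler=%s%s\n", handlers[subcode], out);
        return 0;
    }
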
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..7c9ee58f47bb 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
653} 653}
654 654
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, 655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
656 void *accel_priv) 656 void *accel_priv, select_queue_fallback_t fallback)
657{ 657{
658 struct adapter *padapter = rtw_netdev_priv(dev); 658 struct adapter *padapter = rtw_netdev_priv(dev);
659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..a70dcef1419e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
53 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ 53 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
58 {} /* Terminating entry */ 58 {} /* Terminating entry */
59}; 59};
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
index 2aa5dac2f1df..abccc9dabd65 100644
--- a/drivers/staging/rtl8821ae/Kconfig
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -1,6 +1,6 @@
1config R8821AE 1config R8821AE
2 tristate "RealTek RTL8821AE Wireless LAN NIC driver" 2 tristate "RealTek RTL8821AE Wireless LAN NIC driver"
3 depends on PCI && WLAN 3 depends on PCI && WLAN && MAC80211
4 depends on m 4 depends on m
5 select WIRELESS_EXT 5 select WIRELESS_EXT
6 select WEXT_PRIV 6 select WEXT_PRIV
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index cfe88a1efd55..76bef93ad70a 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -1414,7 +1414,7 @@ struct rtl_dm {
1414 1414
1415 1415
1416 /*88e tx power tracking*/ 1416 /*88e tx power tracking*/
1417 u8 bb_swing_idx_ofdm[2]; 1417 u8 bb_swing_idx_ofdm[MAX_RF_PATH];
1418 u8 bb_swing_idx_ofdm_current; 1418 u8 bb_swing_idx_ofdm_current;
1419 u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; 1419 u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
1420 bool bb_swing_flag_Ofdm; 1420 bool bb_swing_flag_Ofdm;
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
169 struct pool *p; 169 struct pool *p;
170 170
171 p = calloc(1, sizeof(struct pool)); 171 p = calloc(1, sizeof(struct pool));
172 if (!p) { 172 if (!p)
173 free(p);
174 return NULL; 173 return NULL;
175 }
176 174
177 p->mem = calloc(1, size); 175 p->mem = calloc(1, size);
178 if (!p->mem) 176 if (!p->mem) {
177 free(p);
179 return NULL; 178 return NULL;
179 }
180 180
181 p->next = pool_head; 181 p->next = pool_head;
182 pool_head = p; 182 pool_head = p;
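
The corrected error handling above stops freeing a pointer that was just found to be NULL and instead frees the pool node when its payload allocation fails, closing a small leak. The fixed shape as a self-contained program (the pool struct is sketched from the surrounding code, not copied verbatim):

    #include <stdlib.h>
    #include <stdio.h>
    #include <string.h>

    struct pool {
        struct pool *next;
        void *mem;
    };

    static struct pool *pool_head;

    static void *my_malloc(size_t size)
    {
        struct pool *p = calloc(1, sizeof(*p));

        if (!p)
            return NULL;            /* nothing allocated yet, nothing to undo */

        p->mem = calloc(1, size);
        if (!p->mem) {
            free(p);                /* undo the first allocation ...          */
            return NULL;            /* ... before reporting failure           */
        }

        p->next = pool_head;        /* link the node into the pool list       */
        pool_head = p;
        return p->mem;
    }

    int main(void)
    {
        char *s = my_malloc(16);

        if (!s)
            return 1;
        strcpy(s, "pooled");
        puts(s);
        free(pool_head->mem);       /* tear the demo pool back down           */
        free(pool_head);
        return 0;
    }
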
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
149 case USB_SPEED_WIRELESS: 149 case USB_SPEED_WIRELESS:
150 break; 150 break;
151 default: 151 default:
152 pr_err("speed %d\n", speed); 152 pr_err("Failed attach request for unsupported USB speed: %s\n",
153 usb_speed_string(speed));
153 return -EINVAL; 154 return -EINVAL;
154 } 155 }
155 156
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
1061 goto out; 1061 goto out;
1062 } 1062 }
1063 1063
1064 if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) { 1064 if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
1065 ret = -EINVAL; 1065 ret = -EINVAL;
1066 goto out; 1066 goto out;
1067 } 1067 }
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
507 u32 last_statsn; 507 u32 last_statsn;
508 int found_cmd; 508 int found_cmd;
509 509
510 if (conn->exp_statsn > begrun) { 510 if (!begrun) {
511 begrun = conn->exp_statsn;
512 } else if (conn->exp_statsn > begrun) {
511 pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" 513 pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
512 " 0x%08x but already got ExpStatSN: 0x%08x on CID:" 514 " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
513 " %hu.\n", begrun, runlength, conn->exp_statsn, 515 " %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 12da9b386169..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent(
500 500
501 if (segment_mult) { 501 if (segment_mult) {
502 u64 tmp = lba; 502 u64 tmp = lba;
503 start_lba = sector_div(tmp, segment_size * segment_mult); 503 start_lba = do_div(tmp, segment_size * segment_mult);
504 504
505 last_lba = first_lba + segment_size - 1; 505 last_lba = first_lba + segment_size - 1;
506 if (start_lba >= first_lba && 506 if (start_lba >= first_lba &&
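
The switch from sector_div() to do_div() is about the divisor: segment_size * segment_mult is an ordinary integer product, and in the kernel do_div() divides its 64-bit first argument in place and returns the remainder, which is what start_lba wants here. A userspace rendering of that computation (the LBA and segment numbers are invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t lba = 1000000;
        uint32_t segment_size = 1024, segment_mult = 4;
        uint64_t tmp = lba;

        /* equivalent of: start_lba = do_div(tmp, segment_size * segment_mult);
         * do_div() leaves the quotient in tmp and returns the remainder.      */
        uint32_t start_lba = (uint32_t)(tmp % (segment_size * segment_mult));
        tmp /= segment_size * segment_mult;

        printf("segment index %llu, offset in segment %u\n",
               (unsigned long long)tmp, start_lba);
        return 0;
    }
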
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2009 struct t10_reservation *pr_tmpl = &dev->t10_pr; 2009 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2010 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 2010 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
2011 sense_reason_t ret = TCM_NO_SENSE; 2011 sense_reason_t ret = TCM_NO_SENSE;
2012 int pr_holder = 0; 2012 int pr_holder = 0, type;
2013 2013
2014 if (!se_sess || !se_lun) { 2014 if (!se_sess || !se_lun) {
2015 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2015 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2131 ret = TCM_RESERVATION_CONFLICT; 2131 ret = TCM_RESERVATION_CONFLICT;
2132 goto out; 2132 goto out;
2133 } 2133 }
2134 type = pr_reg->pr_res_type;
2134 2135
2135 spin_lock(&pr_tmpl->registration_lock); 2136 spin_lock(&pr_tmpl->registration_lock);
2136 /* 2137 /*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2161 * Release the calling I_T Nexus registration now.. 2162 * Release the calling I_T Nexus registration now..
2162 */ 2163 */
2163 __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); 2164 __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
2165 pr_reg = NULL;
2164 2166
2165 /* 2167 /*
2166 * From spc4r17, section 5.7.11.3 Unregistering 2168 * From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2174 * RESERVATIONS RELEASED. 2176 * RESERVATIONS RELEASED.
2175 */ 2177 */
2176 if (pr_holder && 2178 if (pr_holder &&
2177 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || 2179 (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
2178 pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { 2180 type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
2179 list_for_each_entry(pr_reg_p, 2181 list_for_each_entry(pr_reg_p,
2180 &pr_tmpl->registration_list, 2182 &pr_tmpl->registration_list,
2181 pr_reg_list) { 2183 pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2194 ret = core_scsi3_update_and_write_aptpl(dev, aptpl); 2196 ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
2195 2197
2196out: 2198out:
2197 core_scsi3_put_pr_reg(pr_reg); 2199 if (pr_reg)
2200 core_scsi3_put_pr_reg(pr_reg);
2198 return ret; 2201 return ret;
2199} 2202}
2200 2203
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fa3cae393e13..a4489444ffbc 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,12 +1074,19 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1074 struct scatterlist *psg; 1074 struct scatterlist *psg;
1075 void *paddr, *addr; 1075 void *paddr, *addr;
1076 unsigned int i, len, left; 1076 unsigned int i, len, left;
1077 unsigned int offset = 0;
1077 1078
1078 left = sectors * dev->prot_length; 1079 left = sectors * dev->prot_length;
1079 1080
1080 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1081 1082
1082 len = min(psg->length, left); 1083 len = min(psg->length, left);
1084 if (offset >= sg->length) {
1085 sg = sg_next(sg);
1086 offset = 0;
1087 sg_off = sg->offset;
1088 }
1089
1083 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1090 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1084 addr = kmap_atomic(sg_page(sg)) + sg_off; 1091 addr = kmap_atomic(sg_page(sg)) + sg_off;
1085 1092
@@ -1089,6 +1096,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1089 memcpy(addr, paddr, len); 1096 memcpy(addr, paddr, len);
1090 1097
1091 left -= len; 1098 left -= len;
1099 offset += len;
1092 kunmap_atomic(paddr); 1100 kunmap_atomic(paddr);
1093 kunmap_atomic(addr); 1101 kunmap_atomic(addr);
1094 } 1102 }
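
The added offset bookkeeping above advances to the next destination segment once the running offset reaches the current segment's length, instead of funnelling every sector's protection data through the first segment. A plain-C sketch of that walk (fixed arrays stand in for scatterlists, and as in the hunk a source chunk is assumed not to straddle a destination boundary):

    #include <stdio.h>
    #include <string.h>

    struct seg { char *buf; size_t len; };

    /* Copy 'total' bytes from the src segments into the dst segments,
     * advancing dst by a running offset as the hunk above does.        */
    static void copy_segmented(struct seg *src, size_t nsrc,
                               struct seg *dst, size_t ndst, size_t total)
    {
        size_t d = 0, offset = 0, left = total;

        for (size_t s = 0; s < nsrc && left; s++) {
            size_t len = src[s].len < left ? src[s].len : left;

            if (offset >= dst[d].len) {     /* current dst segment is full */
                if (++d >= ndst)
                    return;
                offset = 0;
            }
            memcpy(dst[d].buf + offset, src[s].buf, len);
            left -= len;
            offset += len;
        }
    }

    int main(void)
    {
        char a[] = "abcd", b[] = "efgh", d1[4], d2[4];
        struct seg src[] = { { a, 4 }, { b, 4 } };
        struct seg dst[] = { { d1, 4 }, { d2, 4 } };

        copy_segmented(src, 2, dst, 2, 8);
        printf("%.4s%.4s\n", d1, d2);       /* prints abcdefgh */
        return 0;
    }
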
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 43c5ca9878bc..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -440,8 +440,8 @@ check_scsi_name:
440 padding = ((-scsi_target_len) & 3); 440 padding = ((-scsi_target_len) & 3);
441 if (padding) 441 if (padding)
442 scsi_target_len += padding; 442 scsi_target_len += padding;
443 if (scsi_name_len > 256) 443 if (scsi_target_len > 256)
444 scsi_name_len = 256; 444 scsi_target_len = 256;
445 445
446 buf[off-1] = scsi_target_len; 446 buf[off-1] = scsi_target_len;
447 off += scsi_target_len; 447 off += scsi_target_len;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c50fd9f11aab..24b4f65d8777 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
669 return; 669 return;
670 } 670 }
671 671
672 if (!success)
673 cmd->transport_state |= CMD_T_FAILED;
674
675 /* 672 /*
676 * Check for case where an explicit ABORT_TASK has been received 673 * Check for case where an explicit ABORT_TASK has been received
677 * and transport_wait_for_tasks() will be waiting for completion.. 674 * and transport_wait_for_tasks() will be waiting for completion..
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
681 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 678 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
682 complete(&cmd->t_transport_stop_comp); 679 complete(&cmd->t_transport_stop_comp);
683 return; 680 return;
684 } else if (cmd->transport_state & CMD_T_FAILED) { 681 } else if (!success) {
685 INIT_WORK(&cmd->work, target_complete_failure_work); 682 INIT_WORK(&cmd->work, target_complete_failure_work);
686 } else { 683 } else {
687 INIT_WORK(&cmd->work, target_complete_ok_work); 684 INIT_WORK(&cmd->work, target_complete_ok_work);
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
255 /* Register as a vio device to receive callbacks */ 255 /* Register as a vio device to receive callbacks */
256 return platform_driver_register(&hvc_opal_driver); 256 return platform_driver_register(&hvc_opal_driver);
257} 257}
258module_init(hvc_opal_init); 258device_initcall(hvc_opal_init);
259
260static void __exit hvc_opal_exit(void)
261{
262 platform_driver_unregister(&hvc_opal_driver);
263}
264module_exit(hvc_opal_exit);
265 259
266static void udbg_opal_putc(char c) 260static void udbg_opal_putc(char c)
267{ 261{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
102 102
103 return 0; 103 return 0;
104} 104}
105module_init(hvc_rtas_init); 105device_initcall(hvc_rtas_init);
106
107/* This will tear down the tty portion of the driver */
108static void __exit hvc_rtas_exit(void)
109{
110 /* Really the fun isn't over until the worker thread breaks down and
111 * the tty cleans up */
112 if (hvc_rtas_dev)
113 hvc_remove(hvc_rtas_dev);
114}
115module_exit(hvc_rtas_exit);
116 106
117/* This will happen prior to module init. There is no tty at this time? */ 107/* This will happen prior to module init. There is no tty at this time? */
118static int __init hvc_rtas_console_init(void) 108static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
80 80
81 return 0; 81 return 0;
82} 82}
83module_init(hvc_udbg_init); 83device_initcall(hvc_udbg_init);
84
85static void __exit hvc_udbg_exit(void)
86{
87 if (hvc_udbg_dev)
88 hvc_remove(hvc_udbg_dev);
89}
90module_exit(hvc_udbg_exit);
91 84
92static int __init hvc_udbg_console_init(void) 85static int __init hvc_udbg_console_init(void)
93{ 86{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
561#endif 561#endif
562 return r; 562 return r;
563} 563}
564 564device_initcall(xen_hvc_init);
565static void __exit xen_hvc_fini(void)
566{
567 struct xencons_info *entry, *next;
568
569 if (list_empty(&xenconsoles))
570 return;
571
572 list_for_each_entry_safe(entry, next, &xenconsoles, list) {
573 xen_console_remove(entry);
574 }
575}
576 565
577static int xen_cons_init(void) 566static int xen_cons_init(void)
578{ 567{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
598 hvc_instantiate(HVC_COOKIE, 0, ops); 587 hvc_instantiate(HVC_COOKIE, 0, ops);
599 return 0; 588 return 0;
600} 589}
601
602
603module_init(xen_hvc_init);
604module_exit(xen_hvc_fini);
605console_initcall(xen_cons_init); 590console_initcall(xen_cons_init);
606 591
607#ifdef CONFIG_EARLY_PRINTK 592#ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
1090{ 1090{
1091 unsigned int addr = 0; 1091 unsigned int addr = 0;
1092 unsigned int modem = 0; 1092 unsigned int modem = 0;
1093 unsigned int brk = 0;
1093 struct gsm_dlci *dlci; 1094 struct gsm_dlci *dlci;
1094 int len = clen; 1095 int len = clen;
1095 u8 *dp = data; 1096 u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
1116 if (len == 0) 1117 if (len == 0)
1117 return; 1118 return;
1118 } 1119 }
1120 len--;
1121 if (len > 0) {
1122 while (gsm_read_ea(&brk, *dp++) == 0) {
1123 len--;
1124 if (len == 0)
1125 return;
1126 }
1127 modem <<= 7;
1128 modem |= (brk & 0x7f);
1129 }
1119 tty = tty_port_tty_get(&dlci->port); 1130 tty = tty_port_tty_get(&dlci->port);
1120 gsm_process_modem(tty, dlci, modem, clen); 1131 gsm_process_modem(tty, dlci, modem, clen);
1121 if (tty) { 1132 if (tty) {
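
The extra hunk above consumes one more EA-encoded field, the optional break octet, after the address and modem octets. In this encoding (3GPP TS 27.010 style) each octet contributes seven value bits and the low bit marks the final octet. A standalone decoder in the same spirit as gsm_read_ea (a sketch, not a copy of the driver helper):

    #include <stdio.h>

    /* Decode one EA-encoded value: 7 value bits per octet, low bit set on
     * the last octet. Returns octets consumed, 0 if len ran out first.   */
    static int read_ea(const unsigned char *buf, int len, unsigned int *val)
    {
        int used = 0;

        *val = 0;
        while (used < len) {
            unsigned char c = buf[used++];

            *val <<= 7;
            *val |= c >> 1;         /* accumulate the 7 high bits        */
            if (c & 1)              /* EA bit: this was the last octet   */
                return used;
        }
        return 0;
    }

    int main(void)
    {
        /* two octets: 0x02 (EA=0, value bits 1), 0x85 (EA=1, value bits 0x42) */
        unsigned char data[] = { 0x02, 0x85 };
        unsigned int v;
        int n = read_ea(data, 2, &v);

        printf("consumed %d octets, value %u\n", n, v);   /* value 194 (0xc2) */
        return 0;
    }
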
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017aa4434..d15624c1b751 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty)
817 struct n_tty_data *ldata = tty->disc_data; 817 struct n_tty_data *ldata = tty->disc_data;
818 size_t echoed; 818 size_t echoed;
819 819
820 if ((!L_ECHO(tty) && !L_ECHONL(tty)) || 820 if (ldata->echo_mark == ldata->echo_tail)
821 ldata->echo_mark == ldata->echo_tail)
822 return; 821 return;
823 822
824 mutex_lock(&ldata->output_lock); 823 mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
1244 if (L_ECHO(tty)) { 1243 if (L_ECHO(tty)) {
1245 echo_char(c, tty); 1244 echo_char(c, tty);
1246 commit_echoes(tty); 1245 commit_echoes(tty);
1247 } 1246 } else
1247 process_echoes(tty);
1248 isig(signal, tty); 1248 isig(signal, tty);
1249 return; 1249 return;
1250} 1250}
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1274 if (I_IXON(tty)) { 1274 if (I_IXON(tty)) {
1275 if (c == START_CHAR(tty)) { 1275 if (c == START_CHAR(tty)) {
1276 start_tty(tty); 1276 start_tty(tty);
1277 commit_echoes(tty); 1277 process_echoes(tty);
1278 return 0; 1278 return 0;
1279 } 1279 }
1280 if (c == STOP_CHAR(tty)) { 1280 if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1820 * Fix tty hang when I_IXON(tty) is cleared, but the tty 1820 * Fix tty hang when I_IXON(tty) is cleared, but the tty
1821 * been stopped by STOP_CHAR(tty) before it. 1821 * been stopped by STOP_CHAR(tty) before it.
1822 */ 1822 */
1823 if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) 1823 if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
1824 start_tty(tty); 1824 start_tty(tty);
1825 process_echoes(tty);
1826 }
1825 1827
1826 /* The termios change make the tty ready for I/O */ 1828 /* The termios change make the tty ready for I/O */
1827 if (waitqueue_active(&tty->write_wait)) 1829 if (waitqueue_active(&tty->write_wait))
@@ -1896,7 +1898,7 @@ err:
1896static inline int input_available_p(struct tty_struct *tty, int poll) 1898static inline int input_available_p(struct tty_struct *tty, int poll)
1897{ 1899{
1898 struct n_tty_data *ldata = tty->disc_data; 1900 struct n_tty_data *ldata = tty->disc_data;
1899 int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1; 1901 int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
1900 1902
1901 if (ldata->icanon && !L_EXTPROC(tty)) { 1903 if (ldata->icanon && !L_EXTPROC(tty)) {
1902 if (ldata->canon_head != ldata->read_tail) 1904 if (ldata->canon_head != ldata->read_tail)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd709a722..69932b7556cf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
2433 serial_dl_write(up, quot); 2433 serial_dl_write(up, quot);
2434 2434
2435 /* 2435 /*
2436 * XR17V35x UARTs have an extra fractional divisor register (DLD)
2437 *
2438 * We need to recalculate all of the registers, because DLM and DLL
2439 * are already rounded to a whole integer.
2440 *
2441 * When recalculating we use a 32x clock instead of a 16x clock to
2442 * allow 1-bit for rounding in the fractional part.
2443 */
2444 if (up->port.type == PORT_XR17V35X) {
2445 unsigned int baud_x32 = (port->uartclk * 2) / baud;
2446 u16 quot = baud_x32 / 32;
2447 u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
2448
2449 serial_dl_write(up, quot);
2450 serial_port_out(port, 0x2, quot_frac & 0xf);
2451 }
2452
2453 /*
2436 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR 2454 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
2437 * is written without DLAB set, this mode will be disabled. 2455 * is written without DLAB set, this mode will be disabled.
2438 */ 2456 */
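
The new XR17V35x branch recomputes the divisor at 32x the baud rate so the remainder can be rounded into the 4-bit fractional register (DLD), since DLL/DLM alone have already been rounded to a whole divisor. The arithmetic, worked in userspace (the 62.5 MHz clock is only an example input, not a statement about any particular board):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned int uartclk = 62500000, baud = 115200;

        unsigned int baud_x32 = (uartclk * 2) / baud;  /* divisor in 1/32 bit times */
        unsigned int quot = baud_x32 / 32;             /* whole part -> DLL/DLM     */
        unsigned int quot_frac =
                DIV_ROUND_CLOSEST(baud_x32 % 32, 2);   /* leftover 32nds -> DLD[3:0] */

        /* with these inputs: baud_x32=1085, quot=33, quot_frac=15 */
        printf("baud_x32=%u quot=%u frac=%u\n", baud_x32, quot, quot_frac);
        return 0;
    }
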
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e646100..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
391 return 0; 391 return 0;
392} 392}
393 393
394#ifdef CONFIG_PM 394#ifdef CONFIG_PM_SLEEP
395static int dw8250_suspend(struct device *dev) 395static int dw8250_suspend(struct device *dev)
396{ 396{
397 struct dw8250_data *data = dev_get_drvdata(dev); 397 struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev)
409 409
410 return 0; 410 return 0;
411} 411}
412#endif /* CONFIG_PM */ 412#endif /* CONFIG_PM_SLEEP */
413 413
414#ifdef CONFIG_PM_RUNTIME 414#ifdef CONFIG_PM_RUNTIME
415static int dw8250_runtime_suspend(struct device *dev) 415static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228eed3b6f..0ff3e3624d4c 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
783{ 783{
784 unsigned int bar; 784 unsigned int bar;
785 785
786 if ((priv->dev->subsystem_device & 0xff00) == 0x3000) { 786 if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
787 (priv->dev->subsystem_device & 0xff00) == 0x3000) {
787 /* netmos apparently orders BARs by datasheet layout, so serial 788 /* netmos apparently orders BARs by datasheet layout, so serial
788 * ports get BARs 0 and 3 (or 1 and 4 for memmapped) 789 * ports get BARs 0 and 3 (or 1 and 4 for memmapped)
789 */ 790 */
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..77f035158d6c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port)
738 return retval; 738 return retval;
739 } 739 }
740 disable_irq(up->wakeirq); 740 disable_irq(up->wakeirq);
741 } else {
742 dev_info(up->port.dev, "no wakeirq for uart%d\n",
743 up->port.line);
744 } 741 }
745 742
746 dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); 743 dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
1604 flags & SER_RS485_RTS_AFTER_SEND); 1601 flags & SER_RS485_RTS_AFTER_SEND);
1605 if (ret < 0) 1602 if (ret < 0)
1606 return ret; 1603 return ret;
1607 } else 1604 } else if (up->rts_gpio == -EPROBE_DEFER) {
1605 return -EPROBE_DEFER;
1606 } else {
1608 up->rts_gpio = -EINVAL; 1607 up->rts_gpio = -EINVAL;
1608 }
1609 1609
1610 if (of_property_read_u32_array(np, "rs485-rts-delay", 1610 if (of_property_read_u32_array(np, "rs485-rts-delay",
1611 rs485_delay, 2) == 0) { 1611 rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev)
1687 up->port.iotype = UPIO_MEM; 1687 up->port.iotype = UPIO_MEM;
1688 up->port.irq = uartirq; 1688 up->port.irq = uartirq;
1689 up->wakeirq = wakeirq; 1689 up->wakeirq = wakeirq;
1690 if (!up->wakeirq)
1691 dev_info(up->port.dev, "no wakeirq for uart%d\n",
1692 up->port.line);
1690 1693
1691 up->port.regshift = 2; 1694 up->port.regshift = 2;
1692 up->port.fifosize = 64; 1695 up->port.fifosize = 64;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd101a7..b7bfe24d4ebc 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
542 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, 542 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
543 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | 543 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
544 SIRFUART_IO_MODE); 544 SIRFUART_IO_MODE);
545 sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
546 spin_unlock_irqrestore(&sirfport->rx_lock, flags); 545 spin_unlock_irqrestore(&sirfport->rx_lock, flags);
546 spin_lock(&port->lock);
547 sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
548 spin_unlock(&port->lock);
547 if (sirfport->rx_io_count == 4) { 549 if (sirfport->rx_io_count == 4) {
548 spin_lock_irqsave(&sirfport->rx_lock, flags); 550 spin_lock_irqsave(&sirfport->rx_lock, flags);
549 sirfport->rx_io_count = 0; 551 sirfport->rx_io_count = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..23b5d32954bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
1164 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, 1164 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
1165 vc->vc_screenbuf_size >> 1); 1165 vc->vc_screenbuf_size >> 1);
1166 set_origin(vc); 1166 set_origin(vc);
1167 if (CON_IS_VISIBLE(vc))
1168 update_screen(vc);
1167 /* fall through */ 1169 /* fall through */
1168 case 2: /* erase whole display */ 1170 case 2: /* erase whole display */
1169 count = vc->vc_cols * vc->vc_rows; 1171 count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 80de2f88ed2c..4ab2cb62dfce 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -105,7 +105,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
105 105
106 do { 106 do {
107 /* flush any pending transfer */ 107 /* flush any pending transfer */
108 hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n)); 108 hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
109 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n))) 109 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
110 cpu_relax(); 110 cpu_relax();
111 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n))); 111 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
@@ -205,7 +205,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
205 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num))) 205 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
206 return -EAGAIN; 206 return -EAGAIN;
207 207
208 hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n)); 208 hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
209 209
210 while (hw_read(ci, OP_ENDPTPRIME, BIT(n))) 210 while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
211 cpu_relax(); 211 cpu_relax();
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558cef66..ab90a0156828 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
63 dynid->id.idProduct = idProduct; 63 dynid->id.idProduct = idProduct;
64 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; 64 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
65 if (fields > 2 && bInterfaceClass) { 65 if (fields > 2 && bInterfaceClass) {
66 if (bInterfaceClass > 255) 66 if (bInterfaceClass > 255) {
67 return -EINVAL; 67 retval = -EINVAL;
68 goto fail;
69 }
68 70
69 dynid->id.bInterfaceClass = (u8)bInterfaceClass; 71 dynid->id.bInterfaceClass = (u8)bInterfaceClass;
70 dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; 72 dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
73 if (fields > 4) { 75 if (fields > 4) {
74 const struct usb_device_id *id = id_table; 76 const struct usb_device_id *id = id_table;
75 77
76 if (!id) 78 if (!id) {
77 return -ENODEV; 79 retval = -ENODEV;
80 goto fail;
81 }
78 82
79 for (; id->match_flags; id++) 83 for (; id->match_flags; id++)
80 if (id->idVendor == refVendor && id->idProduct == refProduct) 84 if (id->idVendor == refVendor && id->idProduct == refProduct)
81 break; 85 break;
82 86
83 if (id->match_flags) 87 if (id->match_flags) {
84 dynid->id.driver_info = id->driver_info; 88 dynid->id.driver_info = id->driver_info;
85 else 89 } else {
86 return -ENODEV; 90 retval = -ENODEV;
91 goto fail;
92 }
87 } 93 }
88 94
89 spin_lock(&dynids->lock); 95 spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
95 if (retval) 101 if (retval)
96 return retval; 102 return retval;
97 return count; 103 return count;
104
105fail:
106 kfree(dynid);
107 return retval;
98} 108}
99EXPORT_SYMBOL_GPL(usb_store_new_id); 109EXPORT_SYMBOL_GPL(usb_store_new_id);
100 110
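
The reworked paths above route every post-allocation failure through a single fail label that frees dynid; the old code returned directly and leaked it. The same structure as a small standalone program (the parsing is faked, only the cleanup shape is the point):

    #include <stdlib.h>
    #include <stdio.h>

    struct dynid { unsigned vid, pid, cls; };

    static int store_new_id(const char *buf, struct dynid **out)
    {
        int retval;
        struct dynid *dynid = calloc(1, sizeof(*dynid));

        if (!dynid)
            return -12;                     /* -ENOMEM, nothing to free yet        */

        if (sscanf(buf, "%x %x %x", &dynid->vid, &dynid->pid, &dynid->cls) < 2) {
            retval = -22;                   /* -EINVAL                             */
            goto fail;
        }
        if (dynid->cls > 255) {
            retval = -22;                   /* class must fit in a byte            */
            goto fail;
        }

        *out = dynid;                       /* success: ownership passes to caller */
        return 0;

    fail:
        free(dynid);                        /* every error path releases dynid     */
        return retval;
    }

    int main(void)
    {
        struct dynid *id = NULL;
        int ret = store_new_id("046d c534 3", &id);

        printf("ret=%d\n", ret);
        free(id);
        return 0;
    }
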
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea6bfe0..2518c3250750 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
1032 dev_name(&usb_dev->dev), retval); 1032 dev_name(&usb_dev->dev), retval);
1033 return retval; 1033 return retval;
1034 } 1034 }
1035 usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
1036 } 1035 }
1037 1036
1038 retval = usb_new_device (usb_dev); 1037 retval = usb_new_device (usb_dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba885978d..64ea21971be2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
128 return usb_get_intfdata(hdev->actconfig->interface[0]); 128 return usb_get_intfdata(hdev->actconfig->interface[0]);
129} 129}
130 130
131int usb_device_supports_lpm(struct usb_device *udev) 131static int usb_device_supports_lpm(struct usb_device *udev)
132{ 132{
133 /* USB 2.1 (and greater) devices indicate LPM support through 133 /* USB 2.1 (and greater) devices indicate LPM support through
134 * their USB 2.0 Extended Capabilities BOS descriptor. 134 * their USB 2.0 Extended Capabilities BOS descriptor.
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
149 "Power management will be impacted.\n"); 149 "Power management will be impacted.\n");
150 return 0; 150 return 0;
151 } 151 }
152
153 /* udev is root hub */
154 if (!udev->parent)
155 return 1;
156
157 if (udev->parent->lpm_capable) 152 if (udev->parent->lpm_capable)
158 return 1; 153 return 1;
159 154
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..823857767a16 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
35 unsigned int size); 35 unsigned int size);
36extern int usb_get_bos_descriptor(struct usb_device *dev); 36extern int usb_get_bos_descriptor(struct usb_device *dev);
37extern void usb_release_bos_descriptor(struct usb_device *dev); 37extern void usb_release_bos_descriptor(struct usb_device *dev);
38extern int usb_device_supports_lpm(struct usb_device *udev);
39extern char *usb_cache_string(struct usb_device *udev, int index); 38extern char *usb_cache_string(struct usb_device *udev, int index);
40extern int usb_set_configuration(struct usb_device *dev, int configuration); 39extern int usb_set_configuration(struct usb_device *dev, int configuration);
41extern int usb_choose_configuration(struct usb_device *udev); 40extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87f94b4..1d129884cc39 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
216 int retval = 0; 216 int retval = 0;
217 217
218 if (!select_phy) 218 if (!select_phy)
219 return -ENODEV; 219 return 0;
220 220
221 usbcfg = readl(hsotg->regs + GUSBCFG); 221 usbcfg = readl(hsotg->regs + GUSBCFG);
222 222
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d43b35..4d918ed8d343 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
2565 struct usb_host_endpoint *ep) 2565 struct usb_host_endpoint *ep)
2566{ 2566{
2567 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 2567 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
2568 int is_control = usb_endpoint_xfer_control(&ep->desc);
2569 int is_out = usb_endpoint_dir_out(&ep->desc);
2570 int epnum = usb_endpoint_num(&ep->desc);
2571 struct usb_device *udev;
2572 unsigned long flags; 2568 unsigned long flags;
2573 2569
2574 dev_dbg(hsotg->dev, 2570 dev_dbg(hsotg->dev,
2575 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", 2571 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
2576 ep->desc.bEndpointAddress); 2572 ep->desc.bEndpointAddress);
2577 2573
2578 udev = to_usb_device(hsotg->dev);
2579
2580 spin_lock_irqsave(&hsotg->lock, flags); 2574 spin_lock_irqsave(&hsotg->lock, flags);
2581
2582 usb_settoggle(udev, epnum, is_out, 0);
2583 if (is_control)
2584 usb_settoggle(udev, epnum, !is_out, 0);
2585 dwc2_hcd_endpoint_reset(hsotg, ep); 2575 dwc2_hcd_endpoint_reset(hsotg, ep);
2586
2587 spin_unlock_irqrestore(&hsotg->lock, flags); 2576 spin_unlock_irqrestore(&hsotg->lock, flags);
2588} 2577}
2589 2578
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3f2cf0..eaba547ce26b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
124 int retval; 124 int retval;
125 int irq; 125 int irq;
126 126
127 if (usb_disabled())
128 return -ENODEV;
129
127 match = of_match_device(dwc2_of_match_table, &dev->dev); 130 match = of_match_device(dwc2_of_match_table, &dev->dev);
128 if (match && match->data) { 131 if (match && match->data) {
129 params = match->data; 132 params = match->data;
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 888fbb43b338..e969eb809a85 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -360,24 +360,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
360 bcm_writel(val, udc->iudma_regs + off); 360 bcm_writel(val, udc->iudma_regs + off);
361} 361}
362 362
363static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off) 363static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
364{ 364{
365 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 365 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
366 (ENETDMA_CHAN_WIDTH * chan));
366} 367}
367 368
368static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 369static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
370 int chan)
369{ 371{
370 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 372 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
373 (ENETDMA_CHAN_WIDTH * chan));
371} 374}
372 375
373static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off) 376static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
374{ 377{
375 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 378 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
379 (ENETDMA_CHAN_WIDTH * chan));
376} 380}
377 381
378static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 382static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
383 int chan)
379{ 384{
380 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 385 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
386 (ENETDMA_CHAN_WIDTH * chan));
381} 387}
382 388
383static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled) 389static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
@@ -638,7 +644,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
638 } while (!last_bd); 644 } while (!last_bd);
639 645
640 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK, 646 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
641 ENETDMAC_CHANCFG_REG(iudma->ch_idx)); 647 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
642} 648}
643 649
644/** 650/**
@@ -694,9 +700,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
694 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num)); 700 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
695 701
696 /* stop DMA, then wait for the hardware to wrap up */ 702 /* stop DMA, then wait for the hardware to wrap up */
697 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx)); 703 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
698 704
699 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) & 705 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
700 ENETDMAC_CHANCFG_EN_MASK) { 706 ENETDMAC_CHANCFG_EN_MASK) {
701 udelay(1); 707 udelay(1);
702 708
@@ -713,10 +719,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
713 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n", 719 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
714 ch_idx); 720 ch_idx);
715 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK, 721 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
716 ENETDMAC_CHANCFG_REG(ch_idx)); 722 ENETDMAC_CHANCFG_REG, ch_idx);
717 } 723 }
718 } 724 }
719 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx)); 725 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
720 726
721 /* don't leave "live" HW-owned entries for the next guy to step on */ 727 /* don't leave "live" HW-owned entries for the next guy to step on */
722 for (d = iudma->bd_ring; d <= iudma->end_bd; d++) 728 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
@@ -728,11 +734,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
728 734
729 /* set up IRQs, UBUS burst size, and BD base for this channel */ 735 /* set up IRQs, UBUS burst size, and BD base for this channel */
730 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 736 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
731 ENETDMAC_IRMASK_REG(ch_idx)); 737 ENETDMAC_IRMASK_REG, ch_idx);
732 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx)); 738 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
733 739
734 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx)); 740 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
735 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx)); 741 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
736} 742}
737 743
738/** 744/**
@@ -2035,7 +2041,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2035 spin_lock(&udc->lock); 2041 spin_lock(&udc->lock);
2036 2042
2037 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 2043 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2038 ENETDMAC_IR_REG(iudma->ch_idx)); 2044 ENETDMAC_IR_REG, iudma->ch_idx);
2039 bep = iudma->bep; 2045 bep = iudma->bep;
2040 rc = iudma_read(udc, iudma); 2046 rc = iudma_read(udc, iudma);
2041 2047
@@ -2175,18 +2181,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2175 seq_printf(s, " [ep%d]:\n", 2181 seq_printf(s, " [ep%d]:\n",
2176 max_t(int, iudma_defaults[ch_idx].ep_num, 0)); 2182 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2177 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n", 2183 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2178 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)), 2184 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2179 usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)), 2185 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2180 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)), 2186 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2181 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx))); 2187 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2182 2188
2183 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx)); 2189 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2184 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx)); 2190 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2185 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n", 2191 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2186 usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)), 2192 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2187 sram2 >> 16, sram2 & 0xffff, 2193 sram2 >> 16, sram2 & 0xffff,
2188 sram3 >> 16, sram3 & 0xffff, 2194 sram3 >> 16, sram3 & 0xffff,
2189 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx))); 2195 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2190 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used, 2196 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2191 iudma->n_bds); 2197 iudma->n_bds);
2192 2198
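Note on the bcm63xx_udc hunks above: the per-channel register macros are dropped in favour of plain register offsets plus an explicit channel argument, so the accessors now compute the address as IUDMA_DMAC_OFFSET (or IUDMA_DMAS_OFFSET) + reg + ENETDMA_CHAN_WIDTH * chan. A minimal userspace sketch of that offset arithmetic follows; the constant values are illustrative stand-ins, not the real bcm63xx definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the real constants live in the bcm63xx headers. */
    #define IUDMA_DMAC_OFFSET    0x200
    #define ENETDMA_CHAN_WIDTH   0x10
    #define ENETDMAC_CHANCFG_REG 0x00
    #define ENETDMAC_IR_REG      0x04

    /* Mirror of the new accessors: per-channel register offset =
     * block offset + register offset + channel stride * channel index. */
    static uint32_t dmac_reg_offset(uint32_t reg, int chan)
    {
            return IUDMA_DMAC_OFFSET + reg + ENETDMA_CHAN_WIDTH * chan;
    }

    int main(void)
    {
            printf("CHANCFG, chan 3 -> 0x%x\n", dmac_reg_offset(ENETDMAC_CHANCFG_REG, 3));
            printf("IR,      chan 3 -> 0x%x\n", dmac_reg_offset(ENETDMAC_IR_REG, 3));
            return 0;
    }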
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 306a2b52125c..2b4334394076 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -585,7 +585,6 @@ static ssize_t ffs_epfile_io(struct file *file,
585 char __user *buf, size_t len, int read) 585 char __user *buf, size_t len, int read)
586{ 586{
587 struct ffs_epfile *epfile = file->private_data; 587 struct ffs_epfile *epfile = file->private_data;
588 struct usb_gadget *gadget = epfile->ffs->gadget;
589 struct ffs_ep *ep; 588 struct ffs_ep *ep;
590 char *data = NULL; 589 char *data = NULL;
591 ssize_t ret, data_len; 590 ssize_t ret, data_len;
@@ -622,6 +621,12 @@ static ssize_t ffs_epfile_io(struct file *file,
622 /* Allocate & copy */ 621 /* Allocate & copy */
623 if (!halt) { 622 if (!halt) {
624 /* 623 /*
624 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
625 * before the waiting completes, so do not assign to 'gadget' earlier
626 */
627 struct usb_gadget *gadget = epfile->ffs->gadget;
628
629 /*
625 * Controller may require buffer size to be aligned to 630 * Controller may require buffer size to be aligned to
626 * maxpacketsize of an out endpoint. 631 * maxpacketsize of an out endpoint.
627 */ 632 */
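The comment added to ffs_epfile_io() above carries the whole fix: the function may sleep waiting for the endpoint to be enabled, and epfile->ffs->gadget can become NULL in the meantime, so the pointer must be loaded only after the wait. Below is a hedged userspace analogue of the same rule (do not cache a shared pointer across a blocking wait), using pthreads and invented names rather than anything from f_fs.c; build with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    /* Toy stand-ins; nothing below is the real f_fs code. */
    struct gadget { int maxpacket; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
    static struct gadget *shared_gadget;   /* another thread may clear this */
    static int ep_enabled;

    static int do_io(void)
    {
            pthread_mutex_lock(&lock);
            while (!ep_enabled)
                    pthread_cond_wait(&ready, &lock);  /* may sleep for a long time */

            /* Load the shared pointer only after the wait completes; a copy taken
             * before pthread_cond_wait() could point at an object that is gone. */
            struct gadget *g = shared_gadget;
            int ret = g ? g->maxpacket : -1;

            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            static struct gadget g = { .maxpacket = 512 };

            pthread_mutex_lock(&lock);
            shared_gadget = &g;
            ep_enabled = 1;
            pthread_mutex_unlock(&lock);

            printf("maxpacket = %d\n", do_io());
            return 0;
    }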
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index bf7a56b6d48a..69b76efd11e9 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1157,7 +1157,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
1157 1157
1158 usb_gadget_set_selfpowered(gadget); 1158 usb_gadget_set_selfpowered(gadget);
1159 1159
1160 if (gadget->is_otg) { 1160 if (gadget_is_otg(gadget)) {
1161 otg_descriptor.bmAttributes |= USB_OTG_HNP; 1161 otg_descriptor.bmAttributes |= USB_OTG_HNP;
1162 printer_cfg_driver.descriptors = otg_desc; 1162 printer_cfg_driver.descriptors = otg_desc;
1163 printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; 1163 printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index f04b2c3154de..dd9678f85c58 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,7 +1629,7 @@ static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
1629 ep->ep.desc = NULL; 1629 ep->ep.desc = NULL;
1630 ep->halted = 0; 1630 ep->halted = 0;
1631 INIT_LIST_HEAD(&ep->queue); 1631 INIT_LIST_HEAD(&ep->queue);
1632 usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket); 1632 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
1633 } 1633 }
1634} 1634}
1635 1635
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 47b858fc50b2..7ae0c4d51741 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
238 int port; 238 int port;
239 int mask; 239 int mask;
240 int changed; 240 int changed;
241 bool fs_idle_delay;
241 242
242 ehci_dbg(ehci, "suspend root hub\n"); 243 ehci_dbg(ehci, "suspend root hub\n");
243 244
@@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
272 ehci->bus_suspended = 0; 273 ehci->bus_suspended = 0;
273 ehci->owned_ports = 0; 274 ehci->owned_ports = 0;
274 changed = 0; 275 changed = 0;
276 fs_idle_delay = false;
275 port = HCS_N_PORTS(ehci->hcs_params); 277 port = HCS_N_PORTS(ehci->hcs_params);
276 while (port--) { 278 while (port--) {
277 u32 __iomem *reg = &ehci->regs->port_status [port]; 279 u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
300 } 302 }
301 303
302 if (t1 != t2) { 304 if (t1 != t2) {
305 /*
306 * On some controllers, Wake-On-Disconnect will
307 * generate false wakeup signals until the bus
308 * switches over to full-speed idle. For their
309 * sake, add a delay if we need one.
310 */
311 if ((t2 & PORT_WKDISC_E) &&
312 ehci_port_speed(ehci, t2) ==
313 USB_PORT_STAT_HIGH_SPEED)
314 fs_idle_delay = true;
303 ehci_writel(ehci, t2, reg); 315 ehci_writel(ehci, t2, reg);
304 changed = 1; 316 changed = 1;
305 } 317 }
306 } 318 }
319 spin_unlock_irq(&ehci->lock);
320
321 if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
322 /*
323 * Wait for HCD to enter low-power mode or for the bus
324 * to switch to full-speed idle.
325 */
326 usleep_range(5000, 5500);
327 }
307 328
308 if (changed && ehci->has_tdi_phy_lpm) { 329 if (changed && ehci->has_tdi_phy_lpm) {
309 spin_unlock_irq(&ehci->lock);
310 msleep(5); /* 5 ms for HCD to enter low-power mode */
311 spin_lock_irq(&ehci->lock); 330 spin_lock_irq(&ehci->lock);
312
313 port = HCS_N_PORTS(ehci->hcs_params); 331 port = HCS_N_PORTS(ehci->hcs_params);
314 while (port--) { 332 while (port--) {
315 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; 333 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
322 port, (t3 & HOSTPC_PHCD) ? 340 port, (t3 & HOSTPC_PHCD) ?
323 "succeeded" : "failed"); 341 "succeeded" : "failed");
324 } 342 }
343 spin_unlock_irq(&ehci->lock);
325 } 344 }
326 spin_unlock_irq(&ehci->lock);
327 345
328 /* Apparently some devices need a >= 1-uframe delay here */ 346 /* Apparently some devices need a >= 1-uframe delay here */
329 if (ehci->bus_suspended) 347 if (ehci->bus_suspended)
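The ehci_bus_suspend() rework above exists so that neither msleep() nor the new usleep_range() settle delay runs while ehci->lock is held with interrupts off: the port state is recorded, the lock is dropped, the delay happens, then the lock is retaken. A rough userspace analogue of that "note what you need, unlock, then sleep" pattern, with a pthread spinlock and nanosleep standing in for spin_lock_irq() and usleep_range(); build with -pthread.

    #define _POSIX_C_SOURCE 200809L
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_spinlock_t hw_lock;
    static bool port_changed;          /* toy stand-in for 'changed'/'fs_idle_delay' */

    static void suspend_ports(void)
    {
            bool need_delay;

            pthread_spin_lock(&hw_lock);
            /* ... walk the ports, update registers, note whether a delay is needed ... */
            need_delay = port_changed;
            pthread_spin_unlock(&hw_lock);      /* never sleep with a spinlock held */

            if (need_delay) {
                    /* roughly the 5.0-5.5 ms settle window used in the patch above */
                    struct timespec ts = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };
                    nanosleep(&ts, NULL);
            }

            pthread_spin_lock(&hw_lock);
            /* ... post-delay port handling continues under the lock ... */
            pthread_spin_unlock(&hw_lock);
    }

    int main(void)
    {
            pthread_spin_init(&hw_lock, PTHREAD_PROCESS_PRIVATE);
            port_changed = true;
            suspend_ports();
            pthread_spin_destroy(&hw_lock);
            return 0;
    }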
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38199f2..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
203 addr, (unsigned int)temp); 203 addr, (unsigned int)temp);
204 204
205 addr = &ir_set->erst_base; 205 addr = &ir_set->erst_base;
206 temp_64 = readq(addr); 206 temp_64 = xhci_read_64(xhci, addr);
207 xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", 207 xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
208 addr, temp_64); 208 addr, temp_64);
209 209
210 addr = &ir_set->erst_dequeue; 210 addr = &ir_set->erst_dequeue;
211 temp_64 = readq(addr); 211 temp_64 = xhci_read_64(xhci, addr);
212 xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", 212 xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
213 addr, temp_64); 213 addr, temp_64);
214} 214}
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
412{ 412{
413 u64 val; 413 u64 val;
414 414
415 val = readq(&xhci->op_regs->cmd_ring); 415 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
416 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", 416 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
417 lower_32_bits(val)); 417 lower_32_bits(val));
418 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", 418 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272b3ef5..bce4391a0e7d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1958 xhci_warn(xhci, "WARN something wrong with SW event ring " 1958 xhci_warn(xhci, "WARN something wrong with SW event ring "
1959 "dequeue ptr.\n"); 1959 "dequeue ptr.\n");
1960 /* Update HC event ring dequeue pointer */ 1960 /* Update HC event ring dequeue pointer */
1961 temp = readq(&xhci->ir_set->erst_dequeue); 1961 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
1962 temp &= ERST_PTR_MASK; 1962 temp &= ERST_PTR_MASK;
1963 /* Don't clear the EHB bit (which is RW1C) because 1963 /* Don't clear the EHB bit (which is RW1C) because
1964 * there might be more events to service. 1964 * there might be more events to service.
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1967 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 1967 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1968 "// Write event ring dequeue pointer, " 1968 "// Write event ring dequeue pointer, "
1969 "preserving EHB bit"); 1969 "preserving EHB bit");
1970 writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp, 1970 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
1971 &xhci->ir_set->erst_dequeue); 1971 &xhci->ir_set->erst_dequeue);
1972} 1972}
1973 1973
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2269 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2269 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2270 "// Device context base array address = 0x%llx (DMA), %p (virt)", 2270 "// Device context base array address = 0x%llx (DMA), %p (virt)",
2271 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 2271 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2272 writeq(dma, &xhci->op_regs->dcbaa_ptr); 2272 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2273 2273
2274 /* 2274 /*
2275 * Initialize the ring segment pool. The ring must be a contiguous 2275 * Initialize the ring segment pool. The ring must be a contiguous
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2312 (unsigned long long)xhci->cmd_ring->first_seg->dma); 2312 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2313 2313
2314 /* Set the address in the Command Ring Control register */ 2314 /* Set the address in the Command Ring Control register */
2315 val_64 = readq(&xhci->op_regs->cmd_ring); 2315 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2316 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 2316 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2317 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | 2317 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2318 xhci->cmd_ring->cycle_state; 2318 xhci->cmd_ring->cycle_state;
2319 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2319 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2320 "// Setting command ring address to 0x%x", val); 2320 "// Setting command ring address to 0x%x", val);
2321 writeq(val_64, &xhci->op_regs->cmd_ring); 2321 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2322 xhci_dbg_cmd_ptrs(xhci); 2322 xhci_dbg_cmd_ptrs(xhci);
2323 2323
2324 xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); 2324 xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2396 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2397 "// Set ERST base address for ir_set 0 = 0x%llx", 2397 "// Set ERST base address for ir_set 0 = 0x%llx",
2398 (unsigned long long)xhci->erst.erst_dma_addr); 2398 (unsigned long long)xhci->erst.erst_dma_addr);
2399 val_64 = readq(&xhci->ir_set->erst_base); 2399 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2400 val_64 &= ERST_PTR_MASK; 2400 val_64 &= ERST_PTR_MASK;
2401 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); 2401 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2402 writeq(val_64, &xhci->ir_set->erst_base); 2402 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2403 2403
2404 /* Set the event ring dequeue address */ 2404 /* Set the event ring dequeue address */
2405 xhci_set_hc_event_deq(xhci); 2405 xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c12a06b..04f986d9234f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
142 "QUIRK: Resetting on resume"); 142 "QUIRK: Resetting on resume");
143 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 143 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
144 } 144 }
145 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
146 pdev->device == 0x0015 &&
147 pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
148 pdev->subsystem_device == 0xc0cd)
149 xhci->quirks |= XHCI_RESET_ON_RESUME;
145 if (pdev->vendor == PCI_VENDOR_ID_VIA) 150 if (pdev->vendor == PCI_VENDOR_ID_VIA)
146 xhci->quirks |= XHCI_RESET_ON_RESUME; 151 xhci->quirks |= XHCI_RESET_ON_RESUME;
147} 152}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c34526..0ed64eb68e48 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
307 return 0; 307 return 0;
308 } 308 }
309 309
310 temp_64 = readq(&xhci->op_regs->cmd_ring); 310 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
311 if (!(temp_64 & CMD_RING_RUNNING)) { 311 if (!(temp_64 & CMD_RING_RUNNING)) {
312 xhci_dbg(xhci, "Command ring had been stopped\n"); 312 xhci_dbg(xhci, "Command ring had been stopped\n");
313 return 0; 313 return 0;
314 } 314 }
315 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; 315 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
316 writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); 316 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
317 &xhci->op_regs->cmd_ring);
317 318
318 /* Section 4.6.1.2 of xHCI 1.0 spec says software should 319 /* Section 4.6.1.2 of xHCI 1.0 spec says software should
319 * time the completion of all xHCI commands, including 320
@@ -2864,8 +2865,9 @@ hw_died:
2864 /* Clear the event handler busy flag (RW1C); 2865 /* Clear the event handler busy flag (RW1C);
2865 * the event ring should be empty. 2866 * the event ring should be empty.
2866 */ 2867 */
2867 temp_64 = readq(&xhci->ir_set->erst_dequeue); 2868 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2868 writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); 2869 xhci_write_64(xhci, temp_64 | ERST_EHB,
2870 &xhci->ir_set->erst_dequeue);
2869 spin_unlock(&xhci->lock); 2871 spin_unlock(&xhci->lock);
2870 2872
2871 return IRQ_HANDLED; 2873 return IRQ_HANDLED;
@@ -2877,7 +2879,7 @@ hw_died:
2877 */ 2879 */
2878 while (xhci_handle_event(xhci) > 0) {} 2880 while (xhci_handle_event(xhci) > 0) {}
2879 2881
2880 temp_64 = readq(&xhci->ir_set->erst_dequeue); 2882 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2881 /* If necessary, update the HW's version of the event ring deq ptr. */ 2883 /* If necessary, update the HW's version of the event ring deq ptr. */
2882 if (event_ring_deq != xhci->event_ring->dequeue) { 2884 if (event_ring_deq != xhci->event_ring->dequeue) {
2883 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 2885 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2894,7 @@ hw_died:
2892 2894
2893 /* Clear the event handler busy flag (RW1C); event ring is empty. */ 2895 /* Clear the event handler busy flag (RW1C); event ring is empty. */
2894 temp_64 |= ERST_EHB; 2896 temp_64 |= ERST_EHB;
2895 writeq(temp_64, &xhci->ir_set->erst_dequeue); 2897 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2896 2898
2897 spin_unlock(&xhci->lock); 2899 spin_unlock(&xhci->lock);
2898 2900
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2965 } 2967 }
2966 2968
2967 while (1) { 2969 while (1) {
2968 if (room_on_ring(xhci, ep_ring, num_trbs)) { 2970 if (room_on_ring(xhci, ep_ring, num_trbs))
2969 union xhci_trb *trb = ep_ring->enqueue; 2971 break;
2970 unsigned int usable = ep_ring->enq_seg->trbs +
2971 TRBS_PER_SEGMENT - 1 - trb;
2972 u32 nop_cmd;
2973
2974 /*
2975 * Section 4.11.7.1 TD Fragments states that a link
2976 * TRB must only occur at the boundary between
2977 * data bursts (eg 512 bytes for 480M).
2978 * While it is possible to split a large fragment
2979 * we don't know the size yet.
2980 * Simplest solution is to fill the trb before the
2981 * LINK with nop commands.
2982 */
2983 if (num_trbs == 1 || num_trbs <= usable || usable == 0)
2984 break;
2985
2986 if (ep_ring->type != TYPE_BULK)
2987 /*
2988 * While isoc transfers might have a buffer that
2989 * crosses a 64k boundary it is unlikely.
2990 * Since we can't add NOPs without generating
2991 * gaps in the traffic just hope it never
2992 * happens at the end of the ring.
2993 * This could be fixed by writing a LINK TRB
2994 * instead of the first NOP - however the
2995 * TRB_TYPE_LINK_LE32() calls would all need
2996 * changing to check the ring length.
2997 */
2998 break;
2999
3000 if (num_trbs >= TRBS_PER_SEGMENT) {
3001 xhci_err(xhci, "Too many fragments %d, max %d\n",
3002 num_trbs, TRBS_PER_SEGMENT - 1);
3003 return -EINVAL;
3004 }
3005
3006 nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
3007 ep_ring->cycle_state);
3008 ep_ring->num_trbs_free -= usable;
3009 do {
3010 trb->generic.field[0] = 0;
3011 trb->generic.field[1] = 0;
3012 trb->generic.field[2] = 0;
3013 trb->generic.field[3] = nop_cmd;
3014 trb++;
3015 } while (--usable);
3016 ep_ring->enqueue = trb;
3017 if (room_on_ring(xhci, ep_ring, num_trbs))
3018 break;
3019 }
3020 2972
3021 if (ep_ring == xhci->cmd_ring) { 2973 if (ep_ring == xhci->cmd_ring) {
3022 xhci_err(xhci, "Do not support expand command ring\n"); 2974 xhci_err(xhci, "Do not support expand command ring\n");
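The block removed from prepare_ring() above padded the tail of a ring segment with NOP TRBs so that a TD never straddled the segment's trailing link TRB; its key quantity was "usable", the number of slots left between the enqueue pointer and that link TRB. A small standalone check of that arithmetic, using the TRBS_PER_SEGMENT value restored by the xhci.h hunk later in this patch:

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 64     /* value restored by the xhci.h hunk in this patch */

    /* Slots left before the segment's trailing link TRB, mirroring
     * usable = seg->trbs + TRBS_PER_SEGMENT - 1 - enqueue from the removed code. */
    static unsigned int usable_trbs(unsigned int enq_index)
    {
            return TRBS_PER_SEGMENT - 1 - enq_index;
    }

    int main(void)
    {
            unsigned int enq = 60, num_trbs = 5;

            printf("usable = %u\n", usable_trbs(enq));
            if (num_trbs > usable_trbs(enq))
                    printf("a %u-TRB TD would straddle the link TRB\n", num_trbs);
            return 0;
    }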
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad364394885a..6fe577d46fa2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd)
611 xhci_dbg(xhci, "Event ring:\n"); 611 xhci_dbg(xhci, "Event ring:\n");
612 xhci_debug_ring(xhci, xhci->event_ring); 612 xhci_debug_ring(xhci, xhci->event_ring);
613 xhci_dbg_ring_ptrs(xhci, xhci->event_ring); 613 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
614 temp_64 = readq(&xhci->ir_set->erst_dequeue); 614 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
615 temp_64 &= ~ERST_PTR_MASK; 615 temp_64 &= ~ERST_PTR_MASK;
616 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 616 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
617 "ERST deq = 64'h%0lx", (long unsigned int) temp_64); 617 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
756{ 756{
757 xhci->s3.command = readl(&xhci->op_regs->command); 757 xhci->s3.command = readl(&xhci->op_regs->command);
758 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); 758 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
759 xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr); 759 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
760 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); 760 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
761 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); 761 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
762 xhci->s3.erst_base = readq(&xhci->ir_set->erst_base); 762 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
763 xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue); 763 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
764 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); 764 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
765 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); 765 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
766} 766}
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
769{ 769{
770 writel(xhci->s3.command, &xhci->op_regs->command); 770 writel(xhci->s3.command, &xhci->op_regs->command);
771 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); 771 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
772 writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); 772 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
773 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); 773 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
774 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); 774 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
775 writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base); 775 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
776 writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); 776 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
777 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); 777 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
778 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); 778 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
779} 779}
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
783 u64 val_64; 783 u64 val_64;
784 784
785 /* step 2: initialize command ring buffer */ 785 /* step 2: initialize command ring buffer */
786 val_64 = readq(&xhci->op_regs->cmd_ring); 786 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
787 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | 787 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
788 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 788 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
789 xhci->cmd_ring->dequeue) & 789 xhci->cmd_ring->dequeue) &
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
792 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 792 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
793 "// Setting command ring address to 0x%llx", 793 "// Setting command ring address to 0x%llx",
794 (long unsigned long) val_64); 794 (long unsigned long) val_64);
795 writeq(val_64, &xhci->op_regs->cmd_ring); 795 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
796} 796}
797 797
798/* 798/*
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3842 if (ret) { 3842 if (ret) {
3843 return ret; 3843 return ret;
3844 } 3844 }
3845 temp_64 = readq(&xhci->op_regs->dcbaa_ptr); 3845 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3846 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3846 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
3847 "Op regs DCBAA ptr = %#016llx", temp_64); 3847 "Op regs DCBAA ptr = %#016llx", temp_64);
3848 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3848 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4730 struct device *dev = hcd->self.controller; 4730 struct device *dev = hcd->self.controller;
4731 int retval; 4731 int retval;
4732 4732
4733 /* Limit the block layer scatter-gather lists to half a segment. */ 4733 /* Accept arbitrarily long scatter-gather lists */
4734 hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2; 4734 hcd->self.sg_tablesize = ~0;
4735
4736 /* support to build packet from discontinuous buffers */
4737 hcd->self.no_sg_constraint = 1;
4738 4735
4739 /* XHCI controllers don't stop the ep queue on short packets :| */ 4736 /* XHCI controllers don't stop the ep queue on short packets :| */
4740 hcd->self.no_stop_on_short = 1; 4737 hcd->self.no_stop_on_short = 1;
@@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4760 /* xHCI private pointer was set in xhci_pci_probe for the second 4757 /* xHCI private pointer was set in xhci_pci_probe for the second
4761 * registered roothub. 4758 * registered roothub.
4762 */ 4759 */
4760 xhci = hcd_to_xhci(hcd);
4761 /*
4762 * Support arbitrarily aligned sg-list entries on hosts without
4763 * TD fragment rules (which are currently unsupported).
4764 */
4765 if (xhci->hci_version < 0x100)
4766 hcd->self.no_sg_constraint = 1;
4767
4763 return 0; 4768 return 0;
4764 } 4769 }
4765 4770
@@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4788 if (xhci->hci_version > 0x96) 4793 if (xhci->hci_version > 0x96)
4789 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4794 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4790 4795
4796 if (xhci->hci_version < 0x100)
4797 hcd->self.no_sg_constraint = 1;
4798
4791 /* Make sure the HC is halted. */ 4799 /* Make sure the HC is halted. */
4792 retval = xhci_halt(xhci); 4800 retval = xhci_halt(xhci);
4793 if (retval) 4801 if (retval)
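In the xhci_gen_setup() hunks above, hci_version holds the HCIVERSION register value in BCD form (0x0096 for a 0.96 controller, 0x0100 for 1.0), so the "< 0x100" test selects pre-1.0 hosts, which have no TD fragment alignment rules and can therefore accept arbitrarily aligned sg entries. A tiny sketch of that gating; the struct layout and field names below are illustrative, not the real usb_hcd/xhci_hcd structures.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Field names are illustrative; not the real usb_hcd/xhci_hcd layouts. */
    struct fake_hcd  { bool no_sg_constraint; unsigned int sg_tablesize; };
    struct fake_xhci { uint16_t hci_version; };  /* BCD: 0x0096 = 0.96, 0x0100 = 1.0 */

    static void setup(struct fake_hcd *hcd, const struct fake_xhci *xhci)
    {
            hcd->sg_tablesize = ~0U;          /* no fixed limit on sg-list length */
            if (xhci->hci_version < 0x100)    /* pre-1.0 host: no TD fragment rules */
                    hcd->no_sg_constraint = true;
    }

    int main(void)
    {
            struct fake_xhci pre10 = { .hci_version = 0x0096 };
            struct fake_xhci v10   = { .hci_version = 0x0100 };
            struct fake_hcd hcd = { 0 };

            setup(&hcd, &pre10);
            printf("0.96 host: no_sg_constraint=%d\n", hcd.no_sg_constraint);

            hcd.no_sg_constraint = false;
            setup(&hcd, &v10);
            printf("1.0  host: no_sg_constraint=%d\n", hcd.no_sg_constraint);
            return 0;
    }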
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639bf31..58ed9d088e63 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/usb/hcd.h> 29#include <linux/usb/hcd.h>
30 30
31/*
32 * Registers should always be accessed with double word or quad word accesses.
33 *
34 * Some xHCI implementations may support 64-bit address pointers. Registers
35 * with 64-bit address pointers should be written to with dword accesses by
36 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
37 * xHCI implementations that do not support 64-bit address pointers will ignore
38 * the high dword, and write order is irrelevant.
39 */
40#include <asm-generic/io-64-nonatomic-lo-hi.h>
41
42/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
43#include "xhci-ext-caps.h" 32#include "xhci-ext-caps.h"
44#include "pci-quirks.h" 33#include "pci-quirks.h"
@@ -1279,7 +1268,7 @@ union xhci_trb {
1279 * since the command ring is 64-byte aligned. 1268 * since the command ring is 64-byte aligned.
1280 * It must also be greater than 16. 1269 * It must also be greater than 16.
1281 */ 1270 */
1282#define TRBS_PER_SEGMENT 256 1271#define TRBS_PER_SEGMENT 64
1283/* Allow two commands + a link TRB, along with any reserved command TRBs */ 1272/* Allow two commands + a link TRB, along with any reserved command TRBs */
1284#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) 1273#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
1285#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) 1274#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
1614#define xhci_warn_ratelimited(xhci, fmt, args...) \ 1603#define xhci_warn_ratelimited(xhci, fmt, args...) \
1615 dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) 1604 dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1616 1605
1606/*
1607 * Registers should always be accessed with double word or quad word accesses.
1608 *
1609 * Some xHCI implementations may support 64-bit address pointers. Registers
1610 * with 64-bit address pointers should be written to with dword accesses by
1611 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1612 * xHCI implementations that do not support 64-bit address pointers will ignore
1613 * the high dword, and write order is irrelevant.
1614 */
1615static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
1616 __le64 __iomem *regs)
1617{
1618 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1619 u64 val_lo = readl(ptr);
1620 u64 val_hi = readl(ptr + 1);
1621 return val_lo + (val_hi << 32);
1622}
1623static inline void xhci_write_64(struct xhci_hcd *xhci,
1624 const u64 val, __le64 __iomem *regs)
1625{
1626 __u32 __iomem *ptr = (__u32 __iomem *) regs;
1627 u32 val_lo = lower_32_bits(val);
1628 u32 val_hi = upper_32_bits(val);
1629
1630 writel(val_lo, ptr);
1631 writel(val_hi, ptr + 1);
1632}
1633
1617static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) 1634static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
1618{ 1635{
1619 return xhci->quirks & XHCI_LINK_TRB_QUIRK; 1636 return xhci->quirks & XHCI_LINK_TRB_QUIRK;
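The xhci_read_64()/xhci_write_64() helpers above replace readq()/writeq() so that 64-bit xHCI registers are always accessed as two 32-bit halves, low dword first, which also works on 32-bit builds. The reassembly is just val_lo + (val_hi << 32); a quick userspace check of that split-and-rejoin arithmetic, with lower_32_bits()/upper_32_bits() redefined locally:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits(). */
    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            uint64_t reg = 0x123456789abcdef0ULL;

            /* "write": split into two dwords, low half first, as the comment asks */
            uint32_t lo = lower_32_bits(reg);
            uint32_t hi = upper_32_bits(reg);

            /* "read": reassemble exactly as xhci_read_64() does */
            uint64_t readback = (uint64_t)lo + ((uint64_t)hi << 32);

            assert(readback == reg);
            printf("lo=0x%08x hi=0x%08x readback=0x%016llx\n",
                   lo, hi, (unsigned long long)readback);
            return 0;
    }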
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fc192ad9cc6a..239ad0b1ceb6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -477,8 +477,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
477 musb->port1_status |= 477 musb->port1_status |=
478 (USB_PORT_STAT_C_SUSPEND << 16) 478 (USB_PORT_STAT_C_SUSPEND << 16)
479 | MUSB_PORT_STAT_RESUME; 479 | MUSB_PORT_STAT_RESUME;
480 musb->rh_timer = jiffies
481 + msecs_to_jiffies(20);
480 schedule_delayed_work( 482 schedule_delayed_work(
481 &musb->finish_resume_work, 20); 483 &musb->finish_resume_work,
484 msecs_to_jiffies(20));
482 485
483 musb->xceiv->state = OTG_STATE_A_HOST; 486 musb->xceiv->state = OTG_STATE_A_HOST;
484 musb->is_active = 1; 487 musb->is_active = 1;
@@ -2157,11 +2160,19 @@ static void musb_restore_context(struct musb *musb)
2157 void __iomem *musb_base = musb->mregs; 2160 void __iomem *musb_base = musb->mregs;
2158 void __iomem *ep_target_regs; 2161 void __iomem *ep_target_regs;
2159 void __iomem *epio; 2162 void __iomem *epio;
2163 u8 power;
2160 2164
2161 musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2165 musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2162 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2166 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2163 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); 2167 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2164 musb_writeb(musb_base, MUSB_POWER, musb->context.power); 2168
2169 /* Don't affect SUSPENDM/RESUME bits in POWER reg */
2170 power = musb_readb(musb_base, MUSB_POWER);
2171 power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
2172 musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
2173 power |= musb->context.power;
2174 musb_writeb(musb_base, MUSB_POWER, power);
2175
2165 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); 2176 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2166 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); 2177 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2167 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); 2178 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
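The musb_restore_context() hunk above is a read-modify-write: the live SUSPENDM and RESUME bits are read back from MUSB_POWER and preserved, while every other bit is restored from the saved context. The masking can be exercised on its own; the bit values below are illustrative, the real definitions live in musb_regs.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions; the real definitions are in musb_regs.h. */
    #define MUSB_POWER_SUSPENDM 0x02
    #define MUSB_POWER_RESUME   0x04
    #define PRESERVE_MASK (MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME)

    /* Keep the live suspend/resume bits, take everything else from the saved value. */
    static uint8_t restore_power(uint8_t live, uint8_t saved)
    {
            uint8_t power = live & PRESERVE_MASK;

            saved &= (uint8_t)~PRESERVE_MASK;
            return power | saved;
    }

    int main(void)
    {
            uint8_t live  = MUSB_POWER_RESUME | 0x10;    /* controller is resuming */
            uint8_t saved = MUSB_POWER_SUSPENDM | 0x60;  /* stale suspend bit in context */

            /* -> 0x64: RESUME kept from 'live', stale SUSPENDM from the context dropped */
            printf("restored POWER = 0x%02x\n", restore_power(live, saved));
            return 0;
    }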
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ed455724017b..abb38c3833ef 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1183,6 +1183,9 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
1183 csr = MUSB_CSR0_H_STATUSPKT 1183 csr = MUSB_CSR0_H_STATUSPKT
1184 | MUSB_CSR0_TXPKTRDY; 1184 | MUSB_CSR0_TXPKTRDY;
1185 1185
1186 /* disable ping token in status phase */
1187 csr |= MUSB_CSR0_H_DIS_PING;
1188
1186 /* flag status stage */ 1189 /* flag status stage */
1187 musb->ep0_stage = MUSB_EP0_STATUS; 1190 musb->ep0_stage = MUSB_EP0_STATUS;
1188 1191
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index eb634433ef09..e2d2d8c9891b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -135,7 +135,8 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
135 135
136 /* later, GetPortStatus will stop RESUME signaling */ 136 /* later, GetPortStatus will stop RESUME signaling */
137 musb->port1_status |= MUSB_PORT_STAT_RESUME; 137 musb->port1_status |= MUSB_PORT_STAT_RESUME;
138 schedule_delayed_work(&musb->finish_resume_work, 20); 138 schedule_delayed_work(&musb->finish_resume_work,
139 msecs_to_jiffies(20));
139 } 140 }
140} 141}
141 142
@@ -158,7 +159,6 @@ void musb_port_reset(struct musb *musb, bool do_reset)
158 */ 159 */
159 power = musb_readb(mbase, MUSB_POWER); 160 power = musb_readb(mbase, MUSB_POWER);
160 if (do_reset) { 161 if (do_reset) {
161
162 /* 162 /*
163 * If RESUME is set, we must make sure it stays minimum 20 ms. 163 * If RESUME is set, we must make sure it stays minimum 20 ms.
164 * Then we must clear RESUME and wait a bit to let musb start 164 * Then we must clear RESUME and wait a bit to let musb start
@@ -167,11 +167,22 @@ void musb_port_reset(struct musb *musb, bool do_reset)
167 * detected". 167 * detected".
168 */ 168 */
169 if (power & MUSB_POWER_RESUME) { 169 if (power & MUSB_POWER_RESUME) {
170 while (time_before(jiffies, musb->rh_timer)) 170 long remain = (unsigned long) musb->rh_timer - jiffies;
171 msleep(1); 171
172 if (musb->rh_timer > 0 && remain > 0) {
173 /* take into account the minimum delay after resume */
174 schedule_delayed_work(
175 &musb->deassert_reset_work, remain);
176 return;
177 }
178
172 musb_writeb(mbase, MUSB_POWER, 179 musb_writeb(mbase, MUSB_POWER,
173 power & ~MUSB_POWER_RESUME); 180 power & ~MUSB_POWER_RESUME);
174 msleep(1); 181
182 /* Give the core 1 ms to clear MUSB_POWER_RESUME */
183 schedule_delayed_work(&musb->deassert_reset_work,
184 msecs_to_jiffies(1));
185 return;
175 } 186 }
176 187
177 power &= 0xf0; 188 power &= 0xf0;
@@ -180,7 +191,8 @@ void musb_port_reset(struct musb *musb, bool do_reset)
180 191
181 musb->port1_status |= USB_PORT_STAT_RESET; 192 musb->port1_status |= USB_PORT_STAT_RESET;
182 musb->port1_status &= ~USB_PORT_STAT_ENABLE; 193 musb->port1_status &= ~USB_PORT_STAT_ENABLE;
183 schedule_delayed_work(&musb->deassert_reset_work, 50); 194 schedule_delayed_work(&musb->deassert_reset_work,
195 msecs_to_jiffies(50));
184 } else { 196 } else {
185 dev_dbg(musb->controller, "root port reset stopped\n"); 197 dev_dbg(musb->controller, "root port reset stopped\n");
186 musb_writeb(mbase, MUSB_POWER, 198 musb_writeb(mbase, MUSB_POWER,
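Both musb hunks above fix the same class of bug: schedule_delayed_work() takes its delay in jiffies, but the old code passed raw millisecond counts (20, 50), which is only correct when HZ happens to be 1000. A quick arithmetic check of the difference, with a simplified msecs_to_jiffies() (the real kernel helper also handles rounding corner cases and overflow):

    #include <stdio.h>

    /* Simplified ms -> jiffies conversion; the real msecs_to_jiffies() also
     * handles rounding corner cases and overflow. */
    static unsigned long msecs_to_jiffies(unsigned int ms, unsigned int hz)
    {
            return ((unsigned long)ms * hz + 999) / 1000;
    }

    int main(void)
    {
            unsigned int hz = 100;   /* a common non-1000 CONFIG_HZ choice */

            /* correct: 20 ms expressed as a jiffies delay */
            printf("msecs_to_jiffies(20) at HZ=%u -> %lu jiffies\n",
                   hz, msecs_to_jiffies(20, hz));

            /* the old bug: passing '20' directly means 20 jiffies... */
            printf("20 raw jiffies at HZ=%u -> %u ms of delay\n",
                   hz, 20 * 1000 / hz);   /* = 200 ms, ten times too long */
            return 0;
    }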
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2a408cdaf7b2..8aa59a2c5eb2 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -659,7 +659,6 @@ static int omap2430_runtime_suspend(struct device *dev)
659 OTG_INTERFSEL); 659 OTG_INTERFSEL);
660 660
661 omap2430_low_level_exit(musb); 661 omap2430_low_level_exit(musb);
662 phy_power_off(musb->phy);
663 } 662 }
664 663
665 return 0; 664 return 0;
@@ -674,7 +673,6 @@ static int omap2430_runtime_resume(struct device *dev)
674 omap2430_low_level_init(musb); 673 omap2430_low_level_init(musb);
675 musb_writel(musb->mregs, OTG_INTERFSEL, 674 musb_writel(musb->mregs, OTG_INTERFSEL,
676 musb->context.otg_interfsel); 675 musb->context.otg_interfsel);
677 phy_power_on(musb->phy);
678 } 676 }
679 677
680 return 0; 678 return 0;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8546c8dccd51..d204f745ed05 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -159,32 +159,6 @@ put_3p3:
159 return rc; 159 return rc;
160} 160}
161 161
162#ifdef CONFIG_PM_SLEEP
163#define USB_PHY_SUSP_DIG_VOL 500000
164static int msm_hsusb_config_vddcx(int high)
165{
166 int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
167 int min_vol;
168 int ret;
169
170 if (high)
171 min_vol = USB_PHY_VDD_DIG_VOL_MIN;
172 else
173 min_vol = USB_PHY_SUSP_DIG_VOL;
174
175 ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
176 if (ret) {
177 pr_err("%s: unable to set the voltage for regulator "
178 "HSUSB_VDDCX\n", __func__);
179 return ret;
180 }
181
182 pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
183
184 return ret;
185}
186#endif
187
188static int msm_hsusb_ldo_set_mode(int on) 162static int msm_hsusb_ldo_set_mode(int on)
189{ 163{
190 int ret = 0; 164 int ret = 0;
@@ -440,7 +414,32 @@ static int msm_otg_reset(struct usb_phy *phy)
440#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000) 414#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
441#define PHY_RESUME_TIMEOUT_USEC (100 * 1000) 415#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
442 416
443#ifdef CONFIG_PM_SLEEP 417#ifdef CONFIG_PM
418
419#define USB_PHY_SUSP_DIG_VOL 500000
420static int msm_hsusb_config_vddcx(int high)
421{
422 int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
423 int min_vol;
424 int ret;
425
426 if (high)
427 min_vol = USB_PHY_VDD_DIG_VOL_MIN;
428 else
429 min_vol = USB_PHY_SUSP_DIG_VOL;
430
431 ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
432 if (ret) {
433 pr_err("%s: unable to set the voltage for regulator "
434 "HSUSB_VDDCX\n", __func__);
435 return ret;
436 }
437
438 pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
439
440 return ret;
441}
442
444static int msm_otg_suspend(struct msm_otg *motg) 443static int msm_otg_suspend(struct msm_otg *motg)
445{ 444{
446 struct usb_phy *phy = &motg->phy; 445 struct usb_phy *phy = &motg->phy;
@@ -1733,22 +1732,18 @@ static int msm_otg_pm_resume(struct device *dev)
1733} 1732}
1734#endif 1733#endif
1735 1734
1736#ifdef CONFIG_PM
1737static const struct dev_pm_ops msm_otg_dev_pm_ops = { 1735static const struct dev_pm_ops msm_otg_dev_pm_ops = {
1738 SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume) 1736 SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
1739 SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume, 1737 SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
1740 msm_otg_runtime_idle) 1738 msm_otg_runtime_idle)
1741}; 1739};
1742#endif
1743 1740
1744static struct platform_driver msm_otg_driver = { 1741static struct platform_driver msm_otg_driver = {
1745 .remove = msm_otg_remove, 1742 .remove = msm_otg_remove,
1746 .driver = { 1743 .driver = {
1747 .name = DRIVER_NAME, 1744 .name = DRIVER_NAME,
1748 .owner = THIS_MODULE, 1745 .owner = THIS_MODULE,
1749#ifdef CONFIG_PM
1750 .pm = &msm_otg_dev_pm_ops, 1746 .pm = &msm_otg_dev_pm_ops,
1751#endif
1752 }, 1747 },
1753}; 1748};
1754 1749
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
130 130
131 phy = __usb_find_phy(&phy_list, type); 131 phy = __usb_find_phy(&phy_list, type);
132 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 132 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
133 pr_err("unable to find transceiver of type %s\n", 133 pr_debug("PHY: unable to find transceiver of type %s\n",
134 usb_phy_type_string(type)); 134 usb_phy_type_string(type));
135 goto err0; 135 goto err0;
136 } 136 }
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
228 228
229 phy = __usb_find_phy_dev(dev, &phy_bind_list, index); 229 phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
230 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 230 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
231 pr_err("unable to find transceiver\n"); 231 dev_dbg(dev, "unable to find transceiver\n");
232 goto err0; 232 goto err0;
233 } 233 }
234 234
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
424 unsigned long flags; 424 unsigned long flags;
425 425
426 phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL); 426 phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
427 if (!phy_bind) { 427 if (!phy_bind)
428 pr_err("phy_bind(): No memory for phy_bind");
429 return -ENOMEM; 428 return -ENOMEM;
430 }
431 429
432 phy_bind->dev_name = dev_name; 430 phy_bind->dev_name = dev_name;
433 phy_bind->phy_dev_name = phy_dev_name; 431 phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..ee1f00f03c43 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
152 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 152 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
153 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 153 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, 154 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 156 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
191 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, 192 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
192 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, 193 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
193 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, 194 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
195 { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
196 { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
194 { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, 197 { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
195 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, 198 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
196 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, 199 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..1e2d369df86e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
50#define TI_XDS100V2_PID 0xa6d0 50#define TI_XDS100V2_PID 0xa6d0
51 51
52#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ 52#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
53#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
53 54
54/* US Interface Navigator (http://www.usinterface.com/) */ 55/* US Interface Navigator (http://www.usinterface.com/) */
55#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ 56#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
363/* Sprog II (Andrew Crosland's SprogII DCC interface) */ 364/* Sprog II (Andrew Crosland's SprogII DCC interface) */
364#define FTDI_SPROG_II 0xF0C8 365#define FTDI_SPROG_II 0xF0C8
365 366
367/*
368 * Two of the Tagsys RFID Readers
369 */
370#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
371#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
372
366/* an infrared receiver for user access control with IR tags */ 373/* an infrared receiver for user access control with IR tags */
367#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ 374#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
368 375
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57e4afa..68fc9fe65936 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
1362 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, 1362 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
1363 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, 1363 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
1364 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, 1364 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
1365 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, 1365 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
1366 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1366 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, 1367 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
1367 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, 1368 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
1368 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, 1369 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1525,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
1525 /* Cinterion */ 1526 /* Cinterion */
1526 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1527 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
1527 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1528 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
1528 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, 1529 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
1530 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1529 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, 1531 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
1530 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), 1532 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
1531 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1533 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
139 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ 139 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
140 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ 140 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
141 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ 141 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
142 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
143 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
144 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
142 145
143 { } /* Terminating entry */ 146 { } /* Terminating entry */
144}; 147};
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b079ddfc..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
71 71
72/* Suunto ANT+ USB Driver */ 72/* Suunto ANT+ USB Driver */
73#define SUUNTO_IDS() \ 73#define SUUNTO_IDS() \
74 { USB_DEVICE(0x0fcf, 0x1008) } 74 { USB_DEVICE(0x0fcf, 0x1008) }, \
75 { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
75DEVICE(suunto, SUUNTO_IDS); 76DEVICE(suunto, SUUNTO_IDS);
76 77
77/* Siemens USB/MPI adapter */ 78/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..1dd0604d1911 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
18 18
19 This option depends on 'SCSI' support being enabled, but you 19 This option depends on 'SCSI' support being enabled, but you
20 probably also need 'SCSI device support: SCSI disk support' 20 probably also need 'SCSI device support: SCSI disk support'
21 (BLK_DEV_SD) for most USB storage devices. 21 (BLK_DEV_SD) for most USB storage devices. Some devices also
22 will require 'Probe all LUNs on each SCSI device'
23 (SCSI_MULTI_LUN).
22 24
23 To compile this driver as a module, choose M here: the 25 To compile this driver as a module, choose M here: the
24 module will be called usb-storage. 26 module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
78 78
79static int slave_alloc (struct scsi_device *sdev) 79static int slave_alloc (struct scsi_device *sdev)
80{ 80{
81 struct us_data *us = host_to_us(sdev->host);
82
81 /* 83 /*
82 * Set the INQUIRY transfer length to 36. We don't use any of 84 * Set the INQUIRY transfer length to 36. We don't use any of
83 * the extra data and many devices choke if asked for more or 85 * the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
102 */ 104 */
103 blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); 105 blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
104 106
107 /* Tell the SCSI layer if we know there is more than one LUN */
108 if (us->protocol == USB_PR_BULK && us->max_lun > 0)
109 sdev->sdev_bflags |= BLIST_FORCELUN;
110
105 return 0; 111 return 0;
106} 112}
107 113
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
31 "Cypress ISD-300LP", 31 "Cypress ISD-300LP",
32 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), 32 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
33 33
34UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, 34UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
35 "Super Top", 35 "Super Top",
36 "USB 2.0 SATA BRIDGE", 36 "USB 2.0 SATA BRIDGE",
37 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), 37 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255c2ade..adbeb255616a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
1455 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1455 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1456 US_FL_FIX_CAPACITY ), 1456 US_FL_FIX_CAPACITY ),
1457 1457
1458/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
1459UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
1460 "Research In Motion",
1461 "BlackBerry Bold 9000",
1462 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1463 US_FL_MAX_SECTORS_64 ),
1464
1458/* Reported by Michael Stattmann <michael@stattmann.com> */ 1465/* Reported by Michael Stattmann <michael@stattmann.com> */
1459UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1466UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1460 "Sony Ericsson", 1467 "Sony Ericsson",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409580d5..a0fa5de210cf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
70}; 70};
71 71
72struct vhost_net_ubuf_ref { 72struct vhost_net_ubuf_ref {
73 struct kref kref; 73 /* refcount follows semantics similar to kref:
74 * 0: object is released
75 * 1: no outstanding ubufs
76 * >1: outstanding ubufs
77 */
78 atomic_t refcount;
74 wait_queue_head_t wait; 79 wait_queue_head_t wait;
75 struct vhost_virtqueue *vq; 80 struct vhost_virtqueue *vq;
76}; 81};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
116 vhost_net_zcopy_mask |= 0x1 << vq; 121 vhost_net_zcopy_mask |= 0x1 << vq;
117} 122}
118 123
119static void vhost_net_zerocopy_done_signal(struct kref *kref)
120{
121 struct vhost_net_ubuf_ref *ubufs;
122
123 ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
124 wake_up(&ubufs->wait);
125}
126
127static struct vhost_net_ubuf_ref * 124static struct vhost_net_ubuf_ref *
128vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) 125vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
129{ 126{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
134 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); 131 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
135 if (!ubufs) 132 if (!ubufs)
136 return ERR_PTR(-ENOMEM); 133 return ERR_PTR(-ENOMEM);
137 kref_init(&ubufs->kref); 134 atomic_set(&ubufs->refcount, 1);
138 init_waitqueue_head(&ubufs->wait); 135 init_waitqueue_head(&ubufs->wait);
139 ubufs->vq = vq; 136 ubufs->vq = vq;
140 return ubufs; 137 return ubufs;
141} 138}
142 139
143static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 140static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
144{ 141{
145 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 142 int r = atomic_sub_return(1, &ubufs->refcount);
143 if (unlikely(!r))
144 wake_up(&ubufs->wait);
145 return r;
146} 146}
147 147
148static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) 148static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
149{ 149{
150 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 150 vhost_net_ubuf_put(ubufs);
151 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); 151 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
152} 152}
153 153
154static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) 154static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
306{ 306{
307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; 307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
308 struct vhost_virtqueue *vq = ubufs->vq; 308 struct vhost_virtqueue *vq = ubufs->vq;
309 int cnt = atomic_read(&ubufs->kref.refcount); 309 int cnt;
310
311 rcu_read_lock_bh();
310 312
311 /* set len to mark this desc buffers done DMA */ 313 /* set len to mark this desc buffers done DMA */
312 vq->heads[ubuf->desc].len = success ? 314 vq->heads[ubuf->desc].len = success ?
313 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; 315 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
314 vhost_net_ubuf_put(ubufs); 316 cnt = vhost_net_ubuf_put(ubufs);
315 317
316 /* 318 /*
317 * Trigger polling thread if guest stopped submitting new buffers: 319 * Trigger polling thread if guest stopped submitting new buffers:
318 * in this case, the refcount after decrement will eventually reach 1 320 * in this case, the refcount after decrement will eventually reach 1.
319 * so here it is 2.
320 * We also trigger polling periodically after each 16 packets 321 * We also trigger polling periodically after each 16 packets
321 * (the value 16 here is more or less arbitrary, it's tuned to trigger 322 * (the value 16 here is more or less arbitrary, it's tuned to trigger
322 * less than 10% of times). 323 * less than 10% of times).
323 */ 324 */
324 if (cnt <= 2 || !(cnt % 16)) 325 if (cnt <= 1 || !(cnt % 16))
325 vhost_poll_queue(&vq->poll); 326 vhost_poll_queue(&vq->poll);
327
328 rcu_read_unlock_bh();
326} 329}
327 330
328/* Expects to be always run from workqueue - which acts as 331/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
420 msg.msg_control = ubuf; 423 msg.msg_control = ubuf;
421 msg.msg_controllen = sizeof(ubuf); 424 msg.msg_controllen = sizeof(ubuf);
422 ubufs = nvq->ubufs; 425 ubufs = nvq->ubufs;
423 kref_get(&ubufs->kref); 426 atomic_inc(&ubufs->refcount);
424 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 427 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
425 } else { 428 } else {
426 msg.msg_control = NULL; 429 msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
780 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); 783 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
781 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 784 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
782 n->tx_flush = false; 785 n->tx_flush = false;
783 kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); 786 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
784 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 787 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
785 } 788 }
786} 789}
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
800 fput(tx_sock->file); 803 fput(tx_sock->file);
801 if (rx_sock) 804 if (rx_sock)
802 fput(rx_sock->file); 805 fput(rx_sock->file);
806 /* Make sure no callbacks are outstanding */
807 synchronize_rcu_bh();
803 /* We do an extra flush before freeing memory, 808 /* We do an extra flush before freeing memory,
804 * since jobs can re-queue themselves. */ 809 * since jobs can re-queue themselves. */
805 vhost_net_flush(n); 810 vhost_net_flush(n);
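
The vhost/net change above replaces the kref with a bare atomic counter whose meaning is documented right in the structure (0 = released, 1 = no outstanding zero-copy buffers, >1 = outstanding buffers) and brackets the completion callback in rcu_read_lock_bh() so that the synchronize_rcu_bh() added to release can guarantee no callback is still running. Below is a rough userspace sketch of the counting scheme only, with C11 atomics and a pthread condition variable standing in for the kernel wait queue; all names are illustrative, not the driver's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for struct vhost_net_ubuf_ref: 0 = released,
 * 1 = no outstanding zero-copy buffers, >1 = outstanding buffers. */
struct ubuf_ref {
    atomic_int refcount;
    pthread_mutex_t lock;
    pthread_cond_t wait;
};

static int ubuf_put(struct ubuf_ref *u)
{
    int r = atomic_fetch_sub(&u->refcount, 1) - 1;

    if (r == 0) {                           /* last reference is gone */
        pthread_mutex_lock(&u->lock);
        pthread_cond_broadcast(&u->wait);   /* wake the waiter */
        pthread_mutex_unlock(&u->lock);
    }
    return r;
}

static void ubuf_put_and_wait(struct ubuf_ref *u)
{
    ubuf_put(u);                            /* drop the base reference */
    pthread_mutex_lock(&u->lock);
    while (atomic_load(&u->refcount) != 0)  /* wait for every completion */
        pthread_cond_wait(&u->wait, &u->lock);
    pthread_mutex_unlock(&u->lock);
}

static void *completion(void *arg)
{
    usleep(1000);                           /* the "DMA" finishes later */
    ubuf_put(arg);                          /* completion drops its reference */
    return NULL;
}

int main(void)
{
    struct ubuf_ref u = { .lock = PTHREAD_MUTEX_INITIALIZER,
                          .wait = PTHREAD_COND_INITIALIZER };
    pthread_t tid[4];

    atomic_init(&u.refcount, 1);            /* base reference only */
    for (int i = 0; i < 4; i++) {
        atomic_fetch_add(&u.refcount, 1);   /* one ref per in-flight buffer */
        pthread_create(&tid[i], NULL, completion, &u);
    }
    ubuf_put_and_wait(&u);                  /* returns once the count hits 0 */
    printf("all zero-copy buffers completed\n");
    for (int i = 0; i < 4; i++)
        pthread_join(tid[i], NULL);
    return 0;
}
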
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3a0e2d..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
364 364
365config FB_IMX 365config FB_IMX
366 tristate "Freescale i.MX1/21/25/27 LCD support" 366 tristate "Freescale i.MX1/21/25/27 LCD support"
367 depends on FB && IMX_HAVE_PLATFORM_IMX_FB 367 depends on FB && ARCH_MXC
368 select FB_CFB_FILLRECT 368 select FB_CFB_FILLRECT
369 select FB_CFB_COPYAREA 369 select FB_CFB_COPYAREA
370 select FB_CFB_IMAGEBLIT 370 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
22 22
23config EXYNOS_LCD_S6E8AX0 23config EXYNOS_LCD_S6E8AX0
24 bool "S6E8AX0 MIPI AMOLED LCD Driver" 24 bool "S6E8AX0 MIPI AMOLED LCD Driver"
25 depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE) 25 depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
26 depends on (LCD_CLASS_DEVICE = y)
26 default n 27 default n
27 help 28 help
28 If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its 29 If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bbeb8dd7f108..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
2160 *five_taps = false; 2160 *five_taps = false;
2161 2161
2162 do { 2162 do {
2163 in_height = DIV_ROUND_UP(height, *decim_y); 2163 in_height = height / *decim_y;
2164 in_width = DIV_ROUND_UP(width, *decim_x); 2164 in_width = width / *decim_x;
2165 *core_clk = dispc.feat->calc_core_clk(pclk, in_width, 2165 *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
2166 in_height, out_width, out_height, mem_to_mem); 2166 in_height, out_width, out_height, mem_to_mem);
2167 error = (in_width > maxsinglelinewidth || !*core_clk || 2167 error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
2199 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); 2199 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
2200 2200
2201 do { 2201 do {
2202 in_height = DIV_ROUND_UP(height, *decim_y); 2202 in_height = height / *decim_y;
2203 in_width = DIV_ROUND_UP(width, *decim_x); 2203 in_width = width / *decim_x;
2204 *five_taps = in_height > out_height; 2204 *five_taps = in_height > out_height;
2205 2205
2206 if (in_width > maxsinglelinewidth) 2206 if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
2268{ 2268{
2269 u16 in_width, in_width_max; 2269 u16 in_width, in_width_max;
2270 int decim_x_min = *decim_x; 2270 int decim_x_min = *decim_x;
2271 u16 in_height = DIV_ROUND_UP(height, *decim_y); 2271 u16 in_height = height / *decim_y;
2272 const int maxsinglelinewidth = 2272 const int maxsinglelinewidth =
2273 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); 2273 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
2274 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 2274 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
2287 return -EINVAL; 2287 return -EINVAL;
2288 2288
2289 do { 2289 do {
2290 in_width = DIV_ROUND_UP(width, *decim_x); 2290 in_width = width / *decim_x;
2291 } while (*decim_x <= *x_predecim && 2291 } while (*decim_x <= *x_predecim &&
2292 in_width > maxsinglelinewidth && ++*decim_x); 2292 in_width > maxsinglelinewidth && ++*decim_x);
2293 2293
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
2466 if (r) 2466 if (r)
2467 return r; 2467 return r;
2468 2468
2469 in_width = DIV_ROUND_UP(in_width, x_predecim); 2469 in_width = in_width / x_predecim;
2470 in_height = DIV_ROUND_UP(in_height, y_predecim); 2470 in_height = in_height / y_predecim;
2471 2471
2472 if (color_mode == OMAP_DSS_COLOR_YUV2 || 2472 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
2473 color_mode == OMAP_DSS_COLOR_UYVY || 2473 color_mode == OMAP_DSS_COLOR_UYVY ||
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f2674e16..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
117 /* outputs */ 117 /* outputs */
118 118
119 struct dsi_clock_info dsi_cinfo; 119 struct dsi_clock_info dsi_cinfo;
120 unsigned long long fck; 120 unsigned long fck;
121 struct dispc_clock_info dispc_cinfo; 121 struct dispc_clock_info dispc_cinfo;
122}; 122};
123 123
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9e3c96..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
46struct sdi_clk_calc_ctx { 46struct sdi_clk_calc_ctx {
47 unsigned long pck_min, pck_max; 47 unsigned long pck_min, pck_max;
48 48
49 unsigned long long fck; 49 unsigned long fck;
50 struct dispc_clock_info dispc_cinfo; 50 struct dispc_clock_info dispc_cinfo;
51}; 51};
52 52
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
884 if (done == count) 884 if (done == count)
885 goto out; 885 goto out;
886 } 886 }
887 if ((uintptr_t)addr & 0x2) { 887 if ((uintptr_t)(addr + done) & 0x2) {
888 if ((count - done) < 2) { 888 if ((count - done) < 2) {
889 *(u8 *)(buf + done) = ioread8(addr + done); 889 *(u8 *)(buf + done) = ioread8(addr + done);
890 done += 1; 890 done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
938 if (done == count) 938 if (done == count)
939 goto out; 939 goto out;
940 } 940 }
941 if ((uintptr_t)addr & 0x2) { 941 if ((uintptr_t)(addr + done) & 0x2) {
942 if ((count - done) < 2) { 942 if ((count - done) < 2) {
943 iowrite8(*(u8 *)(buf + done), addr + done); 943 iowrite8(*(u8 *)(buf + done), addr + done);
944 done += 1; 944 done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1289 if (done == count) 1289 if (done == count)
1290 goto out; 1290 goto out;
1291 } 1291 }
1292 if ((uintptr_t)addr & 0x2) { 1292 if ((uintptr_t)(addr + done) & 0x2) {
1293 if ((count - done) < 2) { 1293 if ((count - done) < 2) {
1294 *(u8 *)(buf + done) = ioread8(addr + done); 1294 *(u8 *)(buf + done) = ioread8(addr + done);
1295 done += 1; 1295 done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1371 if (done == count) 1371 if (done == count)
1372 goto out; 1372 goto out;
1373 } 1373 }
1374 if ((uintptr_t)addr & 0x2) { 1374 if ((uintptr_t)(addr + done) & 0x2) {
1375 if ((count - done) < 2) { 1375 if ((count - done) < 2) {
1376 iowrite8(*(u8 *)(buf + done), addr + done); 1376 iowrite8(*(u8 *)(buf + done), addr + done);
1377 done += 1; 1377 done += 1;
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index aaf2995d37f4..68b45fc9ba6a 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -402,7 +402,7 @@ static int __init wdt_init(void)
402 402
403 if (!found) { 403 if (!found) {
404 pr_err("No W83697HF/HG could be found\n"); 404 pr_err("No W83697HF/HG could be found\n");
405 ret = -EIO; 405 ret = -ENODEV;
406 goto out; 406 goto out;
407 } 407 }
408 408
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811bfa56..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
16dom0-$(CONFIG_X86) += pcpu.o 16dom0-$(CONFIG_X86) += pcpu.o
17obj-$(CONFIG_XEN_DOM0) += $(dom0-y) 17obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
18obj-$(CONFIG_BLOCK) += biomerge.o 18obj-$(CONFIG_BLOCK) += biomerge.o
19obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
20obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o 19obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
21obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o 20obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
22obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o 21obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e003c0ad..f4a9e3311297 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn)
862 irq = ret; 862 irq = ret;
863 goto out; 863 goto out;
864 } 864 }
865 /* New interdomain events are bound to VCPU 0. */
866 bind_evtchn_to_cpu(evtchn, 0);
865 } else { 867 } else {
866 struct irq_info *info = info_for_irq(irq); 868 struct irq_info *info = info_for_irq(irq);
867 WARN_ON(info == NULL || info->type != IRQT_EVTCHN); 869 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) IBM Corp. 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/mm.h>
24#include <linux/slab.h>
25#include <asm/page.h>
26#include <xen/xencomm.h>
27#include <xen/interface/xen.h>
28#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
29
30static int xencomm_init(struct xencomm_desc *desc,
31 void *buffer, unsigned long bytes)
32{
33 unsigned long recorded = 0;
34 int i = 0;
35
36 while ((recorded < bytes) && (i < desc->nr_addrs)) {
37 unsigned long vaddr = (unsigned long)buffer + recorded;
38 unsigned long paddr;
39 int offset;
40 int chunksz;
41
42 offset = vaddr % PAGE_SIZE; /* handle partial pages */
43 chunksz = min(PAGE_SIZE - offset, bytes - recorded);
44
45 paddr = xencomm_vtop(vaddr);
46 if (paddr == ~0UL) {
47 printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
48 __func__, vaddr);
49 return -EINVAL;
50 }
51
52 desc->address[i++] = paddr;
53 recorded += chunksz;
54 }
55
56 if (recorded < bytes) {
57 printk(KERN_DEBUG
58 "%s: could only translate %ld of %ld bytes\n",
59 __func__, recorded, bytes);
60 return -ENOSPC;
61 }
62
63 /* mark remaining addresses invalid (just for safety) */
64 while (i < desc->nr_addrs)
65 desc->address[i++] = XENCOMM_INVALID;
66
67 desc->magic = XENCOMM_MAGIC;
68
69 return 0;
70}
71
72static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
73 void *buffer, unsigned long bytes)
74{
75 struct xencomm_desc *desc;
76 unsigned long buffer_ulong = (unsigned long)buffer;
77 unsigned long start = buffer_ulong & PAGE_MASK;
78 unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
79 unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
80 unsigned long size = sizeof(*desc) +
81 sizeof(desc->address[0]) * nr_addrs;
82
83 /*
84 * slab allocator returns at least sizeof(void*) aligned pointer.
85 * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
86 * cross page boundary.
87 */
88 if (sizeof(*desc) > sizeof(void *)) {
89 unsigned long order = get_order(size);
90 desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
91 order);
92 if (desc == NULL)
93 return NULL;
94
95 desc->nr_addrs =
96 ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
97 sizeof(*desc->address);
98 } else {
99 desc = kmalloc(size, gfp_mask);
100 if (desc == NULL)
101 return NULL;
102
103 desc->nr_addrs = nr_addrs;
104 }
105 return desc;
106}
107
108void xencomm_free(struct xencomm_handle *desc)
109{
110 if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
111 struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
112 if (sizeof(*desc__) > sizeof(void *)) {
113 unsigned long size = sizeof(*desc__) +
114 sizeof(desc__->address[0]) * desc__->nr_addrs;
115 unsigned long order = get_order(size);
116 free_pages((unsigned long)__va(desc), order);
117 } else
118 kfree(__va(desc));
119 }
120}
121
122static int xencomm_create(void *buffer, unsigned long bytes,
123 struct xencomm_desc **ret, gfp_t gfp_mask)
124{
125 struct xencomm_desc *desc;
126 int rc;
127
128 pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
129
130 if (bytes == 0) {
131 /* don't create a descriptor; Xen recognizes NULL. */
132 BUG_ON(buffer != NULL);
133 *ret = NULL;
134 return 0;
135 }
136
137 BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
138
139 desc = xencomm_alloc(gfp_mask, buffer, bytes);
140 if (!desc) {
141 printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
142 return -ENOMEM;
143 }
144
145 rc = xencomm_init(desc, buffer, bytes);
146 if (rc) {
147 printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
148 xencomm_free((struct xencomm_handle *)__pa(desc));
149 return rc;
150 }
151
152 *ret = desc;
153 return 0;
154}
155
156static struct xencomm_handle *xencomm_create_inline(void *ptr)
157{
158 unsigned long paddr;
159
160 BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
161
162 paddr = (unsigned long)xencomm_pa(ptr);
163 BUG_ON(paddr & XENCOMM_INLINE_FLAG);
164 return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
165}
166
167/* "mini" routine, for stack-based communications: */
168static int xencomm_create_mini(void *buffer,
169 unsigned long bytes, struct xencomm_mini *xc_desc,
170 struct xencomm_desc **ret)
171{
172 int rc = 0;
173 struct xencomm_desc *desc;
174 BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
175
176 desc = (void *)xc_desc;
177
178 desc->nr_addrs = XENCOMM_MINI_ADDRS;
179
180 rc = xencomm_init(desc, buffer, bytes);
181 if (!rc)
182 *ret = desc;
183
184 return rc;
185}
186
187struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
188{
189 int rc;
190 struct xencomm_desc *desc;
191
192 if (xencomm_is_phys_contiguous((unsigned long)ptr))
193 return xencomm_create_inline(ptr);
194
195 rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
196
197 if (rc || desc == NULL)
198 return NULL;
199
200 return xencomm_pa(desc);
201}
202
203struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
204 struct xencomm_mini *xc_desc)
205{
206 int rc;
207 struct xencomm_desc *desc = NULL;
208
209 if (xencomm_is_phys_contiguous((unsigned long)ptr))
210 return xencomm_create_inline(ptr);
211
212 rc = xencomm_create_mini(ptr, bytes, xc_desc,
213 &desc);
214
215 if (rc)
216 return NULL;
217
218 return xencomm_pa(desc);
219}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0bad24ddc2e7..0129b78a6908 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio)
114} 114}
115EXPORT_SYMBOL(bio_integrity_free); 115EXPORT_SYMBOL(bio_integrity_free);
116 116
117static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
118{
119 if (bip->bip_slab == BIO_POOL_NONE)
120 return BIP_INLINE_VECS;
121
122 return bvec_nr_vecs(bip->bip_slab);
123}
124
117/** 125/**
118 * bio_integrity_add_page - Attach integrity metadata 126 * bio_integrity_add_page - Attach integrity metadata
119 * @bio: bio to update 127 * @bio: bio to update
@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
129 struct bio_integrity_payload *bip = bio->bi_integrity; 137 struct bio_integrity_payload *bip = bio->bi_integrity;
130 struct bio_vec *iv; 138 struct bio_vec *iv;
131 139
132 if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { 140 if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
133 printk(KERN_ERR "%s: bip_vec full\n", __func__); 141 printk(KERN_ERR "%s: bip_vec full\n", __func__);
134 return 0; 142 return 0;
135 } 143 }
@@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
226} 234}
227EXPORT_SYMBOL(bio_integrity_tag_size); 235EXPORT_SYMBOL(bio_integrity_tag_size);
228 236
229int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set) 237static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
238 int set)
230{ 239{
231 struct bio_integrity_payload *bip = bio->bi_integrity; 240 struct bio_integrity_payload *bip = bio->bi_integrity;
232 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 241 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
diff --git a/fs/bio.c b/fs/bio.c
index 75c49a382239..8754e7b6eb49 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast);
611struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, 611struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
612 struct bio_set *bs) 612 struct bio_set *bs)
613{ 613{
614 unsigned nr_iovecs = 0;
615 struct bvec_iter iter; 614 struct bvec_iter iter;
616 struct bio_vec bv; 615 struct bio_vec bv;
617 struct bio *bio; 616 struct bio *bio;
@@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
638 * __bio_clone_fast() anyways. 637 * __bio_clone_fast() anyways.
639 */ 638 */
640 639
641 bio_for_each_segment(bv, bio_src, iter) 640 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
642 nr_iovecs++;
643
644 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
645 if (!bio) 641 if (!bio)
646 return NULL; 642 return NULL;
647 643
@@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; 646 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
651 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; 647 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
652 648
649 if (bio->bi_rw & REQ_DISCARD)
650 goto integrity_clone;
651
652 if (bio->bi_rw & REQ_WRITE_SAME) {
653 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
654 goto integrity_clone;
655 }
656
653 bio_for_each_segment(bv, bio_src, iter) 657 bio_for_each_segment(bv, bio_src, iter)
654 bio->bi_io_vec[bio->bi_vcnt++] = bv; 658 bio->bi_io_vec[bio->bi_vcnt++] = bv;
655 659
660integrity_clone:
656 if (bio_integrity(bio_src)) { 661 if (bio_integrity(bio_src)) {
657 int ret; 662 int ret;
658 663
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e2600cdb6c25..b01fb6c527e3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1010 bytes = min(bytes, working_bytes); 1010 bytes = min(bytes, working_bytes);
1011 kaddr = kmap_atomic(page_out); 1011 kaddr = kmap_atomic(page_out);
1012 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); 1012 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
1013 if (*pg_index == (vcnt - 1) && *pg_offset == 0)
1014 memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1013 kunmap_atomic(kaddr); 1015 kunmap_atomic(kaddr);
1014 flush_dcache_page(page_out); 1016 flush_dcache_page(page_out);
1015 1017
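
The compression.c hunk above zero-fills the remainder of the last destination page (when the copy starts at page offset 0) after the decompressed bytes have been copied in, so stale page contents cannot leak past the end of the stream. The same padding step in plain userspace C, where PAGE_CACHE_SIZE is just a 4 KiB constant for the demo:

#include <stdio.h>
#include <string.h>

#define PAGE_CACHE_SIZE 4096    /* fixed 4 KiB stand-in for the demo */

int main(void)
{
    unsigned char page[PAGE_CACHE_SIZE];
    const char data[] = "tail of the decompressed stream";
    size_t bytes = sizeof(data) - 1;

    memset(page, 0xAA, sizeof(page));                   /* stale contents of the page */
    memcpy(page, data, bytes);                          /* copy what the stream provides */
    memset(page + bytes, 0, PAGE_CACHE_SIZE - bytes);   /* the added zero-fill */

    printf("first byte past the payload: %u\n", (unsigned)page[bytes]);
    return 0;
}
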
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5215f04260b2..81ea55314b1f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3839,7 +3839,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3839 rb_erase(&ref->rb_node, &head->ref_root); 3839 rb_erase(&ref->rb_node, &head->ref_root);
3840 atomic_dec(&delayed_refs->num_entries); 3840 atomic_dec(&delayed_refs->num_entries);
3841 btrfs_put_delayed_ref(ref); 3841 btrfs_put_delayed_ref(ref);
3842 cond_resched_lock(&head->lock);
3843 } 3842 }
3844 if (head->must_insert_reserved) 3843 if (head->must_insert_reserved)
3845 pin_bytes = true; 3844 pin_bytes = true;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c9ecc93ae2c..32312e09f0f5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2385 spin_unlock(&delayed_refs->lock); 2385 spin_unlock(&delayed_refs->lock);
2386 locked_ref = NULL; 2386 locked_ref = NULL;
2387 cond_resched(); 2387 cond_resched();
2388 count++;
2388 continue; 2389 continue;
2389 } 2390 }
2390 2391
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 184e9cb39647..d3d44486290b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5154,7 +5154,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5154 return ERR_CAST(inode); 5154 return ERR_CAST(inode);
5155 } 5155 }
5156 5156
5157 return d_splice_alias(inode, dentry); 5157 return d_materialise_unique(dentry, inode);
5158} 5158}
5159 5159
5160unsigned char btrfs_filetype_table[] = { 5160unsigned char btrfs_filetype_table[] = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b0134892dc70..a6d8efa46bfe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3537,20 +3537,6 @@ out:
3537 return ret; 3537 return ret;
3538} 3538}
3539 3539
3540static long btrfs_ioctl_global_rsv(struct btrfs_root *root, void __user *arg)
3541{
3542 struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
3543 u64 reserved;
3544
3545 spin_lock(&block_rsv->lock);
3546 reserved = block_rsv->reserved;
3547 spin_unlock(&block_rsv->lock);
3548
3549 if (arg && copy_to_user(arg, &reserved, sizeof(reserved)))
3550 return -EFAULT;
3551 return 0;
3552}
3553
3554/* 3540/*
3555 * there are many ways the trans_start and trans_end ioctls can lead 3541 * there are many ways the trans_start and trans_end ioctls can lead
3556 * to deadlocks. They should only be used by applications that 3542 * to deadlocks. They should only be used by applications that
@@ -4525,7 +4511,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
4525 spin_lock(&root->fs_info->super_lock); 4511 spin_lock(&root->fs_info->super_lock);
4526 strcpy(super_block->label, label); 4512 strcpy(super_block->label, label);
4527 spin_unlock(&root->fs_info->super_lock); 4513 spin_unlock(&root->fs_info->super_lock);
4528 ret = btrfs_end_transaction(trans, root); 4514 ret = btrfs_commit_transaction(trans, root);
4529 4515
4530out_unlock: 4516out_unlock:
4531 mnt_drop_write_file(file); 4517 mnt_drop_write_file(file);
@@ -4668,7 +4654,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
4668 if (ret) 4654 if (ret)
4669 return ret; 4655 return ret;
4670 4656
4671 trans = btrfs_start_transaction(root, 1); 4657 trans = btrfs_start_transaction(root, 0);
4672 if (IS_ERR(trans)) 4658 if (IS_ERR(trans))
4673 return PTR_ERR(trans); 4659 return PTR_ERR(trans);
4674 4660
@@ -4689,7 +4675,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
4689 btrfs_set_super_incompat_flags(super_block, newflags); 4675 btrfs_set_super_incompat_flags(super_block, newflags);
4690 spin_unlock(&root->fs_info->super_lock); 4676 spin_unlock(&root->fs_info->super_lock);
4691 4677
4692 return btrfs_end_transaction(trans, root); 4678 return btrfs_commit_transaction(trans, root);
4693} 4679}
4694 4680
4695long btrfs_ioctl(struct file *file, unsigned int 4681long btrfs_ioctl(struct file *file, unsigned int
@@ -4757,8 +4743,6 @@ long btrfs_ioctl(struct file *file, unsigned int
4757 return btrfs_ioctl_logical_to_ino(root, argp); 4743 return btrfs_ioctl_logical_to_ino(root, argp);
4758 case BTRFS_IOC_SPACE_INFO: 4744 case BTRFS_IOC_SPACE_INFO:
4759 return btrfs_ioctl_space_info(root, argp); 4745 return btrfs_ioctl_space_info(root, argp);
4760 case BTRFS_IOC_GLOBAL_RSV:
4761 return btrfs_ioctl_global_rsv(root, argp);
4762 case BTRFS_IOC_SYNC: { 4746 case BTRFS_IOC_SYNC: {
4763 int ret; 4747 int ret;
4764 4748
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index cf9107a64204..9dde9717c1b9 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1332,6 +1332,16 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
1332 } 1332 }
1333 1333
1334 if (cur_clone_root) { 1334 if (cur_clone_root) {
1335 if (compressed != BTRFS_COMPRESS_NONE) {
1336 /*
1337 * Offsets given by iterate_extent_inodes() are relative
1338 * to the start of the extent, we need to add logical
1339 * offset from the file extent item.
1340 * (See why at backref.c:check_extent_in_eb())
1341 */
1342 cur_clone_root->offset += btrfs_file_extent_offset(eb,
1343 fi);
1344 }
1335 *found = cur_clone_root; 1345 *found = cur_clone_root;
1336 ret = 0; 1346 ret = 0;
1337 } else { 1347 } else {
@@ -2774,8 +2784,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2774 return 0; 2784 return 0;
2775} 2785}
2776 2786
2777#ifdef CONFIG_BTRFS_ASSERT
2778
2779static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) 2787static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2780{ 2788{
2781 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 2789 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
@@ -2796,8 +2804,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2796 return -ENOENT; 2804 return -ENOENT;
2797} 2805}
2798 2806
2799#endif
2800
2801static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) 2807static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
2802{ 2808{
2803 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 2809 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
@@ -2902,7 +2908,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
2902 } 2908 }
2903 2909
2904 sctx->send_progress = sctx->cur_ino + 1; 2910 sctx->send_progress = sctx->cur_ino + 1;
2905 ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0); 2911 ret = del_waiting_dir_move(sctx, pm->ino);
2912 ASSERT(ret == 0);
2913
2906 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 2914 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
2907 if (ret < 0) 2915 if (ret < 0)
2908 goto out; 2916 goto out;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 97cc24198554..d04db817be5c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -566,7 +566,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
566 kfree(num); 566 kfree(num);
567 567
568 if (info->max_inline) { 568 if (info->max_inline) {
569 info->max_inline = max_t(u64, 569 info->max_inline = min_t(u64,
570 info->max_inline, 570 info->max_inline,
571 root->sectorsize); 571 root->sectorsize);
572 } 572 }
@@ -855,6 +855,7 @@ static struct dentry *get_default_root(struct super_block *sb,
855 struct btrfs_path *path; 855 struct btrfs_path *path;
856 struct btrfs_key location; 856 struct btrfs_key location;
857 struct inode *inode; 857 struct inode *inode;
858 struct dentry *dentry;
858 u64 dir_id; 859 u64 dir_id;
859 int new = 0; 860 int new = 0;
860 861
@@ -925,7 +926,13 @@ setup_root:
925 return dget(sb->s_root); 926 return dget(sb->s_root);
926 } 927 }
927 928
928 return d_obtain_alias(inode); 929 dentry = d_obtain_alias(inode);
930 if (!IS_ERR(dentry)) {
931 spin_lock(&dentry->d_lock);
932 dentry->d_flags &= ~DCACHE_DISCONNECTED;
933 spin_unlock(&dentry->d_lock);
934 }
935 return dentry;
929} 936}
930 937
931static int btrfs_fill_super(struct super_block *sb, 938static int btrfs_fill_super(struct super_block *sb,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 782374d8fd19..865f4cf9a769 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -578,8 +578,14 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
578 return -ENOMEM; 578 return -ENOMEM;
579 579
580 list_for_each_entry(dev, &fs_devices->devices, dev_list) { 580 list_for_each_entry(dev, &fs_devices->devices, dev_list) {
581 struct hd_struct *disk = dev->bdev->bd_part; 581 struct hd_struct *disk;
582 struct kobject *disk_kobj = &part_to_dev(disk)->kobj; 582 struct kobject *disk_kobj;
583
584 if (!dev->bdev)
585 continue;
586
587 disk = dev->bdev->bd_part;
588 disk_kobj = &part_to_dev(disk)->kobj;
583 589
584 error = sysfs_create_link(fs_info->device_dir_kobj, 590 error = sysfs_create_link(fs_info->device_dir_kobj,
585 disk_kobj, disk_kobj->name); 591 disk_kobj, disk_kobj->name);
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4c2d452c4bfc..21887d63dad5 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
54 return acl; 54 return acl;
55} 55}
56 56
57void ceph_forget_all_cached_acls(struct inode *inode)
58{
59 forget_all_cached_acls(inode);
60}
61
62struct posix_acl *ceph_get_acl(struct inode *inode, int type) 57struct posix_acl *ceph_get_acl(struct inode *inode, int type)
63{ 58{
64 int size; 59 int size;
@@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
160 goto out_dput; 155 goto out_dput;
161 } 156 }
162 157
163 if (value) 158 ret = __ceph_setxattr(dentry, name, value, size, 0);
164 ret = __ceph_setxattr(dentry, name, value, size, 0);
165 else
166 ret = __ceph_removexattr(dentry, name);
167
168 if (ret) { 159 if (ret) {
169 if (new_mode != old_mode) { 160 if (new_mode != old_mode) {
170 newattrs.ia_mode = old_mode; 161 newattrs.ia_mode = old_mode;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6da4df84ba30..45eda6d7a40c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p)
100 return p & 0xffffffff; 100 return p & 0xffffffff;
101} 101}
102 102
103static int fpos_cmp(loff_t l, loff_t r)
104{
105 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
106 if (v)
107 return v;
108 return (int)(fpos_off(l) - fpos_off(r));
109}
110
103/* 111/*
104 * When possible, we try to satisfy a readdir by peeking at the 112 * When possible, we try to satisfy a readdir by peeking at the
105 * dcache. We make this work by carefully ordering dentries on 113 * dcache. We make this work by carefully ordering dentries on
@@ -156,7 +164,7 @@ more:
156 if (!d_unhashed(dentry) && dentry->d_inode && 164 if (!d_unhashed(dentry) && dentry->d_inode &&
157 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 165 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
158 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 166 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
159 ctx->pos <= di->offset) 167 fpos_cmp(ctx->pos, di->offset) <= 0)
160 break; 168 break;
161 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, 169 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
162 dentry->d_name.len, dentry->d_name.name, di->offset, 170 dentry->d_name.len, dentry->d_name.name, di->offset,
@@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
695 ceph_mdsc_put_request(req); 703 ceph_mdsc_put_request(req);
696 704
697 if (!err) 705 if (!err)
698 err = ceph_init_acl(dentry, dentry->d_inode, dir); 706 ceph_init_acl(dentry, dentry->d_inode, dir);
699 707 else
700 if (err)
701 d_drop(dentry); 708 d_drop(dentry);
702 return err; 709 return err;
703} 710}
@@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
735 if (!err && !req->r_reply_info.head->is_dentry) 742 if (!err && !req->r_reply_info.head->is_dentry)
736 err = ceph_handle_notrace_create(dir, dentry); 743 err = ceph_handle_notrace_create(dir, dentry);
737 ceph_mdsc_put_request(req); 744 ceph_mdsc_put_request(req);
738 if (err) 745 if (!err)
746 ceph_init_acl(dentry, dentry->d_inode, dir);
747 else
739 d_drop(dentry); 748 d_drop(dentry);
740 return err; 749 return err;
741} 750}
@@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
776 err = ceph_handle_notrace_create(dir, dentry); 785 err = ceph_handle_notrace_create(dir, dentry);
777 ceph_mdsc_put_request(req); 786 ceph_mdsc_put_request(req);
778out: 787out:
779 if (err < 0) 788 if (!err)
789 ceph_init_acl(dentry, dentry->d_inode, dir);
790 else
780 d_drop(dentry); 791 d_drop(dentry);
781 return err; 792 return err;
782} 793}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index dfd2ce3419f8..09c7afe32e49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
286 } else { 286 } else {
287 dout("atomic_open finish_open on dn %p\n", dn); 287 dout("atomic_open finish_open on dn %p\n", dn);
288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
289 ceph_init_acl(dentry, dentry->d_inode, dir);
289 *opened |= FILE_CREATED; 290 *opened |= FILE_CREATED;
290 } 291 }
291 err = finish_open(file, dentry, ceph_open, opened); 292 err = finish_open(file, dentry, ceph_open, opened);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2df963f1cf5a..10a4ccbf38da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -144,7 +144,11 @@ enum {
144 Opt_ino32, 144 Opt_ino32,
145 Opt_noino32, 145 Opt_noino32,
146 Opt_fscache, 146 Opt_fscache,
147 Opt_nofscache 147 Opt_nofscache,
148#ifdef CONFIG_CEPH_FS_POSIX_ACL
149 Opt_acl,
150#endif
151 Opt_noacl
148}; 152};
149 153
150static match_table_t fsopt_tokens = { 154static match_table_t fsopt_tokens = {
@@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = {
172 {Opt_noino32, "noino32"}, 176 {Opt_noino32, "noino32"},
173 {Opt_fscache, "fsc"}, 177 {Opt_fscache, "fsc"},
174 {Opt_nofscache, "nofsc"}, 178 {Opt_nofscache, "nofsc"},
179#ifdef CONFIG_CEPH_FS_POSIX_ACL
180 {Opt_acl, "acl"},
181#endif
182 {Opt_noacl, "noacl"},
175 {-1, NULL} 183 {-1, NULL}
176}; 184};
177 185
@@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private)
271 case Opt_nofscache: 279 case Opt_nofscache:
272 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; 280 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
273 break; 281 break;
282#ifdef CONFIG_CEPH_FS_POSIX_ACL
283 case Opt_acl:
284 fsopt->sb_flags |= MS_POSIXACL;
285 break;
286#endif
287 case Opt_noacl:
288 fsopt->sb_flags &= ~MS_POSIXACL;
289 break;
274 default: 290 default:
275 BUG_ON(token); 291 BUG_ON(token);
276 } 292 }
@@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
438 else 454 else
439 seq_puts(m, ",nofsc"); 455 seq_puts(m, ",nofsc");
440 456
457#ifdef CONFIG_CEPH_FS_POSIX_ACL
458 if (fsopt->sb_flags & MS_POSIXACL)
459 seq_puts(m, ",acl");
460 else
461 seq_puts(m, ",noacl");
462#endif
463
441 if (fsopt->wsize) 464 if (fsopt->wsize)
442 seq_printf(m, ",wsize=%d", fsopt->wsize); 465 seq_printf(m, ",wsize=%d", fsopt->wsize);
443 if (fsopt->rsize != CEPH_RSIZE_DEFAULT) 466 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data)
819 842
820 s->s_flags = fsc->mount_options->sb_flags; 843 s->s_flags = fsc->mount_options->sb_flags;
821 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 844 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
822#ifdef CONFIG_CEPH_FS_POSIX_ACL
823 s->s_flags |= MS_POSIXACL;
824#endif
825 845
826 s->s_xattr = ceph_xattr_handlers; 846 s->s_xattr = ceph_xattr_handlers;
827 s->s_fs_info = fsc; 847 s->s_fs_info = fsc;
@@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
911 struct ceph_options *opt = NULL; 931 struct ceph_options *opt = NULL;
912 932
913 dout("ceph_mount\n"); 933 dout("ceph_mount\n");
934
935#ifdef CONFIG_CEPH_FS_POSIX_ACL
936 flags |= MS_POSIXACL;
937#endif
914 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); 938 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
915 if (err < 0) { 939 if (err < 0) {
916 res = ERR_PTR(err); 940 res = ERR_PTR(err);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 19793b56d0a7..d8801a95b685 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -13,6 +13,7 @@
13#include <linux/wait.h> 13#include <linux/wait.h>
14#include <linux/writeback.h> 14#include <linux/writeback.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/posix_acl.h>
16 17
17#include <linux/ceph/libceph.h> 18#include <linux/ceph/libceph.h>
18 19
@@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[];
743struct posix_acl *ceph_get_acl(struct inode *, int); 744struct posix_acl *ceph_get_acl(struct inode *, int);
744int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); 745int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
745int ceph_init_acl(struct dentry *, struct inode *, struct inode *); 746int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
746void ceph_forget_all_cached_acls(struct inode *inode); 747
748static inline void ceph_forget_all_cached_acls(struct inode *inode)
749{
750 forget_all_cached_acls(inode);
751}
747 752
748#else 753#else
749 754
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 898b6565ad3e..a55ec37378c6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -12,6 +12,9 @@
12#define XATTR_CEPH_PREFIX "ceph." 12#define XATTR_CEPH_PREFIX "ceph."
13#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 13#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
14 14
15static int __remove_xattr(struct ceph_inode_info *ci,
16 struct ceph_inode_xattr *xattr);
17
15/* 18/*
16 * List of handlers for synthetic system.* attributes. Other 19 * List of handlers for synthetic system.* attributes. Other
17 * attributes are handled directly. 20 * attributes are handled directly.
@@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
319static int __set_xattr(struct ceph_inode_info *ci, 322static int __set_xattr(struct ceph_inode_info *ci,
320 const char *name, int name_len, 323 const char *name, int name_len,
321 const char *val, int val_len, 324 const char *val, int val_len,
322 int dirty, 325 int flags, int update_xattr,
323 int should_free_name, int should_free_val,
324 struct ceph_inode_xattr **newxattr) 326 struct ceph_inode_xattr **newxattr)
325{ 327{
326 struct rb_node **p; 328 struct rb_node **p;
@@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci,
349 xattr = NULL; 351 xattr = NULL;
350 } 352 }
351 353
354 if (update_xattr) {
355 int err = 0;
356 if (xattr && (flags & XATTR_CREATE))
357 err = -EEXIST;
358 else if (!xattr && (flags & XATTR_REPLACE))
359 err = -ENODATA;
360 if (err) {
361 kfree(name);
362 kfree(val);
363 return err;
364 }
365 if (update_xattr < 0) {
366 if (xattr)
367 __remove_xattr(ci, xattr);
368 kfree(name);
369 return 0;
370 }
371 }
372
352 if (!xattr) { 373 if (!xattr) {
353 new = 1; 374 new = 1;
354 xattr = *newxattr; 375 xattr = *newxattr;
355 xattr->name = name; 376 xattr->name = name;
356 xattr->name_len = name_len; 377 xattr->name_len = name_len;
357 xattr->should_free_name = should_free_name; 378 xattr->should_free_name = update_xattr;
358 379
359 ci->i_xattrs.count++; 380 ci->i_xattrs.count++;
360 dout("__set_xattr count=%d\n", ci->i_xattrs.count); 381 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
@@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
364 if (xattr->should_free_val) 385 if (xattr->should_free_val)
365 kfree((void *)xattr->val); 386 kfree((void *)xattr->val);
366 387
367 if (should_free_name) { 388 if (update_xattr) {
368 kfree((void *)name); 389 kfree((void *)name);
369 name = xattr->name; 390 name = xattr->name;
370 } 391 }
@@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
379 xattr->val = ""; 400 xattr->val = "";
380 401
381 xattr->val_len = val_len; 402 xattr->val_len = val_len;
382 xattr->dirty = dirty; 403 xattr->dirty = update_xattr;
383 xattr->should_free_val = (val && should_free_val); 404 xattr->should_free_val = (val && update_xattr);
384 405
385 if (new) { 406 if (new) {
386 rb_link_node(&xattr->node, parent, p); 407 rb_link_node(&xattr->node, parent, p);
@@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci,
442 struct ceph_inode_xattr *xattr) 463 struct ceph_inode_xattr *xattr)
443{ 464{
444 if (!xattr) 465 if (!xattr)
445 return -EOPNOTSUPP; 466 return -ENODATA;
446 467
447 rb_erase(&xattr->node, &ci->i_xattrs.index); 468 rb_erase(&xattr->node, &ci->i_xattrs.index);
448 469
@@ -588,7 +609,7 @@ start:
588 p += len; 609 p += len;
589 610
590 err = __set_xattr(ci, name, namelen, val, len, 611 err = __set_xattr(ci, name, namelen, val, len,
591 0, 0, 0, &xattrs[numattr]); 612 0, 0, &xattrs[numattr]);
592 613
593 if (err < 0) 614 if (err < 0)
594 goto bad; 615 goto bad;
@@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
850 871
851 dout("setxattr value=%.*s\n", (int)size, value); 872 dout("setxattr value=%.*s\n", (int)size, value);
852 873
874 if (!value)
875 flags |= CEPH_XATTR_REMOVE;
876
853 /* do request */ 877 /* do request */
854 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, 878 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
855 USE_AUTH_MDS); 879 USE_AUTH_MDS);
@@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
892 struct ceph_inode_info *ci = ceph_inode(inode); 916 struct ceph_inode_info *ci = ceph_inode(inode);
893 int issued; 917 int issued;
894 int err; 918 int err;
895 int dirty; 919 int dirty = 0;
896 int name_len = strlen(name); 920 int name_len = strlen(name);
897 int val_len = size; 921 int val_len = size;
898 char *newname = NULL; 922 char *newname = NULL;
@@ -953,12 +977,14 @@ retry:
953 goto retry; 977 goto retry;
954 } 978 }
955 979
956 err = __set_xattr(ci, newname, name_len, newval, 980 err = __set_xattr(ci, newname, name_len, newval, val_len,
957 val_len, 1, 1, 1, &xattr); 981 flags, value ? 1 : -1, &xattr);
958 982
959 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 983 if (!err) {
960 ci->i_xattrs.dirty = true; 984 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
961 inode->i_ctime = CURRENT_TIME; 985 ci->i_xattrs.dirty = true;
986 inode->i_ctime = CURRENT_TIME;
987 }
962 988
963 spin_unlock(&ci->i_ceph_lock); 989 spin_unlock(&ci->i_ceph_lock);
964 if (dirty) 990 if (dirty)
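
The ceph xattr rework above moves the create/replace/remove decision into __set_xattr(): XATTR_CREATE fails with -EEXIST when the name already exists, XATTR_REPLACE fails with -ENODATA when it does not, and a negative update_xattr removes the entry instead of setting it. A minimal userspace check of just that flag logic follows; the rb-tree bookkeeping is omitted and xattr_update_check is an illustrative name, not a kernel function.

#include <errno.h>
#include <stdio.h>
#include <sys/xattr.h>          /* XATTR_CREATE, XATTR_REPLACE */

/* Mirrors the new precheck in __set_xattr(): given whether the attribute
 * already exists and the caller's flags, decide whether the update may
 * proceed (0) or must fail (negative errno). */
static int xattr_update_check(int exists, int flags)
{
    if (exists && (flags & XATTR_CREATE))
        return -EEXIST;         /* create requested but the name is taken */
    if (!exists && (flags & XATTR_REPLACE))
        return -ENODATA;        /* replace requested but nothing to replace */
    return 0;                   /* plain set: either path is fine */
}

int main(void)
{
    printf("exists + CREATE   -> %d\n", xattr_update_check(1, XATTR_CREATE));
    printf("missing + REPLACE -> %d\n", xattr_update_check(0, XATTR_REPLACE));
    printf("missing + CREATE  -> %d\n", xattr_update_check(0, XATTR_CREATE));
    printf("exists + no flags -> %d\n", xattr_update_check(1, 0));
    return 0;
}
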
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f9b4f710d4a..7ff866dbb89e 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
865 return rc; 865 return rc;
866} 866}
867 867
868static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 868struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
869 __u16 fid, u32 *pacllen) 869 const struct cifs_fid *cifsfid, u32 *pacllen)
870{ 870{
871 struct cifs_ntsd *pntsd = NULL; 871 struct cifs_ntsd *pntsd = NULL;
872 unsigned int xid; 872 unsigned int xid;
@@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
877 return ERR_CAST(tlink); 877 return ERR_CAST(tlink);
878 878
879 xid = get_xid(); 879 xid = get_xid();
880 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); 880 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
881 pacllen);
881 free_xid(xid); 882 free_xid(xid);
882 883
883 cifs_put_tlink(tlink); 884 cifs_put_tlink(tlink);
@@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
946 if (!open_file) 947 if (!open_file)
947 return get_cifs_acl_by_path(cifs_sb, path, pacllen); 948 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
948 949
949 pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); 950 pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
950 cifsFileInfo_put(open_file); 951 cifsFileInfo_put(open_file);
951 return pntsd; 952 return pntsd;
952} 953}
@@ -1006,19 +1007,31 @@ out:
1006/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ 1007/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1007int 1008int
1008cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 1009cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1009 struct inode *inode, const char *path, const __u16 *pfid) 1010 struct inode *inode, const char *path,
1011 const struct cifs_fid *pfid)
1010{ 1012{
1011 struct cifs_ntsd *pntsd = NULL; 1013 struct cifs_ntsd *pntsd = NULL;
1012 u32 acllen = 0; 1014 u32 acllen = 0;
1013 int rc = 0; 1015 int rc = 0;
1016 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1017 struct cifs_tcon *tcon;
1014 1018
1015 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1019 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
1016 1020
1017 if (pfid) 1021 if (IS_ERR(tlink))
1018 pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); 1022 return PTR_ERR(tlink);
1019 else 1023 tcon = tlink_tcon(tlink);
1020 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1021 1024
1025 if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
1026 pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
1027 &acllen);
1028 else if (tcon->ses->server->ops->get_acl)
1029 pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
1030 &acllen);
1031 else {
1032 cifs_put_tlink(tlink);
1033 return -EOPNOTSUPP;
1034 }
1022 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 1035 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1023 if (IS_ERR(pntsd)) { 1036 if (IS_ERR(pntsd)) {
1024 rc = PTR_ERR(pntsd); 1037 rc = PTR_ERR(pntsd);
@@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1030 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); 1043 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
1031 } 1044 }
1032 1045
1046 cifs_put_tlink(tlink);
1047
1033 return rc; 1048 return rc;
1034} 1049}
1035 1050
@@ -1043,15 +1058,30 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1043 __u32 secdesclen = 0; 1058 __u32 secdesclen = 0;
1044 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ 1059 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1045 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1060 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1061 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1062 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1063 struct cifs_tcon *tcon;
1064
1065 if (IS_ERR(tlink))
1066 return PTR_ERR(tlink);
1067 tcon = tlink_tcon(tlink);
1046 1068
1047 cifs_dbg(NOISY, "set ACL from mode for %s\n", path); 1069 cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
1048 1070
1049 /* Get the security descriptor */ 1071 /* Get the security descriptor */
1050 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen); 1072
1073 if (tcon->ses->server->ops->get_acl == NULL) {
1074 cifs_put_tlink(tlink);
1075 return -EOPNOTSUPP;
1076 }
1077
1078 pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
1079 &secdesclen);
1051 if (IS_ERR(pntsd)) { 1080 if (IS_ERR(pntsd)) {
1052 rc = PTR_ERR(pntsd); 1081 rc = PTR_ERR(pntsd);
1053 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc); 1082 cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
1054 goto out; 1083 cifs_put_tlink(tlink);
1084 return rc;
1055 } 1085 }
1056 1086
1057 /* 1087 /*
@@ -1064,6 +1094,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1064 pnntsd = kmalloc(secdesclen, GFP_KERNEL); 1094 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1065 if (!pnntsd) { 1095 if (!pnntsd) {
1066 kfree(pntsd); 1096 kfree(pntsd);
1097 cifs_put_tlink(tlink);
1067 return -ENOMEM; 1098 return -ENOMEM;
1068 } 1099 }
1069 1100
@@ -1072,14 +1103,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1072 1103
1073 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc); 1104 cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
1074 1105
1106 if (tcon->ses->server->ops->set_acl == NULL)
1107 rc = -EOPNOTSUPP;
1108
1075 if (!rc) { 1109 if (!rc) {
1076 /* Set the security descriptor */ 1110 /* Set the security descriptor */
1077 rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag); 1111 rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
1112 path, aclflag);
1078 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc); 1113 cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
1079 } 1114 }
1115 cifs_put_tlink(tlink);
1080 1116
1081 kfree(pnntsd); 1117 kfree(pnntsd);
1082 kfree(pntsd); 1118 kfree(pntsd);
1083out:
1084 return rc; 1119 return rc;
1085} 1120}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a245d1809ed8..cf32f0393369 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -323,7 +323,8 @@ struct smb_version_operations {
323 /* async read from the server */ 323 /* async read from the server */
324 int (*async_readv)(struct cifs_readdata *); 324 int (*async_readv)(struct cifs_readdata *);
325 /* async write to the server */ 325 /* async write to the server */
326 int (*async_writev)(struct cifs_writedata *); 326 int (*async_writev)(struct cifs_writedata *,
327 void (*release)(struct kref *));
327 /* sync read from the server */ 328 /* sync read from the server */
328 int (*sync_read)(const unsigned int, struct cifsFileInfo *, 329 int (*sync_read)(const unsigned int, struct cifsFileInfo *,
329 struct cifs_io_parms *, unsigned int *, char **, 330 struct cifs_io_parms *, unsigned int *, char **,
@@ -395,6 +396,12 @@ struct smb_version_operations {
395 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, 396 int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
396 const char *, const void *, const __u16, 397 const char *, const void *, const __u16,
397 const struct nls_table *, int); 398 const struct nls_table *, int);
399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
400 const char *, u32 *);
401 struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
402 const struct cifs_fid *, u32 *);
403 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
404 int);
398}; 405};
399 406
400struct smb_version_values { 407struct smb_version_values {
@@ -1064,7 +1071,7 @@ struct cifs_writedata {
1064 unsigned int pagesz; 1071 unsigned int pagesz;
1065 unsigned int tailsz; 1072 unsigned int tailsz;
1066 unsigned int nr_pages; 1073 unsigned int nr_pages;
1067 struct page *pages[1]; 1074 struct page *pages[];
1068}; 1075};
1069 1076
1070/* 1077/*
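
The ops-table change above threads a caller-supplied release callback through async_writev() so that different write paths can attach their own cleanup when the last reference to a request is dropped. Below is a minimal userspace sketch of that callback-on-last-put pattern; every identifier in it (writedata_put, async_writev, the -5 return value, and so on) is invented for illustration and is not the CIFS or kernel API.

        #include <stdio.h>
        #include <stdlib.h>

        struct writedata {
                int refcount;
                int nr_pages;
        };

        static void writedata_release(struct writedata *wd)
        {
                printf("releasing request metadata\n");
                free(wd);
        }

        static void uncached_writedata_release(struct writedata *wd)
        {
                /* extra teardown only this path needs */
                printf("dropping %d page references first\n", wd->nr_pages);
                writedata_release(wd);
        }

        static void writedata_put(struct writedata *wd,
                                  void (*release)(struct writedata *))
        {
                if (--wd->refcount == 0)
                        release(wd);
        }

        static int async_writev(struct writedata *wd,
                                void (*release)(struct writedata *))
        {
                int rc = -5;    /* pretend the send failed */

                if (rc)
                        writedata_put(wd, release);     /* caller's cleanup runs */
                return rc;
        }

        int main(void)
        {
                struct writedata *wd = calloc(1, sizeof(*wd));

                if (!wd)
                        return 1;
                wd->refcount = 1;
                wd->nr_pages = 4;
                async_writev(wd, uncached_writedata_release);
                return 0;
        }

The point of the indirection is that a path holding extra per-page references can drop them in its own release hook while the common send path stays unchanged.
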
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 79e6e9a93a8c..acc4ee8ed075 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb,
151 151
152extern int cifs_get_inode_info(struct inode **inode, const char *full_path, 152extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
153 FILE_ALL_INFO *data, struct super_block *sb, 153 FILE_ALL_INFO *data, struct super_block *sb,
154 int xid, const __u16 *fid); 154 int xid, const struct cifs_fid *fid);
155extern int cifs_get_inode_info_unix(struct inode **pinode, 155extern int cifs_get_inode_info_unix(struct inode **pinode,
156 const unsigned char *search_path, 156 const unsigned char *search_path,
157 struct super_block *sb, unsigned int xid); 157 struct super_block *sb, unsigned int xid);
@@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path,
162 const unsigned int xid); 162 const unsigned int xid);
163extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 163extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
164 struct cifs_fattr *fattr, struct inode *inode, 164 struct cifs_fattr *fattr, struct inode *inode,
165 const char *path, const __u16 *pfid); 165 const char *path, const struct cifs_fid *pfid);
166extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, 166extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
167 kuid_t, kgid_t); 167 kuid_t, kgid_t);
168extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, 168extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
169 const char *, u32 *); 169 const char *, u32 *);
170extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
171 const struct cifs_fid *, u32 *);
170extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 172extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
171 const char *, int); 173 const char *, int);
172 174
@@ -488,7 +490,8 @@ void cifs_readdata_release(struct kref *refcount);
488int cifs_async_readv(struct cifs_readdata *rdata); 490int cifs_async_readv(struct cifs_readdata *rdata);
489int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid); 491int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
490 492
491int cifs_async_writev(struct cifs_writedata *wdata); 493int cifs_async_writev(struct cifs_writedata *wdata,
494 void (*release)(struct kref *kref));
492void cifs_writev_complete(struct work_struct *work); 495void cifs_writev_complete(struct work_struct *work);
493struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, 496struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
494 work_func_t complete); 497 work_func_t complete);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4d881c35eeca..f3264bd7a83d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1910,7 +1910,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
1910 1910
1911 do { 1911 do {
1912 server = tlink_tcon(wdata->cfile->tlink)->ses->server; 1912 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1913 rc = server->ops->async_writev(wdata); 1913 rc = server->ops->async_writev(wdata, cifs_writedata_release);
1914 } while (rc == -EAGAIN); 1914 } while (rc == -EAGAIN);
1915 1915
1916 for (i = 0; i < wdata->nr_pages; i++) { 1916 for (i = 0; i < wdata->nr_pages; i++) {
@@ -1962,15 +1962,9 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
1962{ 1962{
1963 struct cifs_writedata *wdata; 1963 struct cifs_writedata *wdata;
1964 1964
1965 /* this would overflow */
1966 if (nr_pages == 0) {
1967 cifs_dbg(VFS, "%s: called with nr_pages == 0!\n", __func__);
1968 return NULL;
1969 }
1970
1971 /* writedata + number of page pointers */ 1965 /* writedata + number of page pointers */
1972 wdata = kzalloc(sizeof(*wdata) + 1966 wdata = kzalloc(sizeof(*wdata) +
1973 sizeof(struct page *) * (nr_pages - 1), GFP_NOFS); 1967 sizeof(struct page *) * nr_pages, GFP_NOFS);
1974 if (wdata != NULL) { 1968 if (wdata != NULL) {
1975 kref_init(&wdata->refcount); 1969 kref_init(&wdata->refcount);
1976 INIT_LIST_HEAD(&wdata->list); 1970 INIT_LIST_HEAD(&wdata->list);
@@ -2031,7 +2025,8 @@ cifs_writev_callback(struct mid_q_entry *mid)
2031 2025
2032/* cifs_async_writev - send an async write, and set up mid to handle result */ 2026/* cifs_async_writev - send an async write, and set up mid to handle result */
2033int 2027int
2034cifs_async_writev(struct cifs_writedata *wdata) 2028cifs_async_writev(struct cifs_writedata *wdata,
2029 void (*release)(struct kref *kref))
2035{ 2030{
2036 int rc = -EACCES; 2031 int rc = -EACCES;
2037 WRITE_REQ *smb = NULL; 2032 WRITE_REQ *smb = NULL;
@@ -2105,7 +2100,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
2105 if (rc == 0) 2100 if (rc == 0)
2106 cifs_stats_inc(&tcon->stats.cifs_stats.num_writes); 2101 cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
2107 else 2102 else
2108 kref_put(&wdata->refcount, cifs_writedata_release); 2103 kref_put(&wdata->refcount, release);
2109 2104
2110async_writev_out: 2105async_writev_out:
2111 cifs_small_buf_release(smb); 2106 cifs_small_buf_release(smb);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3a6796caa5a..3db0c5fd9a11 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -378,7 +378,7 @@ cifs_create_get_file_info:
378 xid); 378 xid);
379 else { 379 else {
380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, 380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
381 xid, &fid->netfid); 381 xid, fid);
382 if (newinode) { 382 if (newinode) {
383 if (server->ops->set_lease_key) 383 if (server->ops->set_lease_key)
384 server->ops->set_lease_key(newinode, fid); 384 server->ops->set_lease_key(newinode, fid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 853d6d1cc822..53c15074bb36 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
244 xid); 244 xid);
245 else 245 else
246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
247 xid, &fid->netfid); 247 xid, fid);
248 248
249out: 249out:
250 kfree(buf); 250 kfree(buf);
@@ -2043,7 +2043,8 @@ retry:
2043 } 2043 }
2044 wdata->pid = wdata->cfile->pid; 2044 wdata->pid = wdata->cfile->pid;
2045 server = tlink_tcon(wdata->cfile->tlink)->ses->server; 2045 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2046 rc = server->ops->async_writev(wdata); 2046 rc = server->ops->async_writev(wdata,
2047 cifs_writedata_release);
2047 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); 2048 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
2048 2049
2049 for (i = 0; i < nr_pages; ++i) 2050 for (i = 0; i < nr_pages; ++i)
@@ -2331,9 +2332,20 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2331} 2332}
2332 2333
2333static void 2334static void
2334cifs_uncached_writev_complete(struct work_struct *work) 2335cifs_uncached_writedata_release(struct kref *refcount)
2335{ 2336{
2336 int i; 2337 int i;
2338 struct cifs_writedata *wdata = container_of(refcount,
2339 struct cifs_writedata, refcount);
2340
2341 for (i = 0; i < wdata->nr_pages; i++)
2342 put_page(wdata->pages[i]);
2343 cifs_writedata_release(refcount);
2344}
2345
2346static void
2347cifs_uncached_writev_complete(struct work_struct *work)
2348{
2337 struct cifs_writedata *wdata = container_of(work, 2349 struct cifs_writedata *wdata = container_of(work,
2338 struct cifs_writedata, work); 2350 struct cifs_writedata, work);
2339 struct inode *inode = wdata->cfile->dentry->d_inode; 2351 struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
2347 2359
2348 complete(&wdata->done); 2360 complete(&wdata->done);
2349 2361
2350 if (wdata->result != -EAGAIN) { 2362 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2351 for (i = 0; i < wdata->nr_pages; i++)
2352 put_page(wdata->pages[i]);
2353 }
2354
2355 kref_put(&wdata->refcount, cifs_writedata_release);
2356} 2363}
2357 2364
2358/* attempt to send write to server, retry on any -EAGAIN errors */ 2365/* attempt to send write to server, retry on any -EAGAIN errors */
@@ -2370,7 +2377,8 @@ cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2370 if (rc != 0) 2377 if (rc != 0)
2371 continue; 2378 continue;
2372 } 2379 }
2373 rc = server->ops->async_writev(wdata); 2380 rc = server->ops->async_writev(wdata,
2381 cifs_uncached_writedata_release);
2374 } while (rc == -EAGAIN); 2382 } while (rc == -EAGAIN);
2375 2383
2376 return rc; 2384 return rc;
@@ -2381,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2381 unsigned long nr_segs, loff_t *poffset) 2389 unsigned long nr_segs, loff_t *poffset)
2382{ 2390{
2383 unsigned long nr_pages, i; 2391 unsigned long nr_pages, i;
2384 size_t copied, len, cur_len; 2392 size_t bytes, copied, len, cur_len;
2385 ssize_t total_written = 0; 2393 ssize_t total_written = 0;
2386 loff_t offset; 2394 loff_t offset;
2387 struct iov_iter it; 2395 struct iov_iter it;
@@ -2436,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2436 2444
2437 save_len = cur_len; 2445 save_len = cur_len;
2438 for (i = 0; i < nr_pages; i++) { 2446 for (i = 0; i < nr_pages; i++) {
2439 copied = min_t(const size_t, cur_len, PAGE_SIZE); 2447 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2440 copied = iov_iter_copy_from_user(wdata->pages[i], &it, 2448 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2441 0, copied); 2449 0, bytes);
2442 cur_len -= copied; 2450 cur_len -= copied;
2443 iov_iter_advance(&it, copied); 2451 iov_iter_advance(&it, copied);
2452 /*
2453 * If we didn't copy as much as we expected, then that
2454 * may mean we trod into an unmapped area. Stop copying
2455 * at that point. On the next pass through the big
2456 * loop, we'll likely end up getting a zero-length
2457 * write and bailing out of it.
2458 */
2459 if (copied < bytes)
2460 break;
2444 } 2461 }
2445 cur_len = save_len - cur_len; 2462 cur_len = save_len - cur_len;
2446 2463
2464 /*
2465 * If we have no data to send, then that probably means that
2466 * the copy above failed altogether. That's most likely because
2467 * the address in the iovec was bogus. Set the rc to -EFAULT,
2468 * free anything we allocated and bail out.
2469 */
2470 if (!cur_len) {
2471 for (i = 0; i < nr_pages; i++)
2472 put_page(wdata->pages[i]);
2473 kfree(wdata);
2474 rc = -EFAULT;
2475 break;
2476 }
2477
2478 /*
2479 * i + 1 now represents the number of pages we actually used in
2480 * the copy phase above. Bring nr_pages down to that, and free
2481 * any pages that we didn't use.
2482 */
2483 for ( ; nr_pages > i + 1; nr_pages--)
2484 put_page(wdata->pages[nr_pages - 1]);
2485
2447 wdata->sync_mode = WB_SYNC_ALL; 2486 wdata->sync_mode = WB_SYNC_ALL;
2448 wdata->nr_pages = nr_pages; 2487 wdata->nr_pages = nr_pages;
2449 wdata->offset = (__u64)offset; 2488 wdata->offset = (__u64)offset;
@@ -2454,7 +2493,8 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2454 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); 2493 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2455 rc = cifs_uncached_retry_writev(wdata); 2494 rc = cifs_uncached_retry_writev(wdata);
2456 if (rc) { 2495 if (rc) {
2457 kref_put(&wdata->refcount, cifs_writedata_release); 2496 kref_put(&wdata->refcount,
2497 cifs_uncached_writedata_release);
2458 break; 2498 break;
2459 } 2499 }
2460 2500
@@ -2496,7 +2536,7 @@ restart_loop:
2496 } 2536 }
2497 } 2537 }
2498 list_del_init(&wdata->list); 2538 list_del_init(&wdata->list);
2499 kref_put(&wdata->refcount, cifs_writedata_release); 2539 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2500 } 2540 }
2501 2541
2502 if (total_written > 0) 2542 if (total_written > 0)
@@ -2559,8 +2599,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2559 if (rc > 0) { 2599 if (rc > 0) {
2560 ssize_t err; 2600 ssize_t err;
2561 2601
2562 err = generic_write_sync(file, pos, rc); 2602 err = generic_write_sync(file, iocb->ki_pos - rc, rc);
2563 if (err < 0 && rc > 0) 2603 if (err < 0)
2564 rc = err; 2604 rc = err;
2565 } 2605 }
2566 2606
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cb9679d7357..aadc2b68678b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -527,10 +527,15 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
527 return PTR_ERR(tlink); 527 return PTR_ERR(tlink);
528 tcon = tlink_tcon(tlink); 528 tcon = tlink_tcon(tlink);
529 529
530 rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS", 530 if (tcon->ses->server->ops->query_all_EAs == NULL) {
531 ea_value, 4 /* size of buf */, cifs_sb->local_nls, 531 cifs_put_tlink(tlink);
532 cifs_sb->mnt_cifs_flags & 532 return -EOPNOTSUPP;
533 CIFS_MOUNT_MAP_SPECIAL_CHR); 533 }
534
535 rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
536 "SETFILEBITS", ea_value, 4 /* size of buf */,
537 cifs_sb->local_nls,
538 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
534 cifs_put_tlink(tlink); 539 cifs_put_tlink(tlink);
535 if (rc < 0) 540 if (rc < 0)
536 return (int)rc; 541 return (int)rc;
@@ -672,7 +677,7 @@ cgfi_exit:
672int 677int
673cifs_get_inode_info(struct inode **inode, const char *full_path, 678cifs_get_inode_info(struct inode **inode, const char *full_path,
674 FILE_ALL_INFO *data, struct super_block *sb, int xid, 679 FILE_ALL_INFO *data, struct super_block *sb, int xid,
675 const __u16 *fid) 680 const struct cifs_fid *fid)
676{ 681{
677 bool validinum = false; 682 bool validinum = false;
678 __u16 srchflgs; 683 __u16 srchflgs;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 9ac5bfc9cc56..526fb89f9230 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1067,6 +1067,15 @@ struct smb_version_operations smb1_operations = {
1067 .query_mf_symlink = cifs_query_mf_symlink, 1067 .query_mf_symlink = cifs_query_mf_symlink,
1068 .create_mf_symlink = cifs_create_mf_symlink, 1068 .create_mf_symlink = cifs_create_mf_symlink,
1069 .is_read_op = cifs_is_read_op, 1069 .is_read_op = cifs_is_read_op,
1070#ifdef CONFIG_CIFS_XATTR
1071 .query_all_EAs = CIFSSMBQAllEAs,
1072 .set_EA = CIFSSMBSetEA,
1073#endif /* CIFS_XATTR */
1074#ifdef CONFIG_CIFS_ACL
1075 .get_acl = get_cifs_acl,
1076 .get_acl_by_fid = get_cifs_acl_by_fid,
1077 .set_acl = set_cifs_acl,
1078#endif /* CIFS_ACL */
1070}; 1079};
1071 1080
1072struct smb_version_values smb1_values = { 1081struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index c38350851b08..bc0bb9c34f72 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -57,4 +57,7 @@
57#define SMB2_CMACAES_SIZE (16) 57#define SMB2_CMACAES_SIZE (16)
58#define SMB3_SIGNKEY_SIZE (16) 58#define SMB3_SIGNKEY_SIZE (16)
59 59
60/* Maximum buffer size value we can send with 1 credit */
61#define SMB2_MAX_BUFFER_SIZE 65536
62
60#endif /* _SMB2_GLOB_H */ 63#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 757da3e54d3d..192f51a12cf1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
182 /* start with specified wsize, or default */ 182 /* start with specified wsize, or default */
183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
184 wsize = min_t(unsigned int, wsize, server->max_write); 184 wsize = min_t(unsigned int, wsize, server->max_write);
185 /* 185 /* set it to the maximum buffer size value we can send with 1 credit */
186 * limit write size to 2 ** 16, because we don't support multicredit 186 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
187 * requests now.
188 */
189 wsize = min_t(unsigned int, wsize, 2 << 15);
190 187
191 return wsize; 188 return wsize;
192} 189}
@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
200 /* start with specified rsize, or default */ 197 /* start with specified rsize, or default */
201 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 198 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
202 rsize = min_t(unsigned int, rsize, server->max_read); 199 rsize = min_t(unsigned int, rsize, server->max_read);
203 /* 200 /* set it to the maximum buffer size value we can send with 1 credit */
204 * limit write size to 2 ** 16, because we don't support multicredit 201 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
205 * requests now.
206 */
207 rsize = min_t(unsigned int, rsize, 2 << 15);
208 202
209 return rsize; 203 return rsize;
210} 204}
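
Both hunks above replace an open-coded 2 << 15 limit with the named SMB2_MAX_BUFFER_SIZE constant, clamping the negotiated I/O sizes to what a single credit can carry. As a rough illustration of the clamping order (requested size, then server limit, then single-credit cap), with made-up constants, defaults, and function names:

        #include <stdio.h>

        #define MAX_1CREDIT_BUFFER      65536u  /* analogue of SMB2_MAX_BUFFER_SIZE */

        static unsigned int min_u(unsigned int a, unsigned int b)
        {
                return a < b ? a : b;
        }

        /* pick a write size: user request, capped by server limit and 1-credit max */
        static unsigned int negotiate_wsize(unsigned int requested,
                                            unsigned int server_max_write)
        {
                unsigned int wsize = requested ? requested : 1048576;   /* stand-in default */

                wsize = min_u(wsize, server_max_write);
                return min_u(wsize, MAX_1CREDIT_BUFFER);
        }

        int main(void)
        {
                printf("%u\n", negotiate_wsize(0, 8 * 1024 * 1024));    /* prints 65536 */
                return 0;
        }
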
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2013234b73ad..860344701067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
413 413
414 /* SMB2 only has an extended negflavor */ 414 /* SMB2 only has an extended negflavor */
415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED; 415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
416 server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); 416 /* set it to the maximum buffer size value we can send with 1 credit */
417 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
418 SMB2_MAX_BUFFER_SIZE);
417 server->max_read = le32_to_cpu(rsp->MaxReadSize); 419 server->max_read = le32_to_cpu(rsp->MaxReadSize);
418 server->max_write = le32_to_cpu(rsp->MaxWriteSize); 420 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
419 /* BB Do we need to validate the SecurityMode? */ 421 /* BB Do we need to validate the SecurityMode? */
@@ -1890,7 +1892,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
1890 1892
1891/* smb2_async_writev - send an async write, and set up mid to handle result */ 1893/* smb2_async_writev - send an async write, and set up mid to handle result */
1892int 1894int
1893smb2_async_writev(struct cifs_writedata *wdata) 1895smb2_async_writev(struct cifs_writedata *wdata,
1896 void (*release)(struct kref *kref))
1894{ 1897{
1895 int rc = -EACCES; 1898 int rc = -EACCES;
1896 struct smb2_write_req *req = NULL; 1899 struct smb2_write_req *req = NULL;
@@ -1938,7 +1941,7 @@ smb2_async_writev(struct cifs_writedata *wdata)
1938 smb2_writev_callback, wdata, 0); 1941 smb2_writev_callback, wdata, 0);
1939 1942
1940 if (rc) { 1943 if (rc) {
1941 kref_put(&wdata->refcount, cifs_writedata_release); 1944 kref_put(&wdata->refcount, release);
1942 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 1945 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1943 } 1946 }
1944 1947
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 93adc64666f3..0ce48db20a65 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,7 +123,8 @@ extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
123extern int smb2_async_readv(struct cifs_readdata *rdata); 123extern int smb2_async_readv(struct cifs_readdata *rdata);
124extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, 124extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
125 unsigned int *nbytes, char **buf, int *buf_type); 125 unsigned int *nbytes, char **buf, int *buf_type);
126extern int smb2_async_writev(struct cifs_writedata *wdata); 126extern int smb2_async_writev(struct cifs_writedata *wdata,
127 void (*release)(struct kref *kref));
127extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms, 128extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
128 unsigned int *nbytes, struct kvec *iov, int n_vec); 129 unsigned int *nbytes, struct kvec *iov, int n_vec);
129extern int SMB2_echo(struct TCP_Server_Info *server); 130extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 95c43bb20335..5ac836a86b18 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -176,8 +176,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
176 rc = -ENOMEM; 176 rc = -ENOMEM;
177 } else { 177 } else {
178 memcpy(pacl, ea_value, value_size); 178 memcpy(pacl, ea_value, value_size);
179 rc = set_cifs_acl(pacl, value_size, 179 if (pTcon->ses->server->ops->set_acl)
180 direntry->d_inode, full_path, CIFS_ACL_DACL); 180 rc = pTcon->ses->server->ops->set_acl(pacl,
181 value_size, direntry->d_inode,
182 full_path, CIFS_ACL_DACL);
183 else
184 rc = -EOPNOTSUPP;
181 if (rc == 0) /* force revalidate of the inode */ 185 if (rc == 0) /* force revalidate of the inode */
182 CIFS_I(direntry->d_inode)->time = 0; 186 CIFS_I(direntry->d_inode)->time = 0;
183 kfree(pacl); 187 kfree(pacl);
@@ -323,8 +327,11 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
323 u32 acllen; 327 u32 acllen;
324 struct cifs_ntsd *pacl; 328 struct cifs_ntsd *pacl;
325 329
326 pacl = get_cifs_acl(cifs_sb, direntry->d_inode, 330 if (pTcon->ses->server->ops->get_acl == NULL)
327 full_path, &acllen); 331 goto get_ea_exit; /* rc already EOPNOTSUPP */
332
333 pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
334 direntry->d_inode, full_path, &acllen);
328 if (IS_ERR(pacl)) { 335 if (IS_ERR(pacl)) {
329 rc = PTR_ERR(pacl); 336 rc = PTR_ERR(pacl);
330 cifs_dbg(VFS, "%s: error %zd getting sec desc\n", 337 cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ece55565b9cd..d3a534fdc5ff 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -771,6 +771,8 @@ do { \
771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ 771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
772 (einode)->xtime.tv_sec = \ 772 (einode)->xtime.tv_sec = \
773 (signed)le32_to_cpu((raw_inode)->xtime); \ 773 (signed)le32_to_cpu((raw_inode)->xtime); \
774 else \
775 (einode)->xtime.tv_sec = 0; \
774 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ 776 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
775 ext4_decode_extra_time(&(einode)->xtime, \ 777 ext4_decode_extra_time(&(einode)->xtime, \
776 raw_inode->xtime ## _extra); \ 778 raw_inode->xtime ## _extra); \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10cff4736b11..74bc2d549c58 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3906 } else 3906 } else
3907 err = ret; 3907 err = ret;
3908 map->m_flags |= EXT4_MAP_MAPPED; 3908 map->m_flags |= EXT4_MAP_MAPPED;
3909 map->m_pblk = newblock;
3909 if (allocated > map->m_len) 3910 if (allocated > map->m_len)
3910 allocated = map->m_len; 3911 allocated = map->m_len;
3911 map->m_len = allocated; 3912 map->m_len = allocated;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 43e64f6022eb..1a5073959f32 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
152 if (ret > 0) { 152 if (ret > 0) {
153 ssize_t err; 153 ssize_t err;
154 154
155 err = generic_write_sync(file, pos, ret); 155 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
156 if (err < 0 && ret > 0) 156 if (err < 0 && ret > 0)
157 ret = err; 157 ret = err;
158 } 158 }
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6bea80614d77..a2a837f00407 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); 140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
141 if (IS_ERR(handle)) { 141 if (IS_ERR(handle)) {
142 err = -EINVAL; 142 err = -EINVAL;
143 goto swap_boot_out; 143 goto journal_err_out;
144 } 144 }
145 145
146 /* Protect extent tree against block allocations via delalloc */ 146 /* Protect extent tree against block allocations via delalloc */
@@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
198 198
199 ext4_double_up_write_data_sem(inode, inode_bl); 199 ext4_double_up_write_data_sem(inode, inode_bl);
200 200
201journal_err_out:
201 ext4_inode_resume_unlocked_dio(inode); 202 ext4_inode_resume_unlocked_dio(inode);
202 ext4_inode_resume_unlocked_dio(inode_bl); 203 ext4_inode_resume_unlocked_dio(inode_bl);
203 204
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c5adbb318a90..f3b84cd9de56 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
243 ext4_group_t group; 243 ext4_group_t group;
244 ext4_group_t last_group; 244 ext4_group_t last_group;
245 unsigned overhead; 245 unsigned overhead;
246 __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
246 247
247 BUG_ON(flex_gd->count == 0 || group_data == NULL); 248 BUG_ON(flex_gd->count == 0 || group_data == NULL);
248 249
@@ -266,7 +267,7 @@ next_group:
266 src_group++; 267 src_group++;
267 for (; src_group <= last_group; src_group++) { 268 for (; src_group <= last_group; src_group++) {
268 overhead = ext4_group_overhead_blocks(sb, src_group); 269 overhead = ext4_group_overhead_blocks(sb, src_group);
269 if (overhead != 0) 270 if (overhead == 0)
270 last_blk += group_data[src_group - group].blocks_count; 271 last_blk += group_data[src_group - group].blocks_count;
271 else 272 else
272 break; 273 break;
@@ -280,8 +281,7 @@ next_group:
280 group = ext4_get_group_number(sb, start_blk - 1); 281 group = ext4_get_group_number(sb, start_blk - 1);
281 group -= group_data[0].group; 282 group -= group_data[0].group;
282 group_data[group].free_blocks_count--; 283 group_data[group].free_blocks_count--;
283 if (flexbg_size > 1) 284 flex_gd->bg_flags[group] &= uninit_mask;
284 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
285 } 285 }
286 286
287 /* Allocate inode bitmaps */ 287 /* Allocate inode bitmaps */
@@ -292,22 +292,30 @@ next_group:
292 group = ext4_get_group_number(sb, start_blk - 1); 292 group = ext4_get_group_number(sb, start_blk - 1);
293 group -= group_data[0].group; 293 group -= group_data[0].group;
294 group_data[group].free_blocks_count--; 294 group_data[group].free_blocks_count--;
295 if (flexbg_size > 1) 295 flex_gd->bg_flags[group] &= uninit_mask;
296 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
297 } 296 }
298 297
299 /* Allocate inode tables */ 298 /* Allocate inode tables */
300 for (; it_index < flex_gd->count; it_index++) { 299 for (; it_index < flex_gd->count; it_index++) {
301 if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) 300 unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
301 ext4_fsblk_t next_group_start;
302
303 if (start_blk + itb > last_blk)
302 goto next_group; 304 goto next_group;
303 group_data[it_index].inode_table = start_blk; 305 group_data[it_index].inode_table = start_blk;
304 group = ext4_get_group_number(sb, start_blk - 1); 306 group = ext4_get_group_number(sb, start_blk);
307 next_group_start = ext4_group_first_block_no(sb, group + 1);
305 group -= group_data[0].group; 308 group -= group_data[0].group;
306 group_data[group].free_blocks_count -=
307 EXT4_SB(sb)->s_itb_per_group;
308 if (flexbg_size > 1)
309 flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
310 309
310 if (start_blk + itb > next_group_start) {
311 flex_gd->bg_flags[group + 1] &= uninit_mask;
312 overhead = start_blk + itb - next_group_start;
313 group_data[group + 1].free_blocks_count -= overhead;
314 itb -= overhead;
315 }
316
317 group_data[group].free_blocks_count -= itb;
318 flex_gd->bg_flags[group] &= uninit_mask;
311 start_blk += EXT4_SB(sb)->s_itb_per_group; 319 start_blk += EXT4_SB(sb)->s_itb_per_group;
312 } 320 }
313 321
@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
401 start = ext4_group_first_block_no(sb, group); 409 start = ext4_group_first_block_no(sb, group);
402 group -= flex_gd->groups[0].group; 410 group -= flex_gd->groups[0].group;
403 411
404 count2 = sb->s_blocksize * 8 - (block - start); 412 count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
405 if (count2 > count) 413 if (count2 > count)
406 count2 = count; 414 count2 = count;
407 415
@@ -620,7 +628,7 @@ handle_ib:
620 if (err) 628 if (err)
621 goto out; 629 goto out;
622 count = group_table_count[j]; 630 count = group_table_count[j];
623 start = group_data[i].block_bitmap; 631 start = (&group_data[i].block_bitmap)[j];
624 block = start; 632 block = start;
625 } 633 }
626 634
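
One small idiom in the resize change above: instead of testing flexbg_size > 1 before every flag update, a mask is computed once (~EXT4_BG_BLOCK_UNINIT when the flag should be cleared, ~0 otherwise) and unconditionally ANDed in, since ANDing with all-ones is a no-op. A toy demonstration of the idea, with an arbitrary flag value standing in for EXT4_BG_BLOCK_UNINIT:

        #include <stdio.h>

        #define BG_BLOCK_UNINIT 0x0002  /* arbitrary stand-in flag value */

        int main(void)
        {
                unsigned short bg_flags[4] = { 0x0003, 0x0002, 0x0007, 0x0002 };
                int flexbg_size = 1;    /* try 1 and >1 to see both behaviours */
                /* computed once: either clears the flag or leaves flags untouched */
                unsigned short uninit_mask = (flexbg_size > 1) ?
                                (unsigned short)~BG_BLOCK_UNINIT : (unsigned short)~0;
                int i;

                for (i = 0; i < 4; i++) {
                        bg_flags[i] &= uninit_mask;     /* no per-iteration branch */
                        printf("group %d flags: 0x%04x\n", i, bg_flags[i]);
                }
                return 0;
        }
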
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1f7784de05b6..710fed2377d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3695 for (i = 0; i < 4; i++) 3695 for (i = 0; i < 4; i++)
3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
3697 sbi->s_def_hash_version = es->s_def_hash_version; 3697 sbi->s_def_hash_version = es->s_def_hash_version;
3698 i = le32_to_cpu(es->s_flags); 3698 if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
3699 if (i & EXT2_FLAGS_UNSIGNED_HASH) 3699 i = le32_to_cpu(es->s_flags);
3700 sbi->s_hash_unsigned = 3; 3700 if (i & EXT2_FLAGS_UNSIGNED_HASH)
3701 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3701 sbi->s_hash_unsigned = 3;
3702 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3702#ifdef __CHAR_UNSIGNED__ 3703#ifdef __CHAR_UNSIGNED__
3703 es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3704 if (!(sb->s_flags & MS_RDONLY))
3704 sbi->s_hash_unsigned = 3; 3705 es->s_flags |=
3706 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3707 sbi->s_hash_unsigned = 3;
3705#else 3708#else
3706 es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3709 if (!(sb->s_flags & MS_RDONLY))
3710 es->s_flags |=
3711 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3707#endif 3712#endif
3713 }
3708 } 3714 }
3709 3715
3710 /* Handle clustersize */ 3716 /* Handle clustersize */
diff --git a/fs/file.c b/fs/file.c
index 771578b33fb6..db25c2bdfe46 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -34,7 +34,7 @@ static void *alloc_fdmem(size_t size)
34 * vmalloc() if the allocation size will be considered "large" by the VM. 34 * vmalloc() if the allocation size will be considered "large" by the VM.
35 */ 35 */
36 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 36 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
37 void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); 37 void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
38 if (data != NULL) 38 if (data != NULL)
39 return data; 39 return data;
40 } 40 }
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959efad64f..b5ebc2d7d80d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
50 struct fscache_object *xobj; 50 struct fscache_object *xobj;
51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; 51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
52 52
53 ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
54
53 write_lock(&fscache_object_list_lock); 55 write_lock(&fscache_object_list_lock);
54 56
55 while (*p) { 57 while (*p) {
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
75 */ 77 */
76void fscache_objlist_remove(struct fscache_object *obj) 78void fscache_objlist_remove(struct fscache_object *obj)
77{ 79{
80 if (RB_EMPTY_NODE(&obj->objlist_link))
81 return;
82
78 write_lock(&fscache_object_list_lock); 83 write_lock(&fscache_object_list_lock);
79 84
80 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); 85 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 53d35c504240..d3b4539f1651 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
314 object->cache = cache; 314 object->cache = cache;
315 object->cookie = cookie; 315 object->cookie = cookie;
316 object->parent = NULL; 316 object->parent = NULL;
317#ifdef CONFIG_FSCACHE_OBJECT_LIST
318 RB_CLEAR_NODE(&object->objlist_link);
319#endif
317 320
318 object->oob_event_mask = 0; 321 object->oob_event_mask = 0;
319 for (t = object->oob_table; t->events; t++) 322 for (t = object->oob_table; t->events; t++)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 8360674c85bc..60bb365f54a5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
514 * similarly constrained call sites 514 * similarly constrained call sites
515 */ 515 */
516 ret = start_this_handle(journal, handle, GFP_NOFS); 516 ret = start_this_handle(journal, handle, GFP_NOFS);
517 if (ret < 0) 517 if (ret < 0) {
518 jbd2_journal_free_reserved(handle); 518 jbd2_journal_free_reserved(handle);
519 return ret;
520 }
519 handle->h_type = type; 521 handle->h_type = type;
520 handle->h_line_no = line_no; 522 handle->h_line_no = line_no;
521 return ret; 523 return 0;
522} 524}
523EXPORT_SYMBOL(jbd2_journal_start_reserved); 525EXPORT_SYMBOL(jbd2_journal_start_reserved);
524 526
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index e973b85d6afd..5a8ea16eedbc 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
86 rc = posix_acl_equiv_mode(acl, &inode->i_mode); 86 rc = posix_acl_equiv_mode(acl, &inode->i_mode);
87 if (rc < 0) 87 if (rc < 0)
88 return rc; 88 return rc;
89 inode->i_ctime = CURRENT_TIME;
90 mark_inode_dirty(inode);
89 if (rc == 0) 91 if (rc == 0)
90 acl = NULL; 92 acl = NULL;
91 break; 93 break;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e066a3902973..ab798a88ec1d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
779 struct nlm_file *file = block->b_file; 779 struct nlm_file *file = block->b_file;
780 struct nlm_lock *lock = &block->b_call->a_args.lock; 780 struct nlm_lock *lock = &block->b_call->a_args.lock;
781 int error; 781 int error;
782 loff_t fl_start, fl_end;
782 783
783 dprintk("lockd: grant blocked lock %p\n", block); 784 dprintk("lockd: grant blocked lock %p\n", block);
784 785
@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
796 } 797 }
797 798
798 /* Try the lock operation again */ 799 /* Try the lock operation again */
800 /* vfs_lock_file() can mangle fl_start and fl_end, but we need
801 * them unchanged for the GRANT_MSG
802 */
799 lock->fl.fl_flags |= FL_SLEEP; 803 lock->fl.fl_flags |= FL_SLEEP;
804 fl_start = lock->fl.fl_start;
805 fl_end = lock->fl.fl_end;
800 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); 806 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
801 lock->fl.fl_flags &= ~FL_SLEEP; 807 lock->fl.fl_flags &= ~FL_SLEEP;
808 lock->fl.fl_start = fl_start;
809 lock->fl.fl_end = fl_end;
802 810
803 switch (error) { 811 switch (error) {
804 case 0: 812 case 0:
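
The lockd fix above relies on a plain save-and-restore: vfs_lock_file() may rewrite fl_start and fl_end, so the original range is stashed in locals and put back before the GRANT_MSG is built. A compact userspace sketch of that idiom, with a simplified struct file_lock and a try_lock() stand-in that deliberately clobbers the range; both are invented for the example.

        #include <stdio.h>

        struct file_lock {
                long long fl_start;
                long long fl_end;
        };

        /* stand-in for vfs_lock_file(): may clobber the range it is given */
        static int try_lock(struct file_lock *fl)
        {
                fl->fl_start = 0;
                fl->fl_end = -1;        /* e.g. expanded to "whole file" internally */
                return 0;
        }

        int main(void)
        {
                struct file_lock lock = { .fl_start = 100, .fl_end = 199 };
                long long start = lock.fl_start, end = lock.fl_end;     /* save */
                int error = try_lock(&lock);

                lock.fl_start = start;  /* restore what the caller asked for */
                lock.fl_end = end;
                printf("error=%d range=[%lld,%lld]\n",
                       error, lock.fl_start, lock.fl_end);
                return 0;
        }
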
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index be38b573495a..4a48fe4b84b6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1846,6 +1846,11 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1846 GFP_KERNEL)) { 1846 GFP_KERNEL)) {
1847 SetPageUptodate(page); 1847 SetPageUptodate(page);
1848 unlock_page(page); 1848 unlock_page(page);
1849 /*
1850 * add_to_page_cache_lru() grabs an extra page refcount.
1851 * Drop it here to avoid leaking this page later.
1852 */
1853 page_cache_release(page);
1849 } else 1854 } else
1850 __free_page(page); 1855 __free_page(page);
1851 1856
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 28a0a3cbd3b7..360114ae8b82 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -164,17 +164,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
165 nfs_fscache_invalidate(inode); 165 nfs_fscache_invalidate(inode);
166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
167 | NFS_INO_INVALID_LABEL
168 | NFS_INO_INVALID_DATA 167 | NFS_INO_INVALID_DATA
169 | NFS_INO_INVALID_ACCESS 168 | NFS_INO_INVALID_ACCESS
170 | NFS_INO_INVALID_ACL 169 | NFS_INO_INVALID_ACL
171 | NFS_INO_REVAL_PAGECACHE; 170 | NFS_INO_REVAL_PAGECACHE;
172 } else 171 } else
173 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 172 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
174 | NFS_INO_INVALID_LABEL
175 | NFS_INO_INVALID_ACCESS 173 | NFS_INO_INVALID_ACCESS
176 | NFS_INO_INVALID_ACL 174 | NFS_INO_INVALID_ACL
177 | NFS_INO_REVAL_PAGECACHE; 175 | NFS_INO_REVAL_PAGECACHE;
176 nfs_zap_label_cache_locked(nfsi);
178} 177}
179 178
180void nfs_zap_caches(struct inode *inode) 179void nfs_zap_caches(struct inode *inode)
@@ -266,6 +265,13 @@ nfs_init_locked(struct inode *inode, void *opaque)
266} 265}
267 266
268#ifdef CONFIG_NFS_V4_SECURITY_LABEL 267#ifdef CONFIG_NFS_V4_SECURITY_LABEL
268static void nfs_clear_label_invalid(struct inode *inode)
269{
270 spin_lock(&inode->i_lock);
271 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
272 spin_unlock(&inode->i_lock);
273}
274
269void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 275void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
270 struct nfs4_label *label) 276 struct nfs4_label *label)
271{ 277{
@@ -283,6 +289,7 @@ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
283 __func__, 289 __func__,
284 (char *)label->label, 290 (char *)label->label,
285 label->len, error); 291 label->len, error);
292 nfs_clear_label_invalid(inode);
286 } 293 }
287} 294}
288 295
@@ -1648,7 +1655,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1648 inode->i_blocks = fattr->du.nfs2.blocks; 1655 inode->i_blocks = fattr->du.nfs2.blocks;
1649 1656
1650 /* Update attrtimeo value if we're out of the unstable period */ 1657 /* Update attrtimeo value if we're out of the unstable period */
1651 if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) { 1658 if (invalid & NFS_INO_INVALID_ATTR) {
1652 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); 1659 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
1653 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1660 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1654 nfsi->attrtimeo_timestamp = now; 1661 nfsi->attrtimeo_timestamp = now;
@@ -1661,7 +1668,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1661 } 1668 }
1662 } 1669 }
1663 invalid &= ~NFS_INO_INVALID_ATTR; 1670 invalid &= ~NFS_INO_INVALID_ATTR;
1664 invalid &= ~NFS_INO_INVALID_LABEL;
1665 /* Don't invalidate the data if we were to blame */ 1671 /* Don't invalidate the data if we were to blame */
1666 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) 1672 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
1667 || S_ISLNK(inode->i_mode))) 1673 || S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8b5cc04a8611..b46cf5a67329 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -176,7 +176,8 @@ extern struct nfs_server *nfs4_create_server(
176extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *, 176extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
177 struct nfs_fh *); 177 struct nfs_fh *);
178extern int nfs4_update_server(struct nfs_server *server, const char *hostname, 178extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
179 struct sockaddr *sap, size_t salen); 179 struct sockaddr *sap, size_t salen,
180 struct net *net);
180extern void nfs_free_server(struct nfs_server *server); 181extern void nfs_free_server(struct nfs_server *server);
181extern struct nfs_server *nfs_clone_server(struct nfs_server *, 182extern struct nfs_server *nfs_clone_server(struct nfs_server *,
182 struct nfs_fh *, 183 struct nfs_fh *,
@@ -279,9 +280,18 @@ static inline void nfs4_label_free(struct nfs4_label *label)
279 } 280 }
280 return; 281 return;
281} 282}
283
284static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
285{
286 if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
287 nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
288}
282#else 289#else
283static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; } 290static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
284static inline void nfs4_label_free(void *label) {} 291static inline void nfs4_label_free(void *label) {}
292static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
293{
294}
285#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 295#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
286 296
287/* proc.c */ 297/* proc.c */
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index aa9bc973f36a..a462ef0fb5d6 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -18,6 +18,7 @@
18#include <linux/lockd/bind.h> 18#include <linux/lockd/bind.h>
19#include <linux/nfs_mount.h> 19#include <linux/nfs_mount.h>
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/xattr.h>
21 22
22#include "iostat.h" 23#include "iostat.h"
23#include "internal.h" 24#include "internal.h"
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 860ad26a5590..0e46d3d1b6cc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1135,6 +1135,7 @@ static int nfs_probe_destination(struct nfs_server *server)
1135 * @hostname: new end-point's hostname 1135 * @hostname: new end-point's hostname
1136 * @sap: new end-point's socket address 1136 * @sap: new end-point's socket address
1137 * @salen: size of "sap" 1137 * @salen: size of "sap"
1138 * @net: net namespace
1138 * 1139 *
1139 * The nfs_server must be quiescent before this function is invoked. 1140 * The nfs_server must be quiescent before this function is invoked.
1140 * Either its session is drained (NFSv4.1+), or its transport is 1141 * Either its session is drained (NFSv4.1+), or its transport is
@@ -1143,13 +1144,13 @@ static int nfs_probe_destination(struct nfs_server *server)
1143 * Returns zero on success, or a negative errno value. 1144 * Returns zero on success, or a negative errno value.
1144 */ 1145 */
1145int nfs4_update_server(struct nfs_server *server, const char *hostname, 1146int nfs4_update_server(struct nfs_server *server, const char *hostname,
1146 struct sockaddr *sap, size_t salen) 1147 struct sockaddr *sap, size_t salen, struct net *net)
1147{ 1148{
1148 struct nfs_client *clp = server->nfs_client; 1149 struct nfs_client *clp = server->nfs_client;
1149 struct rpc_clnt *clnt = server->client; 1150 struct rpc_clnt *clnt = server->client;
1150 struct xprt_create xargs = { 1151 struct xprt_create xargs = {
1151 .ident = clp->cl_proto, 1152 .ident = clp->cl_proto,
1152 .net = &init_net, 1153 .net = net,
1153 .dstaddr = sap, 1154 .dstaddr = sap,
1154 .addrlen = salen, 1155 .addrlen = salen,
1155 .servername = hostname, 1156 .servername = hostname,
@@ -1189,7 +1190,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
1189 error = nfs4_set_client(server, hostname, sap, salen, buf, 1190 error = nfs4_set_client(server, hostname, sap, salen, buf,
1190 clp->cl_rpcclient->cl_auth->au_flavor, 1191 clp->cl_rpcclient->cl_auth->au_flavor,
1191 clp->cl_proto, clnt->cl_timeout, 1192 clp->cl_proto, clnt->cl_timeout,
1192 clp->cl_minorversion, clp->cl_net); 1193 clp->cl_minorversion, net);
1193 nfs_put_client(clp); 1194 nfs_put_client(clp);
1194 if (error != 0) { 1195 if (error != 0) {
1195 nfs_server_insert_lists(server); 1196 nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 4e7f05d3e9db..3d5dbf80d46a 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -121,9 +121,8 @@ static int nfs4_validate_fspath(struct dentry *dentry,
121} 121}
122 122
123static size_t nfs_parse_server_name(char *string, size_t len, 123static size_t nfs_parse_server_name(char *string, size_t len,
124 struct sockaddr *sa, size_t salen, struct nfs_server *server) 124 struct sockaddr *sa, size_t salen, struct net *net)
125{ 125{
126 struct net *net = rpc_net_ns(server->client);
127 ssize_t ret; 126 ssize_t ret;
128 127
129 ret = rpc_pton(net, string, len, sa, salen); 128 ret = rpc_pton(net, string, len, sa, salen);
@@ -223,6 +222,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
223 const struct nfs4_fs_location *location) 222 const struct nfs4_fs_location *location)
224{ 223{
225 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 224 const size_t addr_bufsize = sizeof(struct sockaddr_storage);
225 struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
226 struct vfsmount *mnt = ERR_PTR(-ENOENT); 226 struct vfsmount *mnt = ERR_PTR(-ENOENT);
227 char *mnt_path; 227 char *mnt_path;
228 unsigned int maxbuflen; 228 unsigned int maxbuflen;
@@ -248,8 +248,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
248 continue; 248 continue;
249 249
250 mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len, 250 mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
251 mountdata->addr, addr_bufsize, 251 mountdata->addr, addr_bufsize, net);
252 NFS_SB(mountdata->sb));
253 if (mountdata->addrlen == 0) 252 if (mountdata->addrlen == 0)
254 continue; 253 continue;
255 254
@@ -419,6 +418,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
419 const struct nfs4_fs_location *location) 418 const struct nfs4_fs_location *location)
420{ 419{
421 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 420 const size_t addr_bufsize = sizeof(struct sockaddr_storage);
421 struct net *net = rpc_net_ns(server->client);
422 struct sockaddr *sap; 422 struct sockaddr *sap;
423 unsigned int s; 423 unsigned int s;
424 size_t salen; 424 size_t salen;
@@ -440,7 +440,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
440 continue; 440 continue;
441 441
442 salen = nfs_parse_server_name(buf->data, buf->len, 442 salen = nfs_parse_server_name(buf->data, buf->len,
443 sap, addr_bufsize, server); 443 sap, addr_bufsize, net);
444 if (salen == 0) 444 if (salen == 0)
445 continue; 445 continue;
446 rpc_set_port(sap, NFS_PORT); 446 rpc_set_port(sap, NFS_PORT);
@@ -450,7 +450,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
450 if (hostname == NULL) 450 if (hostname == NULL)
451 break; 451 break;
452 452
453 error = nfs4_update_server(server, hostname, sap, salen); 453 error = nfs4_update_server(server, hostname, sap, salen, net);
454 kfree(hostname); 454 kfree(hostname);
455 if (error == 0) 455 if (error == 0)
456 break; 456 break;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e5be72518bd7..e1a47217c05e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1015,8 +1015,11 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
1015 if (ret == -EIO) 1015 if (ret == -EIO)
1016 /* A lost lock - don't even consider delegations */ 1016 /* A lost lock - don't even consider delegations */
1017 goto out; 1017 goto out;
1018 if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) 1018 /* returns true if delegation stateid found and copied */
1019 if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
1020 ret = 0;
1019 goto out; 1021 goto out;
1022 }
1020 if (ret != -ENOENT) 1023 if (ret != -ENOENT)
1021 /* nfs4_copy_delegation_stateid() didn't over-write 1024 /* nfs4_copy_delegation_stateid() didn't over-write
1022 * dst, so it still has the lock stateid which we now 1025 * dst, so it still has the lock stateid which we now
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index d3a587144222..d190e33d0ec2 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
151 pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 151 pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
152 if (IS_ERR(pacl)) 152 if (IS_ERR(pacl))
153 return PTR_ERR(pacl); 153 return PTR_ERR(pacl);
154 /* allocate for worst case: one (deny, allow) pair each: */
155 size += 2 * pacl->a_count;
156 } 154 }
155 /* allocate for worst case: one (deny, allow) pair each: */
156 size += 2 * pacl->a_count;
157 157
158 if (S_ISDIR(inode->i_mode)) { 158 if (S_ISDIR(inode->i_mode)) {
159 flags = NFS4_ACL_DIR; 159 flags = NFS4_ACL_DIR;
160 dpacl = get_acl(inode, ACL_TYPE_DEFAULT); 160 dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
161 if (dpacl) 161 if (dpacl)
162 size += 2 * dpacl->a_count; 162 size += 2 * dpacl->a_count;
163 } else {
164 dpacl = NULL;
165 } 163 }
166 164
167 *acl = nfs4_acl_new(size); 165 *acl = nfs4_acl_new(size);
@@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
170 goto out; 168 goto out;
171 } 169 }
172 170
173 if (pacl) 171 _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
174 _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
175 172
176 if (dpacl) 173 if (dpacl)
177 _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); 174 _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9daeb47..db9bd8a31725 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2134 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); 2134 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
2135 mutex_unlock(&inode->i_mutex); 2135 mutex_unlock(&inode->i_mutex);
2136 if (ret > 0) { 2136 if (ret > 0) {
2137 int err = generic_write_sync(file, pos, ret); 2137 int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2138 if (err < 0) 2138 if (err < 0)
2139 ret = err; 2139 ret = err;
2140 } 2140 }
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index aada5801567a..e2edff38be52 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7158,7 +7158,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
7158 if (end > i_size_read(inode)) 7158 if (end > i_size_read(inode))
7159 end = i_size_read(inode); 7159 end = i_size_read(inode);
7160 7160
7161 BUG_ON(start >= end); 7161 BUG_ON(start > end);
7162 7162
7163 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || 7163 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
7164 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || 7164 !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d77d71ead8d1..8450262bcf2a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -185,6 +185,9 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
185 file->f_path.dentry->d_name.name, 185 file->f_path.dentry->d_name.name,
186 (unsigned long long)datasync); 186 (unsigned long long)datasync);
187 187
188 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
189 return -EROFS;
190
188 err = filemap_write_and_wait_range(inode->i_mapping, start, end); 191 err = filemap_write_and_wait_range(inode->i_mapping, start, end);
189 if (err) 192 if (err)
190 return err; 193 return err;
@@ -474,11 +477,6 @@ static int ocfs2_truncate_file(struct inode *inode,
474 goto bail; 477 goto bail;
475 } 478 }
476 479
477 /* lets handle the simple truncate cases before doing any more
478 * cluster locking. */
479 if (new_i_size == le64_to_cpu(fe->i_size))
480 goto bail;
481
482 down_write(&OCFS2_I(inode)->ip_alloc_sem); 480 down_write(&OCFS2_I(inode)->ip_alloc_sem);
483 481
484 ocfs2_resv_discard(&osb->osb_la_resmap, 482 ocfs2_resv_discard(&osb->osb_la_resmap,
@@ -718,7 +716,8 @@ leave:
  * While a write will already be ordering the data, a truncate will not.
  * Thus, we need to explicitly order the zeroed pages.
  */
-static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
+						      struct buffer_head *di_bh)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	handle_t *handle = NULL;
@@ -735,7 +734,14 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
 	}

 	ret = ocfs2_jbd2_file_inode(handle, inode);
-	if (ret < 0)
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret)
 		mlog_errno(ret);

 out:
@@ -751,7 +757,7 @@ out:
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->write_begin() and ->write_end(). */
 static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
-				 u64 abs_to)
+				 u64 abs_to, struct buffer_head *di_bh)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
@@ -759,6 +765,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 	handle_t *handle = NULL;
 	int ret = 0;
 	unsigned zero_from, zero_to, block_start, block_end;
+	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

 	BUG_ON(abs_from >= abs_to);
 	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
@@ -801,7 +808,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 	}

 	if (!handle) {
-		handle = ocfs2_zero_start_ordered_transaction(inode);
+		handle = ocfs2_zero_start_ordered_transaction(inode,
+							      di_bh);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
 			handle = NULL;
@@ -818,8 +826,22 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 		ret = 0;
 	}

-	if (handle)
+	if (handle) {
+		/*
+		 * fs-writeback will release the dirty pages without page lock
+		 * whose offset are over inode size, the release happens at
+		 * block_write_full_page_endio().
+		 */
+		i_size_write(inode, abs_to);
+		inode->i_blocks = ocfs2_inode_sector_count(inode);
+		di->i_size = cpu_to_le64((u64)i_size_read(inode));
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+		di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+		di->i_mtime_nsec = di->i_ctime_nsec;
+		ocfs2_journal_dirty(handle, di_bh);
 		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+	}

 out_unlock:
 	unlock_page(page);
@@ -915,7 +937,7 @@ out:
  * has made sure that the entire range needs zeroing.
  */
 static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
-				   u64 range_end)
+				   u64 range_end, struct buffer_head *di_bh)
 {
 	int rc = 0;
 	u64 next_pos;
@@ -931,7 +953,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
 		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
 		if (next_pos > range_end)
 			next_pos = range_end;
-		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
 		if (rc < 0) {
 			mlog_errno(rc);
 			break;
@@ -977,7 +999,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
 			range_end = zero_to_size;

 		ret = ocfs2_zero_extend_range(inode, range_start,
-					      range_end);
+					      range_end, di_bh);
 		if (ret) {
 			mlog_errno(ret);
 			break;
@@ -1145,14 +1167,14 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
 		goto bail_unlock_rw;
 	}

-	if (size_change && attr->ia_size != i_size_read(inode)) {
+	if (size_change) {
 		status = inode_newsize_ok(inode, attr->ia_size);
 		if (status)
 			goto bail_unlock;

 		inode_dio_wait(inode);

-		if (i_size_read(inode) > attr->ia_size) {
+		if (i_size_read(inode) >= attr->ia_size) {
 			if (ocfs2_should_order_data(inode)) {
 				status = ocfs2_begin_ordered_truncate(inode,
 								      attr->ia_size);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f4d609be9400..3683643f3f0e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -664,6 +664,7 @@ static int ocfs2_link(struct dentry *old_dentry,
 	struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
 	struct ocfs2_dir_lookup_result lookup = { NULL, };
 	sigset_t oldset;
+	u64 old_de_ino;

 	trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
 			 old_dentry->d_name.len, old_dentry->d_name.name,
@@ -686,6 +687,22 @@ static int ocfs2_link(struct dentry *old_dentry,
 		goto out;
 	}

+	err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+					 old_dentry->d_name.len, &old_de_ino);
+	if (err) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	/*
+	 * Check whether another node removed the source inode while we
+	 * were in the vfs.
+	 */
+	if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
+		err = -ENOENT;
+		goto out;
+	}
+
 	err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
 					dentry->d_name.len);
 	if (err)
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 2ca7ba047f04..88d4585b30f1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -468,17 +468,24 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 			return rc;
 		}
 		nhdr_ptr = notes_section;
-		while (real_sz < max_sz) {
-			if (nhdr_ptr->n_namesz == 0)
-				break;
+		while (nhdr_ptr->n_namesz != 0) {
 			sz = sizeof(Elf64_Nhdr) +
 				((nhdr_ptr->n_namesz + 3) & ~3) +
 				((nhdr_ptr->n_descsz + 3) & ~3);
+			if ((real_sz + sz) > max_sz) {
+				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+				break;
+			}
 			real_sz += sz;
 			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 		}
 		kfree(notes_section);
 		phdr_ptr->p_memsz = real_sz;
+		if (real_sz == 0) {
+			pr_warn("Warning: Zero PT_NOTE entries found\n");
+			return -EINVAL;
+		}
 	}

 	return 0;
@@ -648,17 +655,24 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 			return rc;
 		}
 		nhdr_ptr = notes_section;
-		while (real_sz < max_sz) {
-			if (nhdr_ptr->n_namesz == 0)
-				break;
+		while (nhdr_ptr->n_namesz != 0) {
 			sz = sizeof(Elf32_Nhdr) +
 				((nhdr_ptr->n_namesz + 3) & ~3) +
 				((nhdr_ptr->n_descsz + 3) & ~3);
+			if ((real_sz + sz) > max_sz) {
+				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+				break;
+			}
 			real_sz += sz;
 			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 		}
 		kfree(notes_section);
 		phdr_ptr->p_memsz = real_sz;
+		if (real_sz == 0) {
+			pr_warn("Warning: Zero PT_NOTE entries found\n");
+			return -EINVAL;
+		}
 	}

 	return 0;
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 2b7882b508db..9a3c68cf6026 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
324 switch (flag) { 324 switch (flag) {
325 case M_INSERT: /* insert item into L[0] */ 325 case M_INSERT: /* insert item into L[0] */
326 326
327 if (item_pos == tb->lnum[0] - 1 327 if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
328 && tb->lbytes != -1) {
329 /* part of new item falls into L[0] */ 328 /* part of new item falls into L[0] */
330 int new_item_len; 329 int new_item_len;
331 int version; 330 int version;
332 331
333 ret_val = 332 ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
334 leaf_shift_left(tb, tb->lnum[0] - 1,
335 -1);
336 333
337 /* Calculate item length to insert to S[0] */ 334 /* Calculate item length to insert to S[0] */
338 new_item_len = 335 new_item_len = ih_item_len(ih) - tb->lbytes;
339 ih_item_len(ih) - tb->lbytes;
340 /* Calculate and check item length to insert to L[0] */ 336 /* Calculate and check item length to insert to L[0] */
341 put_ih_item_len(ih, 337 put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
342 ih_item_len(ih) -
343 new_item_len);
344 338
345 RFALSE(ih_item_len(ih) <= 0, 339 RFALSE(ih_item_len(ih) <= 0,
346 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", 340 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
@@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
349 /* Insert new item into L[0] */ 343 /* Insert new item into L[0] */
350 buffer_info_init_left(tb, &bi); 344 buffer_info_init_left(tb, &bi);
351 leaf_insert_into_buf(&bi, 345 leaf_insert_into_buf(&bi,
352 n + item_pos - 346 n + item_pos - ret_val, ih, body,
353 ret_val, ih, body, 347 zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
354 zeros_num >
355 ih_item_len(ih) ?
356 ih_item_len(ih) :
357 zeros_num);
358 348
359 version = ih_version(ih); 349 version = ih_version(ih);
360 350
361 /* Calculate key component, item length and body to insert into S[0] */ 351 /* Calculate key component, item length and body to insert into S[0] */
362 set_le_ih_k_offset(ih, 352 set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
363 le_ih_k_offset(ih) + 353 (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
364 (tb->
365 lbytes <<
366 (is_indirect_le_ih
367 (ih) ? tb->tb_sb->
368 s_blocksize_bits -
369 UNFM_P_SHIFT :
370 0)));
371 354
372 put_ih_item_len(ih, new_item_len); 355 put_ih_item_len(ih, new_item_len);
373 if (tb->lbytes > zeros_num) { 356 if (tb->lbytes > zeros_num) {
374 body += 357 body += (tb->lbytes - zeros_num);
375 (tb->lbytes - zeros_num);
376 zeros_num = 0; 358 zeros_num = 0;
377 } else 359 } else
378 zeros_num -= tb->lbytes; 360 zeros_num -= tb->lbytes;
@@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
383 } else { 365 } else {
384 /* new item in whole falls into L[0] */ 366 /* new item in whole falls into L[0] */
385 /* Shift lnum[0]-1 items to L[0] */ 367 /* Shift lnum[0]-1 items to L[0] */
386 ret_val = 368 ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
387 leaf_shift_left(tb, tb->lnum[0] - 1,
388 tb->lbytes);
389 /* Insert new item into L[0] */ 369 /* Insert new item into L[0] */
390 buffer_info_init_left(tb, &bi); 370 buffer_info_init_left(tb, &bi);
391 leaf_insert_into_buf(&bi, 371 leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
392 n + item_pos -
393 ret_val, ih, body,
394 zeros_num);
395 tb->insert_size[0] = 0; 372 tb->insert_size[0] = 0;
396 zeros_num = 0; 373 zeros_num = 0;
397 } 374 }
@@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
399 376
400 case M_PASTE: /* append item in L[0] */ 377 case M_PASTE: /* append item in L[0] */
401 378
402 if (item_pos == tb->lnum[0] - 1 379 if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
403 && tb->lbytes != -1) {
404 /* we must shift the part of the appended item */ 380 /* we must shift the part of the appended item */
405 if (is_direntry_le_ih 381 if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
406 (B_N_PITEM_HEAD(tbS0, item_pos))) {
407 382
408 RFALSE(zeros_num, 383 RFALSE(zeros_num,
409 "PAP-12090: invalid parameter in case of a directory"); 384 "PAP-12090: invalid parameter in case of a directory");
410 /* directory item */ 385 /* directory item */
411 if (tb->lbytes > pos_in_item) { 386 if (tb->lbytes > pos_in_item) {
412 /* new directory entry falls into L[0] */ 387 /* new directory entry falls into L[0] */
413 struct item_head 388 struct item_head *pasted;
414 *pasted; 389 int l_pos_in_item = pos_in_item;
415 int l_pos_in_item =
416 pos_in_item;
417 390
418 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */ 391 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
419 ret_val = 392 ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
420 leaf_shift_left(tb, 393 if (ret_val && !item_pos) {
421 tb-> 394 pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
422 lnum 395 l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
423 [0],
424 tb->
425 lbytes
426 -
427 1);
428 if (ret_val
429 && !item_pos) {
430 pasted =
431 B_N_PITEM_HEAD
432 (tb->L[0],
433 B_NR_ITEMS
434 (tb->
435 L[0]) -
436 1);
437 l_pos_in_item +=
438 I_ENTRY_COUNT
439 (pasted) -
440 (tb->
441 lbytes -
442 1);
443 } 396 }
444 397
445 /* Append given directory entry to directory item */ 398 /* Append given directory entry to directory item */
446 buffer_info_init_left(tb, &bi); 399 buffer_info_init_left(tb, &bi);
447 leaf_paste_in_buffer 400 leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
448 (&bi,
449 n + item_pos -
450 ret_val,
451 l_pos_in_item,
452 tb->insert_size[0],
453 body, zeros_num);
454 401
455 /* previous string prepared space for pasting new entry, following string pastes this entry */ 402 /* previous string prepared space for pasting new entry, following string pastes this entry */
456 403
457 /* when we have merge directory item, pos_in_item has been changed too */ 404 /* when we have merge directory item, pos_in_item has been changed too */
458 405
459 /* paste new directory entry. 1 is entry number */ 406 /* paste new directory entry. 1 is entry number */
460 leaf_paste_entries(&bi, 407 leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
461 n + 408 1, (struct reiserfs_de_head *) body,
462 item_pos 409 body + DEH_SIZE, tb->insert_size[0]);
463 -
464 ret_val,
465 l_pos_in_item,
466 1,
467 (struct
468 reiserfs_de_head
469 *)
470 body,
471 body
472 +
473 DEH_SIZE,
474 tb->
475 insert_size
476 [0]
477 );
478 tb->insert_size[0] = 0; 410 tb->insert_size[0] = 0;
479 } else { 411 } else {
480 /* new directory item doesn't fall into L[0] */ 412 /* new directory item doesn't fall into L[0] */
481 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ 413 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
482 leaf_shift_left(tb, 414 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
483 tb->
484 lnum[0],
485 tb->
486 lbytes);
487 } 415 }
488 /* Calculate new position to append in item body */ 416 /* Calculate new position to append in item body */
489 pos_in_item -= tb->lbytes; 417 pos_in_item -= tb->lbytes;
490 } else { 418 } else {
491 /* regular object */ 419 /* regular object */
492 RFALSE(tb->lbytes <= 0, 420 RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
493 "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", 421 RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
494 tb->lbytes);
495 RFALSE(pos_in_item !=
496 ih_item_len
497 (B_N_PITEM_HEAD
498 (tbS0, item_pos)),
499 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 422 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
500 ih_item_len 423 ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item);
501 (B_N_PITEM_HEAD
502 (tbS0, item_pos)),
503 pos_in_item);
504 424
505 if (tb->lbytes >= pos_in_item) { 425 if (tb->lbytes >= pos_in_item) {
506 /* appended item will be in L[0] in whole */ 426 /* appended item will be in L[0] in whole */
507 int l_n; 427 int l_n;
508 428
509 /* this bytes number must be appended to the last item of L[h] */ 429 /* this bytes number must be appended to the last item of L[h] */
510 l_n = 430 l_n = tb->lbytes - pos_in_item;
511 tb->lbytes -
512 pos_in_item;
513 431
514 /* Calculate new insert_size[0] */ 432 /* Calculate new insert_size[0] */
515 tb->insert_size[0] -= 433 tb->insert_size[0] -= l_n;
516 l_n;
517 434
518 RFALSE(tb-> 435 RFALSE(tb->insert_size[0] <= 0,
519 insert_size[0] <=
520 0,
521 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", 436 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
522 tb-> 437 tb->insert_size[0]);
523 insert_size[0]); 438 ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
524 ret_val = 439 (B_N_PITEM_HEAD(tbS0, item_pos)));
525 leaf_shift_left(tb,
526 tb->
527 lnum
528 [0],
529 ih_item_len
530 (B_N_PITEM_HEAD
531 (tbS0,
532 item_pos)));
533 /* Append to body of item in L[0] */ 440 /* Append to body of item in L[0] */
534 buffer_info_init_left(tb, &bi); 441 buffer_info_init_left(tb, &bi);
535 leaf_paste_in_buffer 442 leaf_paste_in_buffer
536 (&bi, 443 (&bi, n + item_pos - ret_val, ih_item_len
537 n + item_pos - 444 (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
538 ret_val, 445 l_n, body,
539 ih_item_len 446 zeros_num > l_n ? l_n : zeros_num);
540 (B_N_PITEM_HEAD
541 (tb->L[0],
542 n + item_pos -
543 ret_val)), l_n,
544 body,
545 zeros_num >
546 l_n ? l_n :
547 zeros_num);
548 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 447 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
549 { 448 {
550 int version; 449 int version;
551 int temp_l = 450 int temp_l = l_n;
552 l_n; 451
553 452 RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
554 RFALSE
555 (ih_item_len
556 (B_N_PITEM_HEAD
557 (tbS0,
558 0)),
559 "PAP-12106: item length must be 0"); 453 "PAP-12106: item length must be 0");
560 RFALSE 454 RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
561 (comp_short_le_keys 455 (tb->L[0], n + item_pos - ret_val)),
562 (B_N_PKEY
563 (tbS0, 0),
564 B_N_PKEY
565 (tb->L[0],
566 n +
567 item_pos
568 -
569 ret_val)),
570 "PAP-12107: items must be of the same file"); 456 "PAP-12107: items must be of the same file");
571 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 457 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
572 temp_l = 458 temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
573 l_n
574 <<
575 (tb->
576 tb_sb->
577 s_blocksize_bits
578 -
579 UNFM_P_SHIFT);
580 } 459 }
581 /* update key of first item in S0 */ 460 /* update key of first item in S0 */
582 version = 461 version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
583 ih_version 462 set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
584 (B_N_PITEM_HEAD 463 le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
585 (tbS0, 0));
586 set_le_key_k_offset
587 (version,
588 B_N_PKEY
589 (tbS0, 0),
590 le_key_k_offset
591 (version,
592 B_N_PKEY
593 (tbS0,
594 0)) +
595 temp_l);
596 /* update left delimiting key */ 464 /* update left delimiting key */
597 set_le_key_k_offset 465 set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
598 (version, 466 le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
599 B_N_PDELIM_KEY
600 (tb->
601 CFL[0],
602 tb->
603 lkey[0]),
604 le_key_k_offset
605 (version,
606 B_N_PDELIM_KEY
607 (tb->
608 CFL[0],
609 tb->
610 lkey[0]))
611 + temp_l);
612 } 467 }
613 468
614 /* Calculate new body, position in item and insert_size[0] */ 469 /* Calculate new body, position in item and insert_size[0] */
615 if (l_n > zeros_num) { 470 if (l_n > zeros_num) {
616 body += 471 body += (l_n - zeros_num);
617 (l_n -
618 zeros_num);
619 zeros_num = 0; 472 zeros_num = 0;
620 } else 473 } else
621 zeros_num -= 474 zeros_num -= l_n;
622 l_n;
623 pos_in_item = 0; 475 pos_in_item = 0;
624 476
625 RFALSE 477 RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
626 (comp_short_le_keys 478 || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
627 (B_N_PKEY(tbS0, 0), 479 || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
628 B_N_PKEY(tb->L[0],
629 B_NR_ITEMS
630 (tb->
631 L[0]) -
632 1))
633 ||
634 !op_is_left_mergeable
635 (B_N_PKEY(tbS0, 0),
636 tbS0->b_size)
637 ||
638 !op_is_left_mergeable
639 (B_N_PDELIM_KEY
640 (tb->CFL[0],
641 tb->lkey[0]),
642 tbS0->b_size),
643 "PAP-12120: item must be merge-able with left neighboring item"); 480 "PAP-12120: item must be merge-able with left neighboring item");
644 } else { /* only part of the appended item will be in L[0] */ 481 } else { /* only part of the appended item will be in L[0] */
645 482
646 /* Calculate position in item for append in S[0] */ 483 /* Calculate position in item for append in S[0] */
647 pos_in_item -= 484 pos_in_item -= tb->lbytes;
648 tb->lbytes;
649 485
650 RFALSE(pos_in_item <= 0, 486 RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
651 "PAP-12125: no place for paste. pos_in_item=%d",
652 pos_in_item);
653 487
654 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 488 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
655 leaf_shift_left(tb, 489 leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
656 tb->
657 lnum[0],
658 tb->
659 lbytes);
660 } 490 }
661 } 491 }
662 } else { /* appended item will be in L[0] in whole */ 492 } else { /* appended item will be in L[0] in whole */
@@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
665 495
666 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 496 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */
667 /* then increment pos_in_item by the size of the last item in L[0] */ 497 /* then increment pos_in_item by the size of the last item in L[0] */
668 pasted = 498 pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
669 B_N_PITEM_HEAD(tb->L[0],
670 n - 1);
671 if (is_direntry_le_ih(pasted)) 499 if (is_direntry_le_ih(pasted))
672 pos_in_item += 500 pos_in_item += ih_entry_count(pasted);
673 ih_entry_count
674 (pasted);
675 else 501 else
676 pos_in_item += 502 pos_in_item += ih_item_len(pasted);
677 ih_item_len(pasted);
678 } 503 }
679 504
680 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 505 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
681 ret_val = 506 ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
682 leaf_shift_left(tb, tb->lnum[0],
683 tb->lbytes);
684 /* Append to body of item in L[0] */ 507 /* Append to body of item in L[0] */
685 buffer_info_init_left(tb, &bi); 508 buffer_info_init_left(tb, &bi);
686 leaf_paste_in_buffer(&bi, 509 leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
687 n + item_pos -
688 ret_val,
689 pos_in_item, 510 pos_in_item,
690 tb->insert_size[0], 511 tb->insert_size[0],
691 body, zeros_num); 512 body, zeros_num);
692 513
693 /* if appended item is directory, paste entry */ 514 /* if appended item is directory, paste entry */
694 pasted = 515 pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
695 B_N_PITEM_HEAD(tb->L[0],
696 n + item_pos -
697 ret_val);
698 if (is_direntry_le_ih(pasted)) 516 if (is_direntry_le_ih(pasted))
699 leaf_paste_entries(&bi, 517 leaf_paste_entries(&bi, n + item_pos - ret_val,
700 n + 518 pos_in_item, 1,
701 item_pos - 519 (struct reiserfs_de_head *) body,
702 ret_val, 520 body + DEH_SIZE,
703 pos_in_item, 521 tb->insert_size[0]);
704 1,
705 (struct
706 reiserfs_de_head
707 *)body,
708 body +
709 DEH_SIZE,
710 tb->
711 insert_size
712 [0]
713 );
714 /* if appended item is indirect item, put unformatted node into un list */ 522 /* if appended item is indirect item, put unformatted node into un list */
715 if (is_indirect_le_ih(pasted)) 523 if (is_indirect_le_ih(pasted))
716 set_ih_free_space(pasted, 0); 524 set_ih_free_space(pasted, 0);
@@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
722 reiserfs_panic(tb->tb_sb, "PAP-12130", 530 reiserfs_panic(tb->tb_sb, "PAP-12130",
723 "lnum > 0: unexpected mode: " 531 "lnum > 0: unexpected mode: "
724 " %s(%d)", 532 " %s(%d)",
725 (flag == 533 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
726 M_DELETE) ? "DELETE" : ((flag ==
727 M_CUT)
728 ? "CUT"
729 :
730 "UNKNOWN"),
731 flag);
732 } 534 }
733 } else { 535 } else {
734 /* new item doesn't fall into L[0] */ 536 /* new item doesn't fall into L[0] */
@@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
748 case M_INSERT: /* insert item */ 550 case M_INSERT: /* insert item */
749 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 551 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */
750 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 552 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */
751 loff_t old_key_comp, old_len, 553 loff_t old_key_comp, old_len, r_zeros_number;
752 r_zeros_number;
753 const char *r_body; 554 const char *r_body;
754 int version; 555 int version;
755 loff_t offset; 556 loff_t offset;
756 557
757 leaf_shift_right(tb, tb->rnum[0] - 1, 558 leaf_shift_right(tb, tb->rnum[0] - 1, -1);
758 -1);
759 559
760 version = ih_version(ih); 560 version = ih_version(ih);
761 /* Remember key component and item length */ 561 /* Remember key component and item length */
@@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
763 old_len = ih_item_len(ih); 563 old_len = ih_item_len(ih);
764 564
765 /* Calculate key component and item length to insert into R[0] */ 565 /* Calculate key component and item length to insert into R[0] */
766 offset = 566 offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
767 le_ih_k_offset(ih) +
768 ((old_len -
769 tb->
770 rbytes) << (is_indirect_le_ih(ih)
771 ? tb->tb_sb->
772 s_blocksize_bits -
773 UNFM_P_SHIFT : 0));
774 set_le_ih_k_offset(ih, offset); 567 set_le_ih_k_offset(ih, offset);
775 put_ih_item_len(ih, tb->rbytes); 568 put_ih_item_len(ih, tb->rbytes);
776 /* Insert part of the item into R[0] */ 569 /* Insert part of the item into R[0] */
777 buffer_info_init_right(tb, &bi); 570 buffer_info_init_right(tb, &bi);
778 if ((old_len - tb->rbytes) > zeros_num) { 571 if ((old_len - tb->rbytes) > zeros_num) {
779 r_zeros_number = 0; 572 r_zeros_number = 0;
780 r_body = 573 r_body = body + (old_len - tb->rbytes) - zeros_num;
781 body + (old_len -
782 tb->rbytes) -
783 zeros_num;
784 } else { 574 } else {
785 r_body = body; 575 r_body = body;
786 r_zeros_number = 576 r_zeros_number = zeros_num - (old_len - tb->rbytes);
787 zeros_num - (old_len -
788 tb->rbytes);
789 zeros_num -= r_zeros_number; 577 zeros_num -= r_zeros_number;
790 } 578 }
791 579
@@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
798 586
799 /* Calculate key component and item length to insert into S[0] */ 587 /* Calculate key component and item length to insert into S[0] */
800 set_le_ih_k_offset(ih, old_key_comp); 588 set_le_ih_k_offset(ih, old_key_comp);
801 put_ih_item_len(ih, 589 put_ih_item_len(ih, old_len - tb->rbytes);
802 old_len - tb->rbytes);
803 590
804 tb->insert_size[0] -= tb->rbytes; 591 tb->insert_size[0] -= tb->rbytes;
805 592
806 } else { /* whole new item falls into R[0] */ 593 } else { /* whole new item falls into R[0] */
807 594
808 /* Shift rnum[0]-1 items to R[0] */ 595 /* Shift rnum[0]-1 items to R[0] */
809 ret_val = 596 ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
810 leaf_shift_right(tb,
811 tb->rnum[0] - 1,
812 tb->rbytes);
813 /* Insert new item into R[0] */ 597 /* Insert new item into R[0] */
814 buffer_info_init_right(tb, &bi); 598 buffer_info_init_right(tb, &bi);
815 leaf_insert_into_buf(&bi, 599 leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
816 item_pos - n + 600 ih, body, zeros_num);
817 tb->rnum[0] - 1,
818 ih, body,
819 zeros_num);
820 601
821 if (item_pos - n + tb->rnum[0] - 1 == 0) { 602 if (item_pos - n + tb->rnum[0] - 1 == 0) {
822 replace_key(tb, tb->CFR[0], 603 replace_key(tb, tb->CFR[0],
@@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
841 622
842 RFALSE(zeros_num, 623 RFALSE(zeros_num,
843 "PAP-12145: invalid parameter in case of a directory"); 624 "PAP-12145: invalid parameter in case of a directory");
844 entry_count = 625 entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
845 I_ENTRY_COUNT(B_N_PITEM_HEAD 626 (tbS0, item_pos));
846 (tbS0,
847 item_pos));
848 if (entry_count - tb->rbytes < 627 if (entry_count - tb->rbytes <
849 pos_in_item) 628 pos_in_item)
850 /* new directory entry falls into R[0] */ 629 /* new directory entry falls into R[0] */
851 { 630 {
852 int paste_entry_position; 631 int paste_entry_position;
853 632
854 RFALSE(tb->rbytes - 1 >= 633 RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
855 entry_count
856 || !tb->
857 insert_size[0],
858 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 634 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
859 tb->rbytes, 635 tb->rbytes, entry_count);
860 entry_count);
861 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 636 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
862 leaf_shift_right(tb, 637 leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
863 tb->
864 rnum
865 [0],
866 tb->
867 rbytes
868 - 1);
869 /* Paste given directory entry to directory item */ 638 /* Paste given directory entry to directory item */
870 paste_entry_position = 639 paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
871 pos_in_item -
872 entry_count +
873 tb->rbytes - 1;
874 buffer_info_init_right(tb, &bi); 640 buffer_info_init_right(tb, &bi);
875 leaf_paste_in_buffer 641 leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
876 (&bi, 0,
877 paste_entry_position,
878 tb->insert_size[0],
879 body, zeros_num);
880 /* paste entry */ 642 /* paste entry */
881 leaf_paste_entries(&bi, 643 leaf_paste_entries(&bi, 0, paste_entry_position, 1,
882 0, 644 (struct reiserfs_de_head *) body,
883 paste_entry_position, 645 body + DEH_SIZE, tb->insert_size[0]);
884 1, 646
885 (struct 647 if (paste_entry_position == 0) {
886 reiserfs_de_head
887 *)
888 body,
889 body
890 +
891 DEH_SIZE,
892 tb->
893 insert_size
894 [0]
895 );
896
897 if (paste_entry_position
898 == 0) {
899 /* change delimiting keys */ 648 /* change delimiting keys */
900 replace_key(tb, 649 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
901 tb->
902 CFR
903 [0],
904 tb->
905 rkey
906 [0],
907 tb->
908 R
909 [0],
910 0);
911 } 650 }
912 651
913 tb->insert_size[0] = 0; 652 tb->insert_size[0] = 0;
914 pos_in_item++; 653 pos_in_item++;
915 } else { /* new directory entry doesn't fall into R[0] */ 654 } else { /* new directory entry doesn't fall into R[0] */
916 655
917 leaf_shift_right(tb, 656 leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
918 tb->
919 rnum
920 [0],
921 tb->
922 rbytes);
923 } 657 }
924 } else { /* regular object */ 658 } else { /* regular object */
925 659
926 int n_shift, n_rem, 660 int n_shift, n_rem, r_zeros_number;
927 r_zeros_number;
928 const char *r_body; 661 const char *r_body;
929 662
930 /* Calculate number of bytes which must be shifted from appended item */ 663 /* Calculate number of bytes which must be shifted from appended item */
931 if ((n_shift = 664 if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
932 tb->rbytes -
933 tb->insert_size[0]) < 0)
934 n_shift = 0; 665 n_shift = 0;
935 666
936 RFALSE(pos_in_item != 667 RFALSE(pos_in_item != ih_item_len
937 ih_item_len 668 (B_N_PITEM_HEAD(tbS0, item_pos)),
938 (B_N_PITEM_HEAD
939 (tbS0, item_pos)),
940 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 669 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
941 pos_in_item, 670 pos_in_item, ih_item_len
942 ih_item_len 671 (B_N_PITEM_HEAD(tbS0, item_pos)));
943 (B_N_PITEM_HEAD 672
944 (tbS0, item_pos))); 673 leaf_shift_right(tb, tb->rnum[0], n_shift);
945
946 leaf_shift_right(tb,
947 tb->rnum[0],
948 n_shift);
949 /* Calculate number of bytes which must remain in body after appending to R[0] */ 674 /* Calculate number of bytes which must remain in body after appending to R[0] */
950 if ((n_rem = 675 if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
951 tb->insert_size[0] -
952 tb->rbytes) < 0)
953 n_rem = 0; 676 n_rem = 0;
954 677
955 { 678 {
956 int version; 679 int version;
957 unsigned long temp_rem = 680 unsigned long temp_rem = n_rem;
958 n_rem; 681
959 682 version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
960 version = 683 if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
961 ih_version 684 temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
962 (B_N_PITEM_HEAD
963 (tb->R[0], 0));
964 if (is_indirect_le_key
965 (version,
966 B_N_PKEY(tb->R[0],
967 0))) {
968 temp_rem =
969 n_rem <<
970 (tb->tb_sb->
971 s_blocksize_bits
972 -
973 UNFM_P_SHIFT);
974 } 685 }
975 set_le_key_k_offset 686 set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
976 (version, 687 le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
977 B_N_PKEY(tb->R[0], 688 set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
978 0), 689 le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
979 le_key_k_offset
980 (version,
981 B_N_PKEY(tb->R[0],
982 0)) +
983 temp_rem);
984 set_le_key_k_offset
985 (version,
986 B_N_PDELIM_KEY(tb->
987 CFR
988 [0],
989 tb->
990 rkey
991 [0]),
992 le_key_k_offset
993 (version,
994 B_N_PDELIM_KEY
995 (tb->CFR[0],
996 tb->rkey[0])) +
997 temp_rem);
998 } 690 }
999/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 691/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
1000 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 692 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
1001 do_balance_mark_internal_dirty 693 do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
1002 (tb, tb->CFR[0], 0);
1003 694
1004 /* Append part of body into R[0] */ 695 /* Append part of body into R[0] */
1005 buffer_info_init_right(tb, &bi); 696 buffer_info_init_right(tb, &bi);
1006 if (n_rem > zeros_num) { 697 if (n_rem > zeros_num) {
1007 r_zeros_number = 0; 698 r_zeros_number = 0;
1008 r_body = 699 r_body = body + n_rem - zeros_num;
1009 body + n_rem -
1010 zeros_num;
1011 } else { 700 } else {
1012 r_body = body; 701 r_body = body;
1013 r_zeros_number = 702 r_zeros_number = zeros_num - n_rem;
1014 zeros_num - n_rem; 703 zeros_num -= r_zeros_number;
1015 zeros_num -=
1016 r_zeros_number;
1017 } 704 }
1018 705
1019 leaf_paste_in_buffer(&bi, 0, 706 leaf_paste_in_buffer(&bi, 0, n_shift,
1020 n_shift, 707 tb->insert_size[0] - n_rem,
1021 tb-> 708 r_body, r_zeros_number);
1022 insert_size 709
1023 [0] - 710 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
1024 n_rem,
1025 r_body,
1026 r_zeros_number);
1027
1028 if (is_indirect_le_ih
1029 (B_N_PITEM_HEAD
1030 (tb->R[0], 0))) {
1031#if 0 711#if 0
1032 RFALSE(n_rem, 712 RFALSE(n_rem,
1033 "PAP-12160: paste more than one unformatted node pointer"); 713 "PAP-12160: paste more than one unformatted node pointer");
1034#endif 714#endif
1035 set_ih_free_space 715 set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
1036 (B_N_PITEM_HEAD
1037 (tb->R[0], 0), 0);
1038 } 716 }
1039 tb->insert_size[0] = n_rem; 717 tb->insert_size[0] = n_rem;
1040 if (!n_rem) 718 if (!n_rem)
@@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1044 722
1045 struct item_head *pasted; 723 struct item_head *pasted;
1046 724
1047 ret_val = 725 ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
1048 leaf_shift_right(tb, tb->rnum[0],
1049 tb->rbytes);
1050 /* append item in R[0] */ 726 /* append item in R[0] */
1051 if (pos_in_item >= 0) { 727 if (pos_in_item >= 0) {
1052 buffer_info_init_right(tb, &bi); 728 buffer_info_init_right(tb, &bi);
1053 leaf_paste_in_buffer(&bi, 729 leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
1054 item_pos - 730 tb->insert_size[0], body, zeros_num);
1055 n +
1056 tb->
1057 rnum[0],
1058 pos_in_item,
1059 tb->
1060 insert_size
1061 [0], body,
1062 zeros_num);
1063 } 731 }
1064 732
1065 /* paste new entry, if item is directory item */ 733 /* paste new entry, if item is directory item */
1066 pasted = 734 pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
1067 B_N_PITEM_HEAD(tb->R[0], 735 if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
1068 item_pos - n + 736 leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
1069 tb->rnum[0]); 737 pos_in_item, 1,
1070 if (is_direntry_le_ih(pasted) 738 (struct reiserfs_de_head *) body,
1071 && pos_in_item >= 0) { 739 body + DEH_SIZE, tb->insert_size[0]);
1072 leaf_paste_entries(&bi,
1073 item_pos -
1074 n +
1075 tb->rnum[0],
1076 pos_in_item,
1077 1,
1078 (struct
1079 reiserfs_de_head
1080 *)body,
1081 body +
1082 DEH_SIZE,
1083 tb->
1084 insert_size
1085 [0]
1086 );
1087 if (!pos_in_item) { 740 if (!pos_in_item) {
1088 741
1089 RFALSE(item_pos - n + 742 RFALSE(item_pos - n + tb->rnum[0],
1090 tb->rnum[0],
1091 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 743 "PAP-12165: directory item must be first item of node when pasting is in 0th position");
1092 744
1093 /* update delimiting keys */ 745 /* update delimiting keys */
1094 replace_key(tb, 746 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
1095 tb->CFR[0],
1096 tb->rkey[0],
1097 tb->R[0],
1098 0);
1099 } 747 }
1100 } 748 }
1101 749
@@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1111 default: /* cases d and t */ 759 default: /* cases d and t */
1112 reiserfs_panic(tb->tb_sb, "PAP-12175", 760 reiserfs_panic(tb->tb_sb, "PAP-12175",
1113 "rnum > 0: unexpected mode: %s(%d)", 761 "rnum > 0: unexpected mode: %s(%d)",
1114 (flag == 762 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
1115 M_DELETE) ? "DELETE" : ((flag ==
1116 M_CUT) ? "CUT"
1117 : "UNKNOWN"),
1118 flag);
1119 } 763 }
1120 764
1121 } 765 }
1122 766
1123 /* tb->rnum[0] > 0 */ 767 /* tb->rnum[0] > 0 */
1124 RFALSE(tb->blknum[0] > 3, 768 RFALSE(tb->blknum[0] > 3,
1125 "PAP-12180: blknum can not be %d. It must be <= 3", 769 "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
1126 tb->blknum[0]);
1127 RFALSE(tb->blknum[0] < 0, 770 RFALSE(tb->blknum[0] < 0,
1128 "PAP-12185: blknum can not be %d. It must be >= 0", 771 "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
1129 tb->blknum[0]);
1130 772
1131 /* if while adding to a node we discover that it is possible to split 773 /* if while adding to a node we discover that it is possible to split
1132 it in two, and merge the left part into the left neighbor and the 774 it in two, and merge the left part into the left neighbor and the
@@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1177 819
1178 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 820 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */
1179 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 821 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */
1180 int old_key_comp, old_len, 822 int old_key_comp, old_len, r_zeros_number;
1181 r_zeros_number;
1182 const char *r_body; 823 const char *r_body;
1183 int version; 824 int version;
1184 825
@@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1192 old_len = ih_item_len(ih); 833 old_len = ih_item_len(ih);
1193 834
1194 /* Calculate key component and item length to insert into S_new[i] */ 835 /* Calculate key component and item length to insert into S_new[i] */
1195 set_le_ih_k_offset(ih, 836 set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
1196 le_ih_k_offset(ih) + 837 ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
1197 ((old_len -
1198 sbytes[i]) <<
1199 (is_indirect_le_ih
1200 (ih) ? tb->tb_sb->
1201 s_blocksize_bits -
1202 UNFM_P_SHIFT :
1203 0)));
1204 838
1205 put_ih_item_len(ih, sbytes[i]); 839 put_ih_item_len(ih, sbytes[i]);
1206 840
@@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1209 843
1210 if ((old_len - sbytes[i]) > zeros_num) { 844 if ((old_len - sbytes[i]) > zeros_num) {
1211 r_zeros_number = 0; 845 r_zeros_number = 0;
1212 r_body = 846 r_body = body + (old_len - sbytes[i]) - zeros_num;
1213 body + (old_len -
1214 sbytes[i]) -
1215 zeros_num;
1216 } else { 847 } else {
1217 r_body = body; 848 r_body = body;
1218 r_zeros_number = 849 r_zeros_number = zeros_num - (old_len - sbytes[i]);
1219 zeros_num - (old_len -
1220 sbytes[i]);
1221 zeros_num -= r_zeros_number; 850 zeros_num -= r_zeros_number;
1222 } 851 }
1223 852
1224 leaf_insert_into_buf(&bi, 0, ih, r_body, 853 leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
1225 r_zeros_number);
1226 854
1227 /* Calculate key component and item length to insert into S[i] */ 855 /* Calculate key component and item length to insert into S[i] */
1228 set_le_ih_k_offset(ih, old_key_comp); 856 set_le_ih_k_offset(ih, old_key_comp);
1229 put_ih_item_len(ih, 857 put_ih_item_len(ih, old_len - sbytes[i]);
1230 old_len - sbytes[i]);
1231 tb->insert_size[0] -= sbytes[i]; 858 tb->insert_size[0] -= sbytes[i];
1232 } else { /* whole new item falls into S_new[i] */ 859 } else { /* whole new item falls into S_new[i] */
1233 860
1234 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 861 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
1235 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 862 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
1236 snum[i] - 1, sbytes[i], 863 snum[i] - 1, sbytes[i], S_new[i]);
1237 S_new[i]);
1238 864
1239 /* Insert new item into S_new[i] */ 865 /* Insert new item into S_new[i] */
1240 buffer_info_init_bh(tb, &bi, S_new[i]); 866 buffer_info_init_bh(tb, &bi, S_new[i]);
1241 leaf_insert_into_buf(&bi, 867 leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
1242 item_pos - n + 868 ih, body, zeros_num);
1243 snum[i] - 1, ih,
1244 body, zeros_num);
1245 869
1246 zeros_num = tb->insert_size[0] = 0; 870 zeros_num = tb->insert_size[0] = 0;
1247 } 871 }
@@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1268 892
1269 int entry_count; 893 int entry_count;
1270 894
1271 entry_count = 895 entry_count = ih_entry_count(aux_ih);
1272 ih_entry_count(aux_ih);
1273 896
1274 if (entry_count - sbytes[i] < 897 if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
1275 pos_in_item
1276 && pos_in_item <=
1277 entry_count) {
1278 /* new directory entry falls into S_new[i] */ 898 /* new directory entry falls into S_new[i] */
1279 899
1280 RFALSE(!tb-> 900 RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
1281 insert_size[0], 901 RFALSE(sbytes[i] - 1 >= entry_count,
1282 "PAP-12215: insert_size is already 0");
1283 RFALSE(sbytes[i] - 1 >=
1284 entry_count,
1285 "PAP-12220: there are no so much entries (%d), only %d", 902 "PAP-12220: there are no so much entries (%d), only %d",
1286 sbytes[i] - 1, 903 sbytes[i] - 1, entry_count);
1287 entry_count);
1288 904
1289 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 905 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
1290 leaf_move_items 906 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
1291 (LEAF_FROM_S_TO_SNEW,
1292 tb, snum[i],
1293 sbytes[i] - 1,
1294 S_new[i]);
1295 /* Paste given directory entry to directory item */ 907 /* Paste given directory entry to directory item */
1296 buffer_info_init_bh(tb, &bi, S_new[i]); 908 buffer_info_init_bh(tb, &bi, S_new[i]);
1297 leaf_paste_in_buffer 909 leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
1298 (&bi, 0, 910 tb->insert_size[0], body, zeros_num);
1299 pos_in_item -
1300 entry_count +
1301 sbytes[i] - 1,
1302 tb->insert_size[0],
1303 body, zeros_num);
1304 /* paste new directory entry */ 911 /* paste new directory entry */
1305 leaf_paste_entries(&bi, 912 leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
1306 0, 913 (struct reiserfs_de_head *) body,
1307 pos_in_item 914 body + DEH_SIZE, tb->insert_size[0]);
1308 -
1309 entry_count
1310 +
1311 sbytes
1312 [i] -
1313 1, 1,
1314 (struct
1315 reiserfs_de_head
1316 *)
1317 body,
1318 body
1319 +
1320 DEH_SIZE,
1321 tb->
1322 insert_size
1323 [0]
1324 );
1325 tb->insert_size[0] = 0; 915 tb->insert_size[0] = 0;
1326 pos_in_item++; 916 pos_in_item++;
1327 } else { /* new directory entry doesn't fall into S_new[i] */ 917 } else { /* new directory entry doesn't fall into S_new[i] */
1328 leaf_move_items 918 leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]);
1329 (LEAF_FROM_S_TO_SNEW,
1330 tb, snum[i],
1331 sbytes[i],
1332 S_new[i]);
1333 } 919 }
1334 } else { /* regular object */ 920 } else { /* regular object */
1335 921
1336 int n_shift, n_rem, 922 int n_shift, n_rem, r_zeros_number;
1337 r_zeros_number;
1338 const char *r_body; 923 const char *r_body;
1339 924
1340 RFALSE(pos_in_item != 925 RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
1341 ih_item_len
1342 (B_N_PITEM_HEAD
1343 (tbS0, item_pos))
1344 || tb->insert_size[0] <=
1345 0,
1346 "PAP-12225: item too short or insert_size <= 0"); 926 "PAP-12225: item too short or insert_size <= 0");
1347 927
1348 /* Calculate number of bytes which must be shifted from appended item */ 928 /* Calculate number of bytes which must be shifted from appended item */
1349 n_shift = 929 n_shift = sbytes[i] - tb->insert_size[0];
1350 sbytes[i] -
1351 tb->insert_size[0];
1352 if (n_shift < 0) 930 if (n_shift < 0)
1353 n_shift = 0; 931 n_shift = 0;
1354 leaf_move_items 932 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
1355 (LEAF_FROM_S_TO_SNEW, tb,
1356 snum[i], n_shift,
1357 S_new[i]);
1358 933
1359 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 934 /* Calculate number of bytes which must remain in body after append to S_new[i] */
1360 n_rem = 935 n_rem = tb->insert_size[0] - sbytes[i];
1361 tb->insert_size[0] -
1362 sbytes[i];
1363 if (n_rem < 0) 936 if (n_rem < 0)
1364 n_rem = 0; 937 n_rem = 0;
1365 /* Append part of body into S_new[0] */ 938 /* Append part of body into S_new[0] */
1366 buffer_info_init_bh(tb, &bi, S_new[i]); 939 buffer_info_init_bh(tb, &bi, S_new[i]);
1367 if (n_rem > zeros_num) { 940 if (n_rem > zeros_num) {
1368 r_zeros_number = 0; 941 r_zeros_number = 0;
1369 r_body = 942 r_body = body + n_rem - zeros_num;
1370 body + n_rem -
1371 zeros_num;
1372 } else { 943 } else {
1373 r_body = body; 944 r_body = body;
1374 r_zeros_number = 945 r_zeros_number = zeros_num - n_rem;
1375 zeros_num - n_rem; 946 zeros_num -= r_zeros_number;
1376 zeros_num -=
1377 r_zeros_number;
1378 } 947 }
1379 948
1380 leaf_paste_in_buffer(&bi, 0, 949 leaf_paste_in_buffer(&bi, 0, n_shift,
1381 n_shift, 950 tb->insert_size[0] - n_rem,
1382 tb-> 951 r_body, r_zeros_number);
1383 insert_size
1384 [0] -
1385 n_rem,
1386 r_body,
1387 r_zeros_number);
1388 { 952 {
1389 struct item_head *tmp; 953 struct item_head *tmp;
1390 954
1391 tmp = 955 tmp = B_N_PITEM_HEAD(S_new[i], 0);
1392 B_N_PITEM_HEAD(S_new
1393 [i],
1394 0);
1395 if (is_indirect_le_ih 956 if (is_indirect_le_ih
1396 (tmp)) { 957 (tmp)) {
1397 set_ih_free_space 958 set_ih_free_space(tmp, 0);
1398 (tmp, 0); 959 set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
1399 set_le_ih_k_offset
1400 (tmp,
1401 le_ih_k_offset
1402 (tmp) +
1403 (n_rem <<
1404 (tb->
1405 tb_sb->
1406 s_blocksize_bits
1407 -
1408 UNFM_P_SHIFT)));
1409 } else { 960 } else {
1410 set_le_ih_k_offset 961 set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
1411 (tmp,
1412 le_ih_k_offset
1413 (tmp) +
1414 n_rem);
1415 } 962 }
1416 } 963 }
1417 964
@@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1426 struct item_head *pasted; 973 struct item_head *pasted;
1427 974
1428#ifdef CONFIG_REISERFS_CHECK 975#ifdef CONFIG_REISERFS_CHECK
1429 struct item_head *ih_check = 976 struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
1430 B_N_PITEM_HEAD(tbS0, item_pos);
1431 977
1432 if (!is_direntry_le_ih(ih_check) 978 if (!is_direntry_le_ih(ih_check)
1433 && (pos_in_item != ih_item_len(ih_check) 979 && (pos_in_item != ih_item_len(ih_check)
@@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1439 "to ih_item_len"); 985 "to ih_item_len");
1440#endif /* CONFIG_REISERFS_CHECK */ 986#endif /* CONFIG_REISERFS_CHECK */
1441 987
1442 leaf_mi = 988 leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
1443 leaf_move_items(LEAF_FROM_S_TO_SNEW,
1444 tb, snum[i], 989 tb, snum[i],
1445 sbytes[i], 990 sbytes[i],
1446 S_new[i]); 991 S_new[i]);
@@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1452 /* paste into item */ 997 /* paste into item */
1453 buffer_info_init_bh(tb, &bi, S_new[i]); 998 buffer_info_init_bh(tb, &bi, S_new[i]);
1454 leaf_paste_in_buffer(&bi, 999 leaf_paste_in_buffer(&bi,
1455 item_pos - n + 1000 item_pos - n + snum[i],
1456 snum[i],
1457 pos_in_item, 1001 pos_in_item,
1458 tb->insert_size[0], 1002 tb->insert_size[0],
1459 body, zeros_num); 1003 body, zeros_num);
1460 1004
1461 pasted = 1005 pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
1462 B_N_PITEM_HEAD(S_new[i],
1463 item_pos - n +
1464 snum[i]);
1465 if (is_direntry_le_ih(pasted)) { 1006 if (is_direntry_le_ih(pasted)) {
1466 leaf_paste_entries(&bi, 1007 leaf_paste_entries(&bi,
1467 item_pos - 1008 item_pos - n + snum[i],
1468 n + snum[i], 1009 pos_in_item, 1,
1469 pos_in_item, 1010 (struct reiserfs_de_head *)body,
1470 1, 1011 body + DEH_SIZE,
1471 (struct 1012 tb->insert_size[0]
1472 reiserfs_de_head
1473 *)body,
1474 body +
1475 DEH_SIZE,
1476 tb->
1477 insert_size
1478 [0]
1479 ); 1013 );
1480 } 1014 }
1481 1015
@@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1495 default: /* cases d and t */ 1029 default: /* cases d and t */
1496 reiserfs_panic(tb->tb_sb, "PAP-12245", 1030 reiserfs_panic(tb->tb_sb, "PAP-12245",
1497 "blknum > 2: unexpected mode: %s(%d)", 1031 "blknum > 2: unexpected mode: %s(%d)",
1498 (flag == 1032 (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
1499 M_DELETE) ? "DELETE" : ((flag ==
1500 M_CUT) ? "CUT"
1501 : "UNKNOWN"),
1502 flag);
1503 } 1033 }
1504 1034
1505 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); 1035 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
@@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1524 /* If we insert the first key change the delimiting key */ 1054 /* If we insert the first key change the delimiting key */
1525 if (item_pos == 0) { 1055 if (item_pos == 0) {
1526 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1056 if (tb->CFL[0]) /* can be 0 in reiserfsck */
1527 replace_key(tb, tb->CFL[0], tb->lkey[0], 1057 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
1528 tbS0, 0);
1529
1530 } 1058 }
1531 break; 1059 break;
1532 1060
@@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1536 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1064 pasted = B_N_PITEM_HEAD(tbS0, item_pos);
1537 /* when directory, may be new entry already pasted */ 1065 /* when directory, may be new entry already pasted */
1538 if (is_direntry_le_ih(pasted)) { 1066 if (is_direntry_le_ih(pasted)) {
1539 if (pos_in_item >= 0 && 1067 if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
1540 pos_in_item <=
1541 ih_entry_count(pasted)) {
1542 1068
1543 RFALSE(!tb->insert_size[0], 1069 RFALSE(!tb->insert_size[0],
1544 "PAP-12260: insert_size is 0 already"); 1070 "PAP-12260: insert_size is 0 already");
1545 1071
1546 /* prepare space */ 1072 /* prepare space */
1547 buffer_info_init_tbS0(tb, &bi); 1073 buffer_info_init_tbS0(tb, &bi);
1548 leaf_paste_in_buffer(&bi, 1074 leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
1549 item_pos, 1075 tb->insert_size[0], body,
1550 pos_in_item,
1551 tb->
1552 insert_size
1553 [0], body,
1554 zeros_num); 1076 zeros_num);
1555 1077
1556 /* paste entry */ 1078 /* paste entry */
1557 leaf_paste_entries(&bi, 1079 leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
1558 item_pos, 1080 (struct reiserfs_de_head *)body,
1559 pos_in_item, 1081 body + DEH_SIZE,
1560 1, 1082 tb->insert_size[0]);
1561 (struct
1562 reiserfs_de_head
1563 *)body,
1564 body +
1565 DEH_SIZE,
1566 tb->
1567 insert_size
1568 [0]
1569 );
1570 if (!item_pos && !pos_in_item) { 1083 if (!item_pos && !pos_in_item) {
1571 RFALSE(!tb->CFL[0] 1084 RFALSE(!tb->CFL[0] || !tb->L[0],
1572 || !tb->L[0],
1573 "PAP-12270: CFL[0]/L[0] must be specified"); 1085 "PAP-12270: CFL[0]/L[0] must be specified");
1574 if (tb->CFL[0]) { 1086 if (tb->CFL[0])
1575 replace_key(tb, 1087 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
1576 tb->
1577 CFL
1578 [0],
1579 tb->
1580 lkey
1581 [0],
1582 tbS0,
1583 0);
1584
1585 }
1586 } 1088 }
1587 tb->insert_size[0] = 0; 1089 tb->insert_size[0] = 0;
1588 } 1090 }
@@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1593 "PAP-12275: insert size must not be %d", 1095 "PAP-12275: insert size must not be %d",
1594 tb->insert_size[0]); 1096 tb->insert_size[0]);
1595 buffer_info_init_tbS0(tb, &bi); 1097 buffer_info_init_tbS0(tb, &bi);
1596 leaf_paste_in_buffer(&bi, 1098 leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
1597 item_pos, 1099 tb->insert_size[0], body, zeros_num);
1598 pos_in_item,
1599 tb->
1600 insert_size
1601 [0], body,
1602 zeros_num);
1603 1100
1604 if (is_indirect_le_ih(pasted)) { 1101 if (is_indirect_le_ih(pasted)) {
1605#if 0 1102#if 0
@@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1611 tb-> 1108 tb->
1612 insert_size[0]); 1109 insert_size[0]);
1613#endif 1110#endif
1614 set_ih_free_space 1111 set_ih_free_space(pasted, 0);
1615 (pasted, 0);
1616 } 1112 }
1617 tb->insert_size[0] = 0; 1113 tb->insert_size[0] = 0;
1618 } 1114 }
@@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
1620 else { 1116 else {
1621 if (tb->insert_size[0]) { 1117 if (tb->insert_size[0]) {
1622 print_cur_tb("12285"); 1118 print_cur_tb("12285");
1623 reiserfs_panic(tb-> 1119 reiserfs_panic(tb->tb_sb,
1624 tb_sb,
1625 "PAP-12285", 1120 "PAP-12285",
1626 "insert_size " 1121 "insert_size "
1627 "must be 0 " 1122 "must be 0 "
diff --git a/fs/sync.c b/fs/sync.c
index f15537452231..e8ba024a055b 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -222,23 +222,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
 	return do_fsync(fd, 1);
 }

-/**
- * generic_write_sync - perform syncing after a write if file / inode is sync
- * @file: file to which the write happened
- * @pos: offset where the write started
- * @count: length of the write
- *
- * This is just a simple wrapper about our general syncing function.
- */
-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
-	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
-		return 0;
-	return vfs_fsync_range(file, pos, pos + count - 1,
-			       (file->f_flags & __O_SYNC) ? 0 : 1);
-}
-EXPORT_SYMBOL(generic_write_sync);
-
 /*
  * sys_sync_file_range() permits finely controlled syncing over a segment of
  * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2e7989e3a2d6..64b48eade91d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -799,7 +799,7 @@ xfs_file_aio_write(
 		XFS_STATS_ADD(xs_write_bytes, ret);

 		/* Handle various SYNC-type writes */
-		err = generic_write_sync(file, pos, ret);
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f35d5c953ff9..9ddfb8190ca1 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -705,7 +705,6 @@ xfs_setattr_size(
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	struct inode		*inode = VFS_I(ip);
-	int			mask = iattr->ia_valid;
 	xfs_off_t		oldsize, newsize;
 	struct xfs_trans	*tp;
 	int			error;
@@ -726,8 +725,8 @@ xfs_setattr_size(

 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 	ASSERT(S_ISREG(ip->i_d.di_mode));
-	ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+	ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
 		ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);

 	oldsize = inode->i_size;
 	newsize = iattr->ia_size;
@@ -736,7 +735,7 @@ xfs_setattr_size(
 	 * Short circuit the truncate case for zero length files.
 	 */
 	if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
-		if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
+		if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
 			return 0;

 		/*
@@ -824,10 +823,11 @@ xfs_setattr_size(
 	 * these flags set. For all other operations the VFS set these flags
 	 * explicitly if it wants a timestamp update.
 	 */
-	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+	if (newsize != oldsize &&
+	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
 		iattr->ia_ctime = iattr->ia_mtime =
 			current_fs_time(inode->i_sb);
-		mask |= ATTR_CTIME | ATTR_MTIME;
+		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
 	}

 	/*
@@ -863,9 +863,9 @@ xfs_setattr_size(
 		xfs_inode_clear_eofblocks_tag(ip);
 	}

-	if (mask & ATTR_MODE)
+	if (iattr->ia_valid & ATTR_MODE)
 		xfs_setattr_mode(ip, iattr);
-	if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+	if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
 		xfs_setattr_time(ip, iattr);

 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cdebd832c3db..4ef6fdbced78 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -205,16 +205,25 @@ xlog_cil_insert_format_items(
205 /* 205 /*
206 * We 64-bit align the length of each iovec so that the start 206 * We 64-bit align the length of each iovec so that the start
207 * of the next one is naturally aligned. We'll need to 207 * of the next one is naturally aligned. We'll need to
208 * account for that slack space here. 208 * account for that slack space here. Then round nbytes up
209 * to 64-bit alignment so that the initial buffer alignment is
210 * easy to calculate and verify.
209 */ 211 */
210 nbytes += niovecs * sizeof(uint64_t); 212 nbytes += niovecs * sizeof(uint64_t);
213 nbytes = round_up(nbytes, sizeof(uint64_t));
211 214
212 /* grab the old item if it exists for reservation accounting */ 215 /* grab the old item if it exists for reservation accounting */
213 old_lv = lip->li_lv; 216 old_lv = lip->li_lv;
214 217
215 /* calc buffer size */ 218 /*
216 buf_size = sizeof(struct xfs_log_vec) + nbytes + 219 * The data buffer needs to start 64-bit aligned, so round up
217 niovecs * sizeof(struct xfs_log_iovec); 220 * that space to ensure we can align it appropriately and not
221 * overrun the buffer.
222 */
223 buf_size = nbytes +
224 round_up((sizeof(struct xfs_log_vec) +
225 niovecs * sizeof(struct xfs_log_iovec)),
226 sizeof(uint64_t));
218 227
219 /* compare to existing item size */ 228 /* compare to existing item size */
220 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) { 229 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
@@ -251,6 +260,8 @@ xlog_cil_insert_format_items(
251 /* The allocated data region lies beyond the iovec region */ 260 /* The allocated data region lies beyond the iovec region */
252 lv->lv_buf_len = 0; 261 lv->lv_buf_len = 0;
253 lv->lv_buf = (char *)lv + buf_size - nbytes; 262 lv->lv_buf = (char *)lv + buf_size - nbytes;
263 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
264
254 lip->li_ops->iop_format(lip, lv); 265 lip->li_ops->iop_format(lip, lv);
255insert: 266insert:
256 ASSERT(lv->lv_buf_len <= nbytes); 267 ASSERT(lv->lv_buf_len <= nbytes);
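
The two round_up() calls cooperate: nbytes is padded to a multiple of sizeof(uint64_t) above, and rounding the header (struct xfs_log_vec plus the iovec array) up to the same granularity means the data region, lv_buf = (char *)lv + buf_size - nbytes, always starts round_up(header, 8) bytes into the buffer. For illustration, if the header plus iovec array came to, say, 92 bytes, round_up(92, 8) = 96, so lv_buf sits 96 bytes into what is assumed to be an 8-byte-aligned allocation and stays 64-bit aligned for any niovecs, which is what the new ASSERT(IS_ALIGNED(...)) verifies.
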
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 02df7b408a26..f96c05669a9e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -282,22 +282,29 @@ xfs_readsb(
282 struct xfs_sb *sbp = &mp->m_sb; 282 struct xfs_sb *sbp = &mp->m_sb;
283 int error; 283 int error;
284 int loud = !(flags & XFS_MFSI_QUIET); 284 int loud = !(flags & XFS_MFSI_QUIET);
285 const struct xfs_buf_ops *buf_ops;
285 286
286 ASSERT(mp->m_sb_bp == NULL); 287 ASSERT(mp->m_sb_bp == NULL);
287 ASSERT(mp->m_ddev_targp != NULL); 288 ASSERT(mp->m_ddev_targp != NULL);
288 289
289 /* 290 /*
291 * For the initial read, we must guess at the sector
292 * size based on the block device. It's enough to
293 * get the sb_sectsize out of the superblock and
294 * then reread with the proper length.
295 * We don't verify it yet, because it may not be complete.
296 */
297 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
298 buf_ops = NULL;
299
300 /*
290 * Allocate a (locked) buffer to hold the superblock. 301 * Allocate a (locked) buffer to hold the superblock.
291 * This will be kept around at all times to optimize 302 * This will be kept around at all times to optimize
292 * access to the superblock. 303 * access to the superblock.
293 */ 304 */
294 sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
295
296reread: 305reread:
297 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, 306 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
298 BTOBB(sector_size), 0, 307 BTOBB(sector_size), 0, buf_ops);
299 loud ? &xfs_sb_buf_ops
300 : &xfs_sb_quiet_buf_ops);
301 if (!bp) { 308 if (!bp) {
302 if (loud) 309 if (loud)
303 xfs_warn(mp, "SB buffer read failed"); 310 xfs_warn(mp, "SB buffer read failed");
@@ -328,12 +335,13 @@ reread:
328 } 335 }
329 336
330 /* 337 /*
331 * If device sector size is smaller than the superblock size, 338 * Re-read the superblock so the buffer is correctly sized,
332 * re-read the superblock so the buffer is correctly sized. 339 * and properly verified.
333 */ 340 */
334 if (sector_size < sbp->sb_sectsize) { 341 if (buf_ops == NULL) {
335 xfs_buf_relse(bp); 342 xfs_buf_relse(bp);
336 sector_size = sbp->sb_sectsize; 343 sector_size = sbp->sb_sectsize;
344 buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
337 goto reread; 345 goto reread;
338 } 346 }
339 347
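
buf_ops doubles as a first-pass flag here: it starts out NULL, so the initial uncached read uses the guessed sector size and skips verification entirely; once sb_sectsize has been pulled out of that unverified buffer, the buf_ops == NULL branch fires exactly once, installs the real verifier (loud or quiet), and rereads the superblock with the correct length so it does get verified on the second pass.
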
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index b7c9aea77f8f..1e116794bb66 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -295,8 +295,7 @@ xfs_mount_validate_sb(
295 sbp->sb_dblocks == 0 || 295 sbp->sb_dblocks == 0 ||
296 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) || 296 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
297 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) { 297 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
298 XFS_CORRUPTION_ERROR("SB sanity check failed", 298 xfs_notice(mp, "SB sanity check failed");
299 XFS_ERRLEVEL_LOW, mp, sbp);
300 return XFS_ERROR(EFSCORRUPTED); 299 return XFS_ERROR(EFSCORRUPTED);
301 } 300 }
302 301
@@ -611,10 +610,10 @@ xfs_sb_read_verify(
611 XFS_SB_VERSION_5) || 610 XFS_SB_VERSION_5) ||
612 dsb->sb_crc != 0)) { 611 dsb->sb_crc != 0)) {
613 612
614 if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize), 613 if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
615 offsetof(struct xfs_sb, sb_crc))) { 614 offsetof(struct xfs_sb, sb_crc))) {
616 /* Only fail bad secondaries on a known V5 filesystem */ 615 /* Only fail bad secondaries on a known V5 filesystem */
617 if (bp->b_bn != XFS_SB_DADDR && 616 if (bp->b_bn == XFS_SB_DADDR ||
618 xfs_sb_version_hascrc(&mp->m_sb)) { 617 xfs_sb_version_hascrc(&mp->m_sb)) {
619 error = EFSCORRUPTED; 618 error = EFSCORRUPTED;
620 goto out_error; 619 goto out_error;
@@ -625,7 +624,7 @@ xfs_sb_read_verify(
625 624
626out_error: 625out_error:
627 if (error) { 626 if (error) {
628 if (error != EWRONGFS) 627 if (error == EFSCORRUPTED)
629 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, 628 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
630 mp, bp->b_addr); 629 mp, bp->b_addr);
631 xfs_buf_ioerror(bp, error); 630 xfs_buf_ioerror(bp, error);
@@ -644,7 +643,6 @@ xfs_sb_quiet_read_verify(
644{ 643{
645 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); 644 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
646 645
647
648 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) { 646 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
649 /* XFS filesystem, verify noisily! */ 647 /* XFS filesystem, verify noisily! */
650 xfs_sb_read_verify(bp); 648 xfs_sb_read_verify(bp);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e4f41d9af4d..34c7bdc06014 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte)
701} 701}
702#endif 702#endif
703 703
704#ifndef ptep_set_numa
705static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
706 pte_t *ptep)
707{
708 pte_t ptent = *ptep;
709
710 ptent = pte_mknuma(ptent);
711 set_pte_at(mm, addr, ptep, ptent);
712 return;
713}
714#endif
715
704#ifndef pmd_mknuma 716#ifndef pmd_mknuma
705static inline pmd_t pmd_mknuma(pmd_t pmd) 717static inline pmd_t pmd_mknuma(pmd_t pmd)
706{ 718{
@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
708 return pmd_clear_flags(pmd, _PAGE_PRESENT); 720 return pmd_clear_flags(pmd, _PAGE_PRESENT);
709} 721}
710#endif 722#endif
723
724#ifndef pmdp_set_numa
725static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
726 pmd_t *pmdp)
727{
728 pmd_t pmd = *pmdp;
729
730 pmd = pmd_mknuma(pmd);
731 set_pmd_at(mm, addr, pmdp, pmd);
732 return;
733}
734#endif
711#else 735#else
712extern int pte_numa(pte_t pte); 736extern int pte_numa(pte_t pte);
713extern int pmd_numa(pmd_t pmd); 737extern int pmd_numa(pmd_t pmd);
@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
715extern pmd_t pmd_mknonnuma(pmd_t pmd); 739extern pmd_t pmd_mknonnuma(pmd_t pmd);
716extern pte_t pte_mknuma(pte_t pte); 740extern pte_t pte_mknuma(pte_t pte);
717extern pmd_t pmd_mknuma(pmd_t pmd); 741extern pmd_t pmd_mknuma(pmd_t pmd);
742extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
743extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
718#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ 744#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
719#else 745#else
720static inline int pmd_numa(pmd_t pmd) 746static inline int pmd_numa(pmd_t pmd)
@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte)
742 return pte; 768 return pte;
743} 769}
744 770
771static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
772 pte_t *ptep)
773{
774 return;
775}
776
777
745static inline pmd_t pmd_mknuma(pmd_t pmd) 778static inline pmd_t pmd_mknuma(pmd_t pmd)
746{ 779{
747 return pmd; 780 return pmd;
748} 781}
782
783static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
784 pmd_t *pmdp)
785{
786 return ;
787}
749#endif /* CONFIG_NUMA_BALANCING */ 788#endif /* CONFIG_NUMA_BALANCING */
750 789
751#endif /* CONFIG_MMU */ 790#endif /* CONFIG_MMU */
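
With these helpers, a caller that previously cleared the PTE, applied pte_mknuma() and wrote the result back can hand the whole update to the architecture (or fall back to the generic set_pte_at() versions above). A minimal sketch of the intended call pattern; everything other than the helpers themselves is illustrative, and locking, TLB flushing and huge-page handling are omitted:

    /* Sketch: mark present, not-yet-NUMA PTEs for NUMA hinting faults.
     * Assumes the page-table lock covering 'pte' is held by the caller.
     */
    static void mark_ptes_numa(struct mm_struct *mm, unsigned long addr,
                               unsigned long end, pte_t *pte)
    {
            for (; addr < end; addr += PAGE_SIZE, pte++) {
                    if (pte_present(*pte) && !pte_numa(*pte))
                            ptep_set_numa(mm, addr, pte);
            }
    }
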
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 04086c5be930..04a7f31301f8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...);
199#define DRM_INFO(fmt, ...) \ 199#define DRM_INFO(fmt, ...) \
200 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) 200 printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
201 201
202#define DRM_INFO_ONCE(fmt, ...) \
203 printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
204
202/** 205/**
203 * Debug output. 206 * Debug output.
204 * 207 *
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 71727b6210ae..8f3dee097579 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -907,6 +907,9 @@ struct drm_mode_config {
907 907
908 /* whether async page flip is supported or not */ 908 /* whether async page flip is supported or not */
909 bool async_page_flip; 909 bool async_page_flip;
910
911 /* cursor size */
912 uint32_t cursor_width, cursor_height;
910}; 913};
911 914
912#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 915#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index d1f61bfe0ebe..49a828425fa2 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -29,6 +29,8 @@
29#include <drm/ttm/ttm_bo_driver.h> 29#include <drm/ttm/ttm_bo_driver.h>
30#include <drm/ttm/ttm_memory.h> 30#include <drm/ttm/ttm_memory.h>
31 31
32struct device;
33
32/** 34/**
33 * Initialize pool allocator. 35 * Initialize pool allocator.
34 */ 36 */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 70654521dab6..5a4d39b4686b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio)
250 struct bio_vec bv; 250 struct bio_vec bv;
251 struct bvec_iter iter; 251 struct bvec_iter iter;
252 252
253 /*
254 * We special case discard/write same, because they interpret bi_size
255 * differently:
256 */
257
258 if (bio->bi_rw & REQ_DISCARD)
259 return 1;
260
261 if (bio->bi_rw & REQ_WRITE_SAME)
262 return 1;
263
253 bio_for_each_segment(bv, bio, iter) 264 bio_for_each_segment(bv, bio, iter)
254 segs++; 265 segs++;
255 266
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
332extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); 343extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
333 344
334extern struct bio_set *fs_bio_set; 345extern struct bio_set *fs_bio_set;
346unsigned int bio_integrity_tag_size(struct bio *bio);
335 347
336static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 348static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
337{ 349{
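
The discard/write-same special case exists because those bios use bi_size to describe how much of the device is affected rather than how much payload they carry: a discard typically has no data pages at all and a write-same carries one repeated buffer, so walking the bvecs with bio_for_each_segment() would give a misleading, possibly zero, count. Defining both as exactly one segment sidesteps that.
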
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 161b23105b1e..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -83,6 +83,8 @@ struct blk_mq_ops {
83 */ 83 */
84 rq_timed_out_fn *timeout; 84 rq_timed_out_fn *timeout;
85 85
86 softirq_done_fn *complete;
87
86 /* 88 /*
87 * Override for hctx allocations (should probably go) 89 * Override for hctx allocations (should probably go)
88 */ 90 */
@@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
119 121
120void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); 122void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
121 123
122void blk_mq_insert_request(struct request_queue *, struct request *, bool); 124void blk_mq_insert_request(struct request_queue *, struct request *,
125 bool, bool);
123void blk_mq_run_queues(struct request_queue *q, bool async); 126void blk_mq_run_queues(struct request_queue *q, bool async);
124void blk_mq_free_request(struct request *rq); 127void blk_mq_free_request(struct request *rq);
125bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 128bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
126struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); 129struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
127struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); 130struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
128struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); 131struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
129 132
@@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
133 136
134void blk_mq_end_io(struct request *rq, int error); 137void blk_mq_end_io(struct request *rq, int error);
135 138
139void blk_mq_complete_request(struct request *rq);
140
136void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 141void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
137void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); 142void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
138void blk_mq_stop_hw_queues(struct request_queue *q); 143void blk_mq_stop_hw_queues(struct request_queue *q);
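
For reference, a minimal sketch of how a driver might split completion across the new hook; the mydrv_* names are illustrative, and request lookup, error propagation and the remaining blk_mq_ops members are omitted:

    #include <linux/blk-mq.h>
    #include <linux/interrupt.h>

    /* Runs via the softirq completion path as the ops->complete callback. */
    static void mydrv_softirq_done(struct request *rq)
    {
            blk_mq_end_io(rq, 0);
    }

    /* Hard-IRQ handler: just hand the request off for deferred completion. */
    static irqreturn_t mydrv_irq(int irq, void *data)
    {
            struct request *rq = data;      /* however the driver finds it */

            blk_mq_complete_request(rq);
            return IRQ_HANDLED;
    }

    static struct blk_mq_ops mydrv_mq_ops = {
            /* .queue_rq, .map_queue, ... */
            .complete       = mydrv_softirq_done,
    };
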
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8678c4322b44..4afa4f8f6090 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -98,7 +98,7 @@ struct request {
98 struct list_head queuelist; 98 struct list_head queuelist;
99 union { 99 union {
100 struct call_single_data csd; 100 struct call_single_data csd;
101 struct work_struct mq_flush_data; 101 struct work_struct mq_flush_work;
102 }; 102 };
103 103
104 struct request_queue *q; 104 struct request_queue *q;
@@ -448,13 +448,8 @@ struct request_queue {
448 unsigned long flush_pending_since; 448 unsigned long flush_pending_since;
449 struct list_head flush_queue[2]; 449 struct list_head flush_queue[2];
450 struct list_head flush_data_in_flight; 450 struct list_head flush_data_in_flight;
451 union { 451 struct request *flush_rq;
452 struct request flush_rq; 452 spinlock_t mq_flush_lock;
453 struct {
454 spinlock_t mq_flush_lock;
455 struct work_struct mq_flush_work;
456 };
457 };
458 453
459 struct mutex sysfs_lock; 454 struct mutex sysfs_lock;
460 455
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 2f0543f7510c..f9bbbb472663 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -11,7 +11,9 @@
11#define CAN_SKB_H 11#define CAN_SKB_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/skbuff.h>
14#include <linux/can.h> 15#include <linux/can.h>
16#include <net/sock.h>
15 17
16/* 18/*
17 * The struct can_skb_priv is used to transport additional information along 19 * The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
42 skb_reserve(skb, sizeof(struct can_skb_priv)); 44 skb_reserve(skb, sizeof(struct can_skb_priv));
43} 45}
44 46
47static inline void can_skb_destructor(struct sk_buff *skb)
48{
49 sock_put(skb->sk);
50}
51
52static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
53{
54 if (sk) {
55 sock_hold(sk);
56 skb->destructor = can_skb_destructor;
57 skb->sk = sk;
58 }
59}
60
61/*
62 * returns an unshared skb owned by the original sock to be echo'ed back
63 */
64static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
65{
66 if (skb_shared(skb)) {
67 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
68
69 if (likely(nskb)) {
70 can_skb_set_owner(nskb, skb->sk);
71 consume_skb(skb);
72 return nskb;
73 } else {
74 kfree_skb(skb);
75 return NULL;
76 }
77 }
78
79 /* we can assume to have an unshared skb with proper owner */
80 return skb;
81}
82
45#endif /* CAN_SKB_H */ 83#endif /* CAN_SKB_H */
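
A sketch of the intended use in a CAN driver transmit path; mydrv_start_xmit() is illustrative, and can_put_echo_skb() is assumed to be the usual helper from linux/can/dev.h that drivers use to park the echo skb until TX completion:

    #include <linux/can/dev.h>
    #include <linux/can/skb.h>

    static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* Get an unshared skb that is still owned by the sending socket. */
            skb = can_create_echo_skb(skb);
            if (!skb)
                    return NETDEV_TX_OK;    /* original was freed on clone failure */

            can_put_echo_skb(skb, dev, 0);
            /* ... queue the frame to the controller, echo it on TX completion ... */
            return NETDEV_TX_OK;
    }
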
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2623cffc73a1..25bfb0eff772 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op);
373/* 373/*
374 * Ceph setxattr request flags. 374 * Ceph setxattr request flags.
375 */ 375 */
376#define CEPH_XATTR_CREATE 1 376#define CEPH_XATTR_CREATE (1 << 0)
377#define CEPH_XATTR_REPLACE 2 377#define CEPH_XATTR_REPLACE (1 << 1)
378#define CEPH_XATTR_REMOVE (1 << 31)
378 379
379union ceph_mds_request_args { 380union ceph_mds_request_args {
380 struct { 381 struct {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5c097596104b..9450f025fe0c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -166,6 +166,8 @@ struct cgroup {
166 * 166 *
167 * The ID of the root cgroup is always 0, and a new cgroup 167 * The ID of the root cgroup is always 0, and a new cgroup
168 * will be assigned with a smallest available ID. 168 * will be assigned with a smallest available ID.
169 *
170 * Allocating/Removing ID must be protected by cgroup_mutex.
169 */ 171 */
170 int id; 172 int id;
171 173
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index ded429966c1f..2507fd2a1eb4 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -75,11 +75,7 @@
75 * 75 *
76 * (asm goto is automatically volatile - the naming reflects this.) 76 * (asm goto is automatically volatile - the naming reflects this.)
77 */ 77 */
78#if GCC_VERSION <= 40801 78#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
79# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
80#else
81# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
82#endif
83 79
84#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP 80#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
85#if GCC_VERSION >= 40400 81#if GCC_VERSION >= 40400
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index dfac5ed31120..f886985a28b2 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
171 size_t size, int flags, const char *); 171 size_t size, int flags, const char *);
172 172
173#define dma_buf_export(priv, ops, size, flags) \ 173#define dma_buf_export(priv, ops, size, flags) \
174 dma_buf_export_named(priv, ops, size, flags, __FILE__) 174 dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
175 175
176int dma_buf_fd(struct dma_buf *dmabuf, int flags); 176int dma_buf_fd(struct dma_buf *dmabuf, int flags);
177struct dma_buf *dma_buf_get(int fd); 177struct dma_buf *dma_buf_get(int fd);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d79678c188ad..60829565e552 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2274,7 +2274,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
2274extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, 2274extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2275 int datasync); 2275 int datasync);
2276extern int vfs_fsync(struct file *file, int datasync); 2276extern int vfs_fsync(struct file *file, int datasync);
2277extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); 2277static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
2278{
2279 if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
2280 return 0;
2281 return vfs_fsync_range(file, pos, pos + count - 1,
2282 (file->f_flags & __O_SYNC) ? 0 : 1);
2283}
2278extern void emergency_sync(void); 2284extern void emergency_sync(void);
2279extern void emergency_remount(void); 2285extern void emergency_remount(void);
2280#ifdef CONFIG_BLOCK 2286#ifdef CONFIG_BLOCK
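
Note the flag arithmetic the new inline relies on: O_SYNC is defined as (__O_SYNC | O_DSYNC), so an O_SYNC open also satisfies the O_DSYNC test, and the __O_SYNC check then requests a full sync (datasync = 0) rather than a data-only sync (datasync = 1); IS_SYNC() covers sync mounts and inodes flagged for synchronous I/O. Callers follow the pattern visible in the xfs_file.c hunk earlier: after a successful write of ret bytes, sync the range starting at iocb->ki_pos - ret.
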
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 4d34dbbbad4d..7a8144fef406 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -4,8 +4,6 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6 6
7#ifdef CONFIG_GPIOLIB
8
9struct device; 7struct device;
10struct gpio_chip; 8struct gpio_chip;
11 9
@@ -18,6 +16,8 @@ struct gpio_chip;
18 */ 16 */
19struct gpio_desc; 17struct gpio_desc;
20 18
19#ifdef CONFIG_GPIOLIB
20
21/* Acquire and dispose GPIOs */ 21/* Acquire and dispose GPIOs */
22struct gpio_desc *__must_check gpiod_get(struct device *dev, 22struct gpio_desc *__must_check gpiod_get(struct device *dev,
23 const char *con_id); 23 const char *con_id);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 15da677478dd..344883dce584 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released {
875struct vmbus_channel_initiate_contact { 875struct vmbus_channel_initiate_contact {
876 struct vmbus_channel_message_header header; 876 struct vmbus_channel_message_header header;
877 u32 vmbus_version_requested; 877 u32 vmbus_version_requested;
878 u32 padding2; 878 u32 target_vcpu; /* The VCPU the host should respond to */
879 u64 interrupt_page; 879 u64 interrupt_page;
880 u64 monitor_page1; 880 u64 monitor_page1;
881 u64 monitor_page2; 881 u64 monitor_page2;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0053adde0ed9..a2678d35b5a2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
158 devname, dev_id); 158 devname, dev_id);
159} 159}
160 160
161extern int __must_check
162devm_request_any_context_irq(struct device *dev, unsigned int irq,
163 irq_handler_t handler, unsigned long irqflags,
164 const char *devname, void *dev_id);
165
161extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); 166extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
162 167
163/* 168/*
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e7831d203737..35e7eca4e33b 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -118,9 +118,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
118 * the new maximum will handle anyone else. I may have to revisit this 118 * the new maximum will handle anyone else. I may have to revisit this
119 * in the future. 119 * in the future.
120 */ 120 */
121#define MIN_QUEUESMAX 1
122#define DFLT_QUEUESMAX 256 121#define DFLT_QUEUESMAX 256
123#define HARD_QUEUESMAX 1024
124#define MIN_MSGMAX 1 122#define MIN_MSGMAX 1
125#define DFLT_MSG 10U 123#define DFLT_MSG 10U
126#define DFLT_MSGMAX 10 124#define DFLT_MSGMAX 10
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index ad1ae7f345ad..78c76cd4d37b 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -387,7 +387,7 @@ struct max8997_dev {
387 struct i2c_client *muic; /* slave addr 0x4a */ 387 struct i2c_client *muic; /* slave addr 0x4a */
388 struct mutex iolock; 388 struct mutex iolock;
389 389
390 int type; 390 unsigned long type;
391 struct platform_device *battery; /* battery control (not fuel gauge) */ 391 struct platform_device *battery; /* battery control (not fuel gauge) */
392 392
393 int irq; 393 int irq;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 4ecb24b4b863..d68ada502ff3 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -163,7 +163,7 @@ struct max8998_dev {
163 int ono; 163 int ono;
164 u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; 164 u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
165 u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; 165 u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
166 int type; 166 unsigned long type;
167 bool wakeup; 167 bool wakeup;
168}; 168};
169 169
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index a5a7f0130e96..54b5458ec084 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -252,7 +252,7 @@ struct tps65217_board {
252struct tps65217 { 252struct tps65217 {
253 struct device *dev; 253 struct device *dev;
254 struct tps65217_board *pdata; 254 struct tps65217_board *pdata;
255 unsigned int id; 255 unsigned long id;
256 struct regulator_desc desc[TPS65217_NUM_REGULATOR]; 256 struct regulator_desc desc[TPS65217_NUM_REGULATOR];
257 struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; 257 struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
258 struct regmap *regmap; 258 struct regmap *regmap;
@@ -263,7 +263,7 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
263 return dev_get_drvdata(dev); 263 return dev_get_drvdata(dev);
264} 264}
265 265
266static inline int tps65217_chip_id(struct tps65217 *tps65217) 266static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
267{ 267{
268 return tps65217->id; 268 return tps65217->id;
269} 269}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 554548cd3dd4..130bc8d77fa5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -38,8 +38,10 @@
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/spinlock_types.h> 39#include <linux/spinlock_types.h>
40#include <linux/semaphore.h> 40#include <linux/semaphore.h>
41#include <linux/slab.h>
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
42#include <linux/radix-tree.h> 43#include <linux/radix-tree.h>
44
43#include <linux/mlx5/device.h> 45#include <linux/mlx5/device.h>
44#include <linux/mlx5/doorbell.h> 46#include <linux/mlx5/doorbell.h>
45 47
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
227 * protect uuar allocation data structs 229 * protect uuar allocation data structs
228 */ 230 */
229 struct mutex lock; 231 struct mutex lock;
232 u32 ver;
230}; 233};
231 234
232struct mlx5_bf { 235struct mlx5_bf {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02ee6f92..e8eeebd49a98 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
752 unsigned char id_len; 752 unsigned char id_len;
753}; 753};
754 754
755typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
756 struct sk_buff *skb);
757
755/* 758/*
756 * This structure defines the management hooks for network devices. 759 * This structure defines the management hooks for network devices.
757 * The following hooks can be defined; unless noted otherwise, they are 760 * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
783 * Required can not be NULL. 786 * Required can not be NULL.
784 * 787 *
785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 788 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
786 * void *accel_priv); 789 * void *accel_priv, select_queue_fallback_t fallback);
787 * Called to decide which queue to when device supports multiple 790 * Called to decide which queue to when device supports multiple
788 * transmit queues. 791 * transmit queues.
789 * 792 *
@@ -1005,7 +1008,8 @@ struct net_device_ops {
1005 struct net_device *dev); 1008 struct net_device *dev);
1006 u16 (*ndo_select_queue)(struct net_device *dev, 1009 u16 (*ndo_select_queue)(struct net_device *dev,
1007 struct sk_buff *skb, 1010 struct sk_buff *skb,
1008 void *accel_priv); 1011 void *accel_priv,
1012 select_queue_fallback_t fallback);
1009 void (*ndo_change_rx_flags)(struct net_device *dev, 1013 void (*ndo_change_rx_flags)(struct net_device *dev,
1010 int flags); 1014 int flags);
1011 void (*ndo_set_rx_mode)(struct net_device *dev); 1015 void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
1551struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1555struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1552 struct sk_buff *skb, 1556 struct sk_buff *skb,
1553 void *accel_priv); 1557 void *accel_priv);
1554u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1555 1558
1556/* 1559/*
1557 * Net namespace inlines 1560 * Net namespace inlines
@@ -2276,6 +2279,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
2276} 2279}
2277 2280
2278/** 2281/**
2282 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
2283 * @dev: network device
2284 * @queue_index: given tx queue index
2285 *
2286 * Returns 0 if given tx queue index >= number of device tx queues,
2287 * otherwise returns the originally passed tx queue index.
2288 */
2289static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
2290{
2291 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2292 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2293 dev->name, queue_index,
2294 dev->real_num_tx_queues);
2295 return 0;
2296 }
2297
2298 return queue_index;
2299}
2300
2301/**
2279 * netif_running - test if up 2302 * netif_running - test if up
2280 * @dev: network device 2303 * @dev: network device
2281 * 2304 *
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
3068void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3091void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3069 struct net_device *dev); 3092 struct net_device *dev);
3070 3093
3071netdev_features_t netif_skb_features(struct sk_buff *skb); 3094netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
3095 const struct net_device *dev);
3096static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
3097{
3098 return netif_skb_dev_features(skb, skb->dev);
3099}
3072 3100
3073static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3101static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3074{ 3102{
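
A sketch of an ndo_select_queue() implementation under the new signature; mydrv_is_mgmt_frame() is a stand-in for whatever traffic the driver wants to steer itself, everything else is handed back to the core's fallback selector, and netdev_cap_txqueue() above gives the core a way to clamp an out-of-range return value:

    #include <linux/netdevice.h>

    /* Stand-in predicate; not a real kernel helper. */
    static bool mydrv_is_mgmt_frame(const struct sk_buff *skb)
    {
            return false;
    }

    static u16 mydrv_select_queue(struct net_device *dev, struct sk_buff *skb,
                                  void *accel_priv, select_queue_fallback_t fallback)
    {
            /* Pin management traffic to the last queue, defer the rest. */
            if (mydrv_is_mgmt_frame(skb))
                    return dev->real_num_tx_queues - 1;

            return fallback(dev, skb);
    }
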
diff --git a/include/linux/of.h b/include/linux/of.h
index 70c64ba17fa5..435cb995904d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np)
169 169
170extern struct device_node *of_find_node_by_name(struct device_node *from, 170extern struct device_node *of_find_node_by_name(struct device_node *from,
171 const char *name); 171 const char *name);
172#define for_each_node_by_name(dn, name) \
173 for (dn = of_find_node_by_name(NULL, name); dn; \
174 dn = of_find_node_by_name(dn, name))
175extern struct device_node *of_find_node_by_type(struct device_node *from, 172extern struct device_node *of_find_node_by_type(struct device_node *from,
176 const char *type); 173 const char *type);
177#define for_each_node_by_type(dn, type) \
178 for (dn = of_find_node_by_type(NULL, type); dn; \
179 dn = of_find_node_by_type(dn, type))
180extern struct device_node *of_find_compatible_node(struct device_node *from, 174extern struct device_node *of_find_compatible_node(struct device_node *from,
181 const char *type, const char *compat); 175 const char *type, const char *compat);
182#define for_each_compatible_node(dn, type, compatible) \
183 for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
184 dn = of_find_compatible_node(dn, type, compatible))
185extern struct device_node *of_find_matching_node_and_match( 176extern struct device_node *of_find_matching_node_and_match(
186 struct device_node *from, 177 struct device_node *from,
187 const struct of_device_id *matches, 178 const struct of_device_id *matches,
188 const struct of_device_id **match); 179 const struct of_device_id **match);
189static inline struct device_node *of_find_matching_node( 180
190 struct device_node *from,
191 const struct of_device_id *matches)
192{
193 return of_find_matching_node_and_match(from, matches, NULL);
194}
195#define for_each_matching_node(dn, matches) \
196 for (dn = of_find_matching_node(NULL, matches); dn; \
197 dn = of_find_matching_node(dn, matches))
198#define for_each_matching_node_and_match(dn, matches, match) \
199 for (dn = of_find_matching_node_and_match(NULL, matches, match); \
200 dn; dn = of_find_matching_node_and_match(dn, matches, match))
201extern struct device_node *of_find_node_by_path(const char *path); 181extern struct device_node *of_find_node_by_path(const char *path);
202extern struct device_node *of_find_node_by_phandle(phandle handle); 182extern struct device_node *of_find_node_by_phandle(phandle handle);
203extern struct device_node *of_get_parent(const struct device_node *node); 183extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child(
209 189
210extern struct device_node *of_get_child_by_name(const struct device_node *node, 190extern struct device_node *of_get_child_by_name(const struct device_node *node,
211 const char *name); 191 const char *name);
212#define for_each_child_of_node(parent, child) \
213 for (child = of_get_next_child(parent, NULL); child != NULL; \
214 child = of_get_next_child(parent, child))
215
216#define for_each_available_child_of_node(parent, child) \
217 for (child = of_get_next_available_child(parent, NULL); child != NULL; \
218 child = of_get_next_available_child(parent, child))
219
220static inline int of_get_child_count(const struct device_node *np)
221{
222 struct device_node *child;
223 int num = 0;
224
225 for_each_child_of_node(np, child)
226 num++;
227
228 return num;
229}
230
231static inline int of_get_available_child_count(const struct device_node *np)
232{
233 struct device_node *child;
234 int num = 0;
235
236 for_each_available_child_of_node(np, child)
237 num++;
238
239 return num;
240}
241 192
242/* cache lookup */ 193/* cache lookup */
243extern struct device_node *of_find_next_cache_node(const struct device_node *); 194extern struct device_node *of_find_next_cache_node(const struct device_node *);
244extern struct device_node *of_find_node_with_property( 195extern struct device_node *of_find_node_with_property(
245 struct device_node *from, const char *prop_name); 196 struct device_node *from, const char *prop_name);
246#define for_each_node_with_property(dn, prop_name) \
247 for (dn = of_find_node_with_property(NULL, prop_name); dn; \
248 dn = of_find_node_with_property(dn, prop_name))
249 197
250extern struct property *of_find_property(const struct device_node *np, 198extern struct property *of_find_property(const struct device_node *np,
251 const char *name, 199 const char *name,
@@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from,
367 return NULL; 315 return NULL;
368} 316}
369 317
370static inline struct device_node *of_get_parent(const struct device_node *node) 318static inline struct device_node *of_find_node_by_type(struct device_node *from,
319 const char *type)
371{ 320{
372 return NULL; 321 return NULL;
373} 322}
374 323
375static inline bool of_have_populated_dt(void) 324static inline struct device_node *of_find_matching_node_and_match(
325 struct device_node *from,
326 const struct of_device_id *matches,
327 const struct of_device_id **match)
376{ 328{
377 return false; 329 return NULL;
378} 330}
379 331
380/* Kill an unused variable warning on a device_node pointer */ 332static inline struct device_node *of_get_parent(const struct device_node *node)
381static inline void __of_use_dn(const struct device_node *np)
382{ 333{
334 return NULL;
383} 335}
384 336
385#define for_each_child_of_node(parent, child) \ 337static inline struct device_node *of_get_next_child(
386 while (__of_use_dn(parent), __of_use_dn(child), 0) 338 const struct device_node *node, struct device_node *prev)
339{
340 return NULL;
341}
387 342
388#define for_each_available_child_of_node(parent, child) \ 343static inline struct device_node *of_get_next_available_child(
389 while (0) 344 const struct device_node *node, struct device_node *prev)
345{
346 return NULL;
347}
390 348
391static inline struct device_node *of_get_child_by_name( 349static inline struct device_node *of_find_node_with_property(
392 const struct device_node *node, 350 struct device_node *from, const char *prop_name)
393 const char *name)
394{ 351{
395 return NULL; 352 return NULL;
396} 353}
397 354
398static inline int of_get_child_count(const struct device_node *np) 355static inline bool of_have_populated_dt(void)
399{ 356{
400 return 0; 357 return false;
401} 358}
402 359
403static inline int of_get_available_child_count(const struct device_node *np) 360static inline struct device_node *of_get_child_by_name(
361 const struct device_node *node,
362 const char *name)
404{ 363{
405 return 0; 364 return NULL;
406} 365}
407 366
408static inline int of_device_is_compatible(const struct device_node *device, 367static inline int of_device_is_compatible(const struct device_node *device,
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np);
569static inline int of_node_to_nid(struct device_node *device) { return 0; } 528static inline int of_node_to_nid(struct device_node *device) { return 0; }
570#endif 529#endif
571 530
531static inline struct device_node *of_find_matching_node(
532 struct device_node *from,
533 const struct of_device_id *matches)
534{
535 return of_find_matching_node_and_match(from, matches, NULL);
536}
537
572/** 538/**
573 * of_property_read_bool - Findfrom a property 539 * of_property_read_bool - Findfrom a property
574 * @np: device node from which the property value is to be read. 540 * @np: device node from which the property value is to be read.
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np,
618 s; \ 584 s; \
619 s = of_prop_next_string(prop, s)) 585 s = of_prop_next_string(prop, s))
620 586
587#define for_each_node_by_name(dn, name) \
588 for (dn = of_find_node_by_name(NULL, name); dn; \
589 dn = of_find_node_by_name(dn, name))
590#define for_each_node_by_type(dn, type) \
591 for (dn = of_find_node_by_type(NULL, type); dn; \
592 dn = of_find_node_by_type(dn, type))
593#define for_each_compatible_node(dn, type, compatible) \
594 for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
595 dn = of_find_compatible_node(dn, type, compatible))
596#define for_each_matching_node(dn, matches) \
597 for (dn = of_find_matching_node(NULL, matches); dn; \
598 dn = of_find_matching_node(dn, matches))
599#define for_each_matching_node_and_match(dn, matches, match) \
600 for (dn = of_find_matching_node_and_match(NULL, matches, match); \
601 dn; dn = of_find_matching_node_and_match(dn, matches, match))
602
603#define for_each_child_of_node(parent, child) \
604 for (child = of_get_next_child(parent, NULL); child != NULL; \
605 child = of_get_next_child(parent, child))
606#define for_each_available_child_of_node(parent, child) \
607 for (child = of_get_next_available_child(parent, NULL); child != NULL; \
608 child = of_get_next_available_child(parent, child))
609
610#define for_each_node_with_property(dn, prop_name) \
611 for (dn = of_find_node_with_property(NULL, prop_name); dn; \
612 dn = of_find_node_with_property(dn, prop_name))
613
614static inline int of_get_child_count(const struct device_node *np)
615{
616 struct device_node *child;
617 int num = 0;
618
619 for_each_child_of_node(np, child)
620 num++;
621
622 return num;
623}
624
625static inline int of_get_available_child_count(const struct device_node *np)
626{
627 struct device_node *child;
628 int num = 0;
629
630 for_each_available_child_of_node(np, child)
631 num++;
632
633 return num;
634}
635
621#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE) 636#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
622extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *); 637extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
623extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop); 638extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
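
Moving the iterator macros and child-count helpers below both the real prototypes and the !CONFIG_OF stubs lets one set of definitions serve both configurations: with CONFIG_OF disabled the stubbed of_get_next_child() and friends return NULL, so the loops never execute and the counts come out 0, replacing the old __of_use_dn() and while (0) dummies. A trivial usage sketch, with np assumed to be an already-looked-up controller node and mydrv_* illustrative:

    #include <linux/of.h>

    static int mydrv_count_enabled_channels(struct device_node *np)
    {
            struct device_node *child;
            int enabled = 0;

            if (!of_get_child_count(np))
                    return 0;       /* no children at all, or !CONFIG_OF */

            for_each_available_child_of_node(np, child)
                    enabled++;

            return enabled;
    }
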
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d7dd6768cb7..ef370210ffb2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev,
78 78
79static inline void of_device_node_put(struct device *dev) { } 79static inline void of_device_node_put(struct device *dev) { }
80 80
81static inline const struct of_device_id *of_match_device( 81static inline const struct of_device_id *__of_match_device(
82 const struct of_device_id *matches, const struct device *dev) 82 const struct of_device_id *matches, const struct device *dev)
83{ 83{
84 return NULL; 84 return NULL;
85} 85}
86#define of_match_device(matches, dev) \
87 __of_match_device(of_match_ptr(matches), (dev))
86 88
87static inline struct device_node *of_cpu_device_node_get(int cpu) 89static inline struct device_node *of_cpu_device_node_get(int cpu)
88{ 90{
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb57c892b214..33aa2caf0f0c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1169,8 +1169,23 @@ void msi_remove_pci_irq_vectors(struct pci_dev *dev);
1169void pci_restore_msi_state(struct pci_dev *dev); 1169void pci_restore_msi_state(struct pci_dev *dev);
1170int pci_msi_enabled(void); 1170int pci_msi_enabled(void);
1171int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); 1171int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
1172static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1173{
1174 int rc = pci_enable_msi_range(dev, nvec, nvec);
1175 if (rc < 0)
1176 return rc;
1177 return 0;
1178}
1172int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1179int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1173 int minvec, int maxvec); 1180 int minvec, int maxvec);
1181static inline int pci_enable_msix_exact(struct pci_dev *dev,
1182 struct msix_entry *entries, int nvec)
1183{
1184 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1185 if (rc < 0)
1186 return rc;
1187 return 0;
1188}
1174#else 1189#else
1175static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1190static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1176static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) 1191static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
@@ -1189,9 +1204,14 @@ static inline int pci_msi_enabled(void) { return 0; }
1189static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, 1204static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
1190 int maxvec) 1205 int maxvec)
1191{ return -ENOSYS; } 1206{ return -ENOSYS; }
1207static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
1208{ return -ENOSYS; }
1192static inline int pci_enable_msix_range(struct pci_dev *dev, 1209static inline int pci_enable_msix_range(struct pci_dev *dev,
1193 struct msix_entry *entries, int minvec, int maxvec) 1210 struct msix_entry *entries, int minvec, int maxvec)
1194{ return -ENOSYS; } 1211{ return -ENOSYS; }
1212static inline int pci_enable_msix_exact(struct pci_dev *dev,
1213 struct msix_entry *entries, int nvec)
1214{ return -ENOSYS; }
1195#endif 1215#endif
1196 1216
1197#ifdef CONFIG_PCIEPORTBUS 1217#ifdef CONFIG_PCIEPORTBUS
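
The *_exact() wrappers turn the range interfaces into a request for exactly nvec vectors and return 0 on success instead of the vector count, which keeps call-site error handling uniform. A probe-path sketch; mydrv_setup_irqs(), the fixed count of 4 and the caller-provided entries[] array are illustrative:

    #include <linux/pci.h>

    static int mydrv_setup_irqs(struct pci_dev *pdev, struct msix_entry *entries)
    {
            int i, err;

            for (i = 0; i < 4; i++)
                    entries[i].entry = i;

            err = pci_enable_msix_exact(pdev, entries, 4);
            if (!err)
                    return 0;

            /* MSI-X unavailable or fewer than 4 vectors: try a single MSI. */
            return pci_enable_msi_exact(pdev, 1);
    }
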
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e273e5ac19c9..3f83459dbb20 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
146 phy->attrs.bus_width = bus_width; 146 phy->attrs.bus_width = bus_width;
147} 147}
148struct phy *phy_get(struct device *dev, const char *string); 148struct phy *phy_get(struct device *dev, const char *string);
149struct phy *phy_optional_get(struct device *dev, const char *string);
149struct phy *devm_phy_get(struct device *dev, const char *string); 150struct phy *devm_phy_get(struct device *dev, const char *string);
151struct phy *devm_phy_optional_get(struct device *dev, const char *string);
150void phy_put(struct phy *phy); 152void phy_put(struct phy *phy);
151void devm_phy_put(struct device *dev, struct phy *phy); 153void devm_phy_put(struct device *dev, struct phy *phy);
152struct phy *of_phy_simple_xlate(struct device *dev, 154struct phy *of_phy_simple_xlate(struct device *dev,
@@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
232 return ERR_PTR(-ENOSYS); 234 return ERR_PTR(-ENOSYS);
233} 235}
234 236
237static inline struct phy *phy_optional_get(struct device *dev,
238 const char *string)
239{
240 return ERR_PTR(-ENOSYS);
241}
242
235static inline struct phy *devm_phy_get(struct device *dev, const char *string) 243static inline struct phy *devm_phy_get(struct device *dev, const char *string)
236{ 244{
237 return ERR_PTR(-ENOSYS); 245 return ERR_PTR(-ENOSYS);
238} 246}
239 247
248static inline struct phy *devm_phy_optional_get(struct device *dev,
249 const char *string)
250{
251 return ERR_PTR(-ENOSYS);
252}
253
240static inline void phy_put(struct phy *phy) 254static inline void phy_put(struct phy *phy)
241{ 255{
242} 256}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f589c9af8cbf..3ebbbe7b6d05 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
2916{ 2916{
2917 return !skb->head_frag || skb_cloned(skb); 2917 return !skb->head_frag || skb_cloned(skb);
2918} 2918}
2919
2920/**
2921 * skb_gso_network_seglen - Return length of individual segments of a gso packet
2922 *
2923 * @skb: GSO skb
2924 *
2925 * skb_gso_network_seglen is used to determine the real size of the
2926 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
2927 *
2928 * The MAC/L2 header is not accounted for.
2929 */
2930static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
2931{
2932 unsigned int hdr_len = skb_transport_header(skb) -
2933 skb_network_header(skb);
2934 return hdr_len + skb_gso_transport_seglen(skb);
2935}
2919#endif /* __KERNEL__ */ 2936#endif /* __KERNEL__ */
2920#endif /* _LINUX_SKBUFF_H */ 2937#endif /* _LINUX_SKBUFF_H */
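
As a rough worked example of what the helper returns: for a TCP/IPv4 GSO skb with no IP options, skb_transport_header() minus skb_network_header() is the 20-byte IP header, and skb_gso_transport_seglen() is assumed to contribute the TCP header plus gso_size, so with a 20-byte TCP header and a gso_size of 1448 each resulting segment is 20 + 20 + 1448 = 1488 bytes at layer 3. That is the figure a path-MTU style check would compare against a route MTU, which is presumably why the MAC header is deliberately excluded.
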
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3834f43f9993..6ae004e437ea 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,6 +188,9 @@ static inline void kick_all_cpus_sync(void) { }
188 */ 188 */
189extern void arch_disable_smp_support(void); 189extern void arch_disable_smp_support(void);
190 190
191extern void arch_enable_nonboot_cpus_begin(void);
192extern void arch_enable_nonboot_cpus_end(void);
193
191void smp_setup_processor_id(void); 194void smp_setup_processor_id(void);
192 195
193#endif /* __LINUX_SMP_H */ 196#endif /* __LINUX_SMP_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..4203c66d8803 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
273 * message while queuing transfers that arrive in the meantime. When the 273 * message while queuing transfers that arrive in the meantime. When the
274 * driver is finished with this message, it must call 274 * driver is finished with this message, it must call
275 * spi_finalize_current_message() so the subsystem can issue the next 275 * spi_finalize_current_message() so the subsystem can issue the next
276 * transfer 276 * message
277 * @unprepare_transfer_hardware: there are currently no more messages on the 277 * @unprepare_transfer_hardware: there are currently no more messages on the
278 * queue so the subsystem notifies the driver that it may relax the 278 * queue so the subsystem notifies the driver that it may relax the
279 * hardware by issuing this call 279 * hardware by issuing this call
@@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
287 * - return 1 if the transfer is still in progress. When 287 * - return 1 if the transfer is still in progress. When
288 * the driver is finished with this transfer it must 288 * the driver is finished with this transfer it must
289 * call spi_finalize_current_transfer() so the subsystem 289 * call spi_finalize_current_transfer() so the subsystem
290 * can issue the next transfer 290 * can issue the next transfer. Note: transfer_one and
291 * transfer_one_message are mutually exclusive; when both
292 * are set, the generic subsystem does not call your
293 * transfer_one callback.
291 * @unprepare_message: undo any work done by prepare_message(). 294 * @unprepare_message: undo any work done by prepare_message().
292 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 295 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
293 * number. Any individual value may be -ENOENT for CS lines that 296 * number. Any individual value may be -ENOENT for CS lines that
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 40ed9e9a77e5..a747a77ea584 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -281,13 +281,15 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
281asmlinkage long sys_sched_setparam(pid_t pid, 281asmlinkage long sys_sched_setparam(pid_t pid,
282 struct sched_param __user *param); 282 struct sched_param __user *param);
283asmlinkage long sys_sched_setattr(pid_t pid, 283asmlinkage long sys_sched_setattr(pid_t pid,
284 struct sched_attr __user *attr); 284 struct sched_attr __user *attr,
285 unsigned int flags);
285asmlinkage long sys_sched_getscheduler(pid_t pid); 286asmlinkage long sys_sched_getscheduler(pid_t pid);
286asmlinkage long sys_sched_getparam(pid_t pid, 287asmlinkage long sys_sched_getparam(pid_t pid,
287 struct sched_param __user *param); 288 struct sched_param __user *param);
288asmlinkage long sys_sched_getattr(pid_t pid, 289asmlinkage long sys_sched_getattr(pid_t pid,
289 struct sched_attr __user *attr, 290 struct sched_attr __user *attr,
290 unsigned int size); 291 unsigned int size,
292 unsigned int flags);
291asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, 293asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
292 unsigned long __user *user_mask_ptr); 294 unsigned long __user *user_mask_ptr);
293asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, 295asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index c716da18c668..7f6eb859873e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *);
1265 * @sg: scatter gather buffer list, the buffer size of each element in 1265 * @sg: scatter gather buffer list, the buffer size of each element in
1266 * the list (except the last) must be divisible by the endpoint's 1266 * the list (except the last) must be divisible by the endpoint's
1267 * max packet size if no_sg_constraint isn't set in 'struct usb_bus' 1267 * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
1268 * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
1269 * Do not use urb->sg for interrupt endpoints for now, only bulk.)
1270 * @num_mapped_sgs: (internal) number of mapped sg entries 1268 * @num_mapped_sgs: (internal) number of mapped sg entries
1271 * @num_sgs: number of entries in the sg list 1269 * @num_sgs: number of entries in the sg list
1272 * @transfer_buffer_length: How big is transfer_buffer. The transfer may 1270 * @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521ba0d43..704f4f652d0a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,10 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
419 static struct lock_class_key __key; \ 419 static struct lock_class_key __key; \
420 const char *__lock_name; \ 420 const char *__lock_name; \
421 \ 421 \
422 if (__builtin_constant_p(fmt)) \ 422 __lock_name = #fmt#args; \
423 __lock_name = (fmt); \
424 else \
425 __lock_name = #fmt; \
426 \ 423 \
427 __alloc_workqueue_key((fmt), (flags), (max_active), \ 424 __alloc_workqueue_key((fmt), (flags), (max_active), \
428 &__key, __lock_name, ##args); \ 425 &__key, __lock_name, ##args); \
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca75db48..93cb18f729b5 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@ struct datalink_proto {
15 struct list_head node; 15 struct list_head node;
16}; 16};
17 17
18struct datalink_proto *make_EII_client(void);
19void destroy_EII_client(struct datalink_proto *dl);
18#endif 20#endif
diff --git a/include/net/dn.h b/include/net/dn.h
index ccc15588d108..913b73d239f5 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -200,6 +200,8 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
200} 200}
201 201
202unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu); 202unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
203void dn_register_sysctl(void);
204void dn_unregister_sysctl(void);
203 205
204#define DN_MENUVER_ACC 0x01 206#define DN_MENUVER_ACC 0x01
205#define DN_MENUVER_USR 0x02 207#define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index b409ad6b8d7a..55df9939bca2 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -20,6 +20,8 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
20 struct sock *sk, int flags); 20 struct sock *sk, int flags);
21int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
22void dn_rt_cache_flush(int delay); 22void dn_rt_cache_flush(int delay);
23int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
24 struct packet_type *pt, struct net_device *orig_dev);
23 25
24/* Masks for flags field */ 26/* Masks for flags field */
25#define DN_RT_F_PID 0x07 /* Mask for packet type */ 27#define DN_RT_F_PID 0x07 /* Mask for packet type */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789b27bc..2a2d6bb34eb8 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
16struct ethoc_platform_data { 16struct ethoc_platform_data {
17 u8 hwaddr[IFHWADDRLEN]; 17 u8 hwaddr[IFHWADDRLEN];
18 s8 phy_id; 18 s8 phy_id;
19 u32 eth_clkfreq;
19}; 20};
20 21
21#endif /* !LINUX_NET_ETHOC_H */ 22#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 9e9e35465baf..0143180fecc9 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -140,6 +140,17 @@ static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
140} 140}
141 141
142void ipxitf_down(struct ipx_interface *intrfc); 142void ipxitf_down(struct ipx_interface *intrfc);
143struct ipx_interface *ipxitf_find_using_net(__be32 net);
144int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
145__be16 ipx_cksum(struct ipxhdr *packet, int length);
146int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
147 unsigned char *node);
148void ipxrtr_del_routes(struct ipx_interface *intrfc);
149int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
150 struct iovec *iov, size_t len, int noblock);
151int ipxrtr_route_skb(struct sk_buff *skb);
152struct ipx_route *ipxrtr_lookup(__be32 net);
153int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
143 154
144static __inline__ void ipxitf_put(struct ipx_interface *intrfc) 155static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
145{ 156{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index da68c9a90ac5..991dcd94cbbf 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -162,6 +162,14 @@ extern struct list_head net_namespace_list;
162struct net *get_net_ns_by_pid(pid_t pid); 162struct net *get_net_ns_by_pid(pid_t pid);
163struct net *get_net_ns_by_fd(int pid); 163struct net *get_net_ns_by_fd(int pid);
164 164
165#ifdef CONFIG_SYSCTL
166void ipx_register_sysctl(void);
167void ipx_unregister_sysctl(void);
168#else
169#define ipx_register_sysctl()
170#define ipx_unregister_sysctl()
171#endif
172
165#ifdef CONFIG_NET_NS 173#ifdef CONFIG_NET_NS
166void __put_net(struct net *net); 174void __put_net(struct net *net);
167 175
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 01ea6eed1bb1..b2ac6246b7e0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,8 @@ extern unsigned int nf_conntrack_max;
284extern unsigned int nf_conntrack_hash_rnd; 284extern unsigned int nf_conntrack_hash_rnd;
285void init_nf_conntrack_hash_rnd(void); 285void init_nf_conntrack_hash_rnd(void);
286 286
287void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
288
287#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 289#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
288#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 290#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
289 291
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 57c8ff7955df..e7e14ffe0f6a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -252,6 +252,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
252 * @owner: module reference 252 * @owner: module reference
253 * @policy: netlink attribute policy 253 * @policy: netlink attribute policy
254 * @maxattr: highest netlink attribute number 254 * @maxattr: highest netlink attribute number
255 * @family: address family for AF-specific types
255 */ 256 */
256struct nft_expr_type { 257struct nft_expr_type {
257 const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, 258 const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -262,6 +263,7 @@ struct nft_expr_type {
262 struct module *owner; 263 struct module *owner;
263 const struct nla_policy *policy; 264 const struct nla_policy *policy;
264 unsigned int maxattr; 265 unsigned int maxattr;
266 u8 family;
265}; 267};
266 268
267/** 269/**
@@ -320,7 +322,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
320 * struct nft_rule - nf_tables rule 322 * struct nft_rule - nf_tables rule
321 * 323 *
322 * @list: used internally 324 * @list: used internally
323 * @rcu_head: used internally for rcu
324 * @handle: rule handle 325 * @handle: rule handle
325 * @genmask: generation mask 326 * @genmask: generation mask
326 * @dlen: length of expression data 327 * @dlen: length of expression data
@@ -328,7 +329,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
328 */ 329 */
329struct nft_rule { 330struct nft_rule {
330 struct list_head list; 331 struct list_head list;
331 struct rcu_head rcu_head;
332 u64 handle:46, 332 u64 handle:46,
333 genmask:2, 333 genmask:2,
334 dlen:16; 334 dlen:16;
@@ -389,7 +389,6 @@ enum nft_chain_flags {
389 * 389 *
390 * @rules: list of rules in the chain 390 * @rules: list of rules in the chain
391 * @list: used internally 391 * @list: used internally
392 * @rcu_head: used internally
393 * @net: net namespace that this chain belongs to 392 * @net: net namespace that this chain belongs to
394 * @table: table that this chain belongs to 393 * @table: table that this chain belongs to
395 * @handle: chain handle 394 * @handle: chain handle
@@ -401,7 +400,6 @@ enum nft_chain_flags {
401struct nft_chain { 400struct nft_chain {
402 struct list_head rules; 401 struct list_head rules;
403 struct list_head list; 402 struct list_head list;
404 struct rcu_head rcu_head;
405 struct net *net; 403 struct net *net;
406 struct nft_table *table; 404 struct nft_table *table;
407 u64 handle; 405 u64 handle;
@@ -529,6 +527,9 @@ void nft_unregister_expr(struct nft_expr_type *);
529#define MODULE_ALIAS_NFT_CHAIN(family, name) \ 527#define MODULE_ALIAS_NFT_CHAIN(family, name) \
530 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) 528 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
531 529
530#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
531 MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
532
532#define MODULE_ALIAS_NFT_EXPR(name) \ 533#define MODULE_ALIAS_NFT_EXPR(name) \
533 MODULE_ALIAS("nft-expr-" name) 534 MODULE_ALIAS("nft-expr-" name)
534 535
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 000000000000..36b0da2d55bb
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
1#ifndef _NFT_REJECT_H_
2#define _NFT_REJECT_H_
3
4struct nft_reject {
5 enum nft_reject_types type:8;
6 u8 icmp_code;
7};
8
9extern const struct nla_policy nft_reject_policy[];
10
11int nft_reject_init(const struct nft_ctx *ctx,
12 const struct nft_expr *expr,
13 const struct nlattr * const tb[]);
14
15int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
16
17void nft_reject_ipv4_eval(const struct nft_expr *expr,
18 struct nft_data data[NFT_REG_MAX + 1],
19 const struct nft_pktinfo *pkt);
20
21void nft_reject_ipv6_eval(const struct nft_expr *expr,
22 struct nft_data data[NFT_REG_MAX + 1],
23 const struct nft_pktinfo *pkt);
24
25#endif
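
The new shared header works together with the nft_expr_type.family field and MODULE_ALIAS_NFT_AF_EXPR() added above. The following is only a rough sketch of how an AF-specific reject module could wire these pieces up; the member wiring follows the 3.14-era nf_tables API and is illustrative, not copied from this patch:

/* Illustrative sketch: an IPv4 reject expression reusing the shared
 * helpers, the new .family field and the AF-specific module alias. */
static struct nft_expr_type nft_reject_ipv4_type;

static const struct nft_expr_ops nft_reject_ipv4_ops = {
	.type	= &nft_reject_ipv4_type,
	.size	= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval	= nft_reject_ipv4_eval,	/* shared IPv4 eval helper */
	.init	= nft_reject_init,	/* shared attribute parsing */
	.dump	= nft_reject_dump,
};

static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
	.family		= NFPROTO_IPV4,		/* new field */
	.name		= "reject",
	.ops		= &nft_reject_ipv4_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};

MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
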
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3145fe..6ee76c804893 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@ struct sctp_association {
1653 /* This is the last advertised value of rwnd over a SACK chunk. */ 1653 /* This is the last advertised value of rwnd over a SACK chunk. */
1654 __u32 a_rwnd; 1654 __u32 a_rwnd;
1655 1655
1656 /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
1657 * to slop over a maximum of the association's frag_point.
1658 */
1659 __u32 rwnd_over;
1660
1661 /* Keeps treack of rwnd pressure. This happens when we have
1662 * a window, but not recevie buffer (i.e small packets). This one
1663 * is releases slowly (1 PMTU at a time ).
1664 */
1665 __u32 rwnd_press;
1666
1667 /* This is the sndbuf size in use for the association. 1656 /* This is the sndbuf size in use for the association.
1668 * This corresponds to the sndbuf size for the association, 1657 * This corresponds to the sndbuf size for the association,
1669 * as specified in the sk->sndbuf. 1658 * as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
1892__u32 sctp_association_get_next_tsn(struct sctp_association *); 1881__u32 sctp_association_get_next_tsn(struct sctp_association *);
1893 1882
1894void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1883void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
1895void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1884void sctp_assoc_rwnd_update(struct sctp_association *, bool);
1896void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1897void sctp_assoc_set_primary(struct sctp_association *, 1885void sctp_assoc_set_primary(struct sctp_association *,
1898 struct sctp_transport *); 1886 struct sctp_transport *);
1899void sctp_assoc_del_nonprimary_peers(struct sctp_association *, 1887void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8d4a1c06f7e4..6793f32ccb58 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -226,7 +226,8 @@ enum ib_port_cap_flags {
226 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 226 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
227 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 227 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
228 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 228 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
229 IB_PORT_CLIENT_REG_SUP = 1 << 25 229 IB_PORT_CLIENT_REG_SUP = 1 << 25,
230 IB_PORT_IP_BASED_GIDS = 1 << 26
230}; 231};
231 232
232enum ib_port_width { 233enum ib_port_width {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c9c791209cd1..1772fadcff62 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -525,7 +525,6 @@ struct se_cmd {
525#define CMD_T_COMPLETE (1 << 2) 525#define CMD_T_COMPLETE (1 << 2)
526#define CMD_T_SENT (1 << 4) 526#define CMD_T_SENT (1 << 4)
527#define CMD_T_STOP (1 << 5) 527#define CMD_T_STOP (1 << 5)
528#define CMD_T_FAILED (1 << 6)
529#define CMD_T_DEV_ACTIVE (1 << 7) 528#define CMD_T_DEV_ACTIVE (1 << 7)
530#define CMD_T_REQUEST_STOP (1 << 8) 529#define CMD_T_REQUEST_STOP (1 << 8)
531#define CMD_T_BUSY (1 << 9) 530#define CMD_T_BUSY (1 << 9)
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 9e9475c85de5..e5bf9a76f169 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample,
42 u32 state, 42 u32 state,
43 u64 mperf, 43 u64 mperf,
44 u64 aperf, 44 u64 aperf,
45 u32 energy,
46 u32 freq 45 u32 freq
47 ), 46 ),
48 47
@@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample,
51 state, 50 state,
52 mperf, 51 mperf,
53 aperf, 52 aperf,
54 energy,
55 freq 53 freq
56 ), 54 ),
57 55
@@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample,
61 __field(u32, state) 59 __field(u32, state)
62 __field(u64, mperf) 60 __field(u64, mperf)
63 __field(u64, aperf) 61 __field(u64, aperf)
64 __field(u32, energy)
65 __field(u32, freq) 62 __field(u32, freq)
66 63
67 ), 64 ),
@@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample,
72 __entry->state = state; 69 __entry->state = state;
73 __entry->mperf = mperf; 70 __entry->mperf = mperf;
74 __entry->aperf = aperf; 71 __entry->aperf = aperf;
75 __entry->energy = energy;
76 __entry->freq = freq; 72 __entry->freq = freq;
77 ), 73 ),
78 74
79 TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ", 75 TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
80 (unsigned long)__entry->core_busy, 76 (unsigned long)__entry->core_busy,
81 (unsigned long)__entry->scaled_busy, 77 (unsigned long)__entry->scaled_busy,
82 (unsigned long)__entry->state, 78 (unsigned long)__entry->state,
83 (unsigned long long)__entry->mperf, 79 (unsigned long long)__entry->mperf,
84 (unsigned long long)__entry->aperf, 80 (unsigned long long)__entry->aperf,
85 (unsigned long)__entry->energy,
86 (unsigned long)__entry->freq 81 (unsigned long)__entry->freq
87 ) 82 )
88 83
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3c9a833992e8..b06c8ed68707 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -619,6 +619,8 @@ struct drm_gem_open {
619#define DRM_PRIME_CAP_EXPORT 0x2 619#define DRM_PRIME_CAP_EXPORT 0x2
620#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 620#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
621#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 621#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
622#define DRM_CAP_CURSOR_WIDTH 0x8
623#define DRM_CAP_CURSOR_HEIGHT 0x9
622 624
623/** DRM_IOCTL_GET_CAP ioctl argument type */ 625/** DRM_IOCTL_GET_CAP ioctl argument type */
624struct drm_get_cap { 626struct drm_get_cap {
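
The two new capability IDs are intended to be queried from userspace through DRM_IOCTL_GET_CAP. A hedged libdrm sketch follows; the 64x64 fallback for drivers that predate these caps and the file-descriptor handling are assumptions, not part of the patch:

/* Userspace sketch using libdrm's drmGetCap(). */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

static void print_cursor_limits(int fd)
{
	uint64_t w, h;

	if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w) != 0)
		w = 64;	/* assumed legacy default */
	if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h) != 0)
		h = 64;

	printf("max hardware cursor: %llu x %llu\n",
	       (unsigned long long)w, (unsigned long long)h);
}
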
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 9971c560ed9a..87792a5fee3b 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -87,6 +87,7 @@
87#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 87#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
88#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 88#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
89#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 89#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
90#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
90 91
91/** 92/**
92 * struct drm_vmw_getparam_arg 93 * struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 1b8a0f4c9590..b4d69092fbdb 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -558,7 +558,6 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
558#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64) 558#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64)
559#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ 559#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
560 struct btrfs_ioctl_space_args) 560 struct btrfs_ioctl_space_args)
561#define BTRFS_IOC_GLOBAL_RSV _IOR(BTRFS_IOCTL_MAGIC, 20, __u64)
562#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) 561#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
563#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) 562#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
564#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ 563#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 633b93cac1ed..e9a1d2d973b6 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -128,22 +128,13 @@ struct in6_flowlabel_req {
128 * IPV6 extension headers 128 * IPV6 extension headers
129 */ 129 */
130#if __UAPI_DEF_IPPROTO_V6 130#if __UAPI_DEF_IPPROTO_V6
131enum { 131#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
132 IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */ 132#define IPPROTO_ROUTING 43 /* IPv6 routing header */
133#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS 133#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
134 IPPROTO_ROUTING = 43, /* IPv6 routing header */ 134#define IPPROTO_ICMPV6 58 /* ICMPv6 */
135#define IPPROTO_ROUTING IPPROTO_ROUTING 135#define IPPROTO_NONE 59 /* IPv6 no next header */
136 IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */ 136#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
137#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT 137#define IPPROTO_MH 135 /* IPv6 mobility header */
138 IPPROTO_ICMPV6 = 58, /* ICMPv6 */
139#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
140 IPPROTO_NONE = 59, /* IPv6 no next header */
141#define IPPROTO_NONE IPPROTO_NONE
142 IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
143#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
144 IPPROTO_MH = 135, /* IPv6 mobility header */
145#define IPPROTO_MH IPPROTO_MH
146};
147#endif /* __UAPI_DEF_IPPROTO_V6 */ 138#endif /* __UAPI_DEF_IPPROTO_V6 */
148 139
149/* 140/*
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
index 7fabba5059cf..feb0b4c0814c 100644
--- a/include/uapi/linux/mic_ioctl.h
+++ b/include/uapi/linux/mic_ioctl.h
@@ -39,7 +39,7 @@ struct mic_copy_desc {
39#else 39#else
40 struct iovec *iov; 40 struct iovec *iov;
41#endif 41#endif
42 int iovcnt; 42 __u32 iovcnt;
43 __u8 vr_idx; 43 __u8 vr_idx;
44 __u8 update_used; 44 __u8 update_used;
45 __u32 out_len; 45 __u32 out_len;
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
index 61257cb14653..5c459628e8c7 100644
--- a/include/uapi/xen/Kbuild
+++ b/include/uapi/xen/Kbuild
@@ -1,3 +1,5 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += evtchn.h 2header-y += evtchn.h
3header-y += gntalloc.h
4header-y += gntdev.h
3header-y += privcmd.h 5header-y += privcmd.h
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 76bd58065f4f..76bd58065f4f 100644
--- a/include/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 5304bd3c84c5..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
113 * it's less than the number provided by the backend. The indirect_grefs field 113 * it's less than the number provided by the backend. The indirect_grefs field
114 * in blkif_request_indirect should be filled by the frontend with the 114 * in blkif_request_indirect should be filled by the frontend with the
115 * grant references of the pages that are holding the indirect segments. 115 * grant references of the pages that are holding the indirect segments.
116 * This pages are filled with an array of blkif_request_segment_aligned 116 * These pages are filled with an array of blkif_request_segment that hold the
117 * that hold the information about the segments. The number of indirect 117 * information about the segments. The number of indirect pages to use is
118 * pages to use is determined by the maximum number of segments 118 * determined by the number of segments an indirect request contains. Every
119 * a indirect request contains. Every indirect page can contain a maximum 119 * indirect page can contain a maximum of
120 * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), 120 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
121 * so to calculate the number of indirect pages to use we have to do 121 * calculate the number of indirect pages to use we have to do
122 * ceil(indirect_segments/512). 122 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
123 * 123 *
124 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* 124 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
125 * create the "feature-max-indirect-segments" node! 125 * create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
135 135
136#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 136#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
137 137
138struct blkif_request_segment_aligned { 138struct blkif_request_segment {
139 grant_ref_t gref; /* reference to I/O buffer frame */ 139 grant_ref_t gref; /* reference to I/O buffer frame */
140 /* @first_sect: first sector in frame to transfer (inclusive). */ 140 /* @first_sect: first sector in frame to transfer (inclusive). */
141 /* @last_sect: last sector in frame to transfer (inclusive). */ 141 /* @last_sect: last sector in frame to transfer (inclusive). */
142 uint8_t first_sect, last_sect; 142 uint8_t first_sect, last_sect;
143 uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ 143};
144} __attribute__((__packed__));
145 144
146struct blkif_request_rw { 145struct blkif_request_rw {
147 uint8_t nr_segments; /* number of segments */ 146 uint8_t nr_segments; /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
151#endif 150#endif
152 uint64_t id; /* private guest value, echoed in resp */ 151 uint64_t id; /* private guest value, echoed in resp */
153 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ 152 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
154 struct blkif_request_segment { 153 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
155 grant_ref_t gref; /* reference to I/O buffer frame */
156 /* @first_sect: first sector in frame to transfer (inclusive). */
157 /* @last_sect: last sector in frame to transfer (inclusive). */
158 uint8_t first_sect, last_sect;
159 } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
160} __attribute__((__packed__)); 154} __attribute__((__packed__));
161 155
162struct blkif_request_discard { 156struct blkif_request_discard {
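
A quick host-side arithmetic check of the reworded indirect-segment comment, under the assumption of 4 KiB pages and the now-unified 8-byte segment layout:

/* Host-side check: each segment occupies 8 bytes after padding, so one
 * indirect page carries 512 of them and the page count is a ceiling
 * division, as the updated comment states. */
#include <stdio.h>
#include <stdint.h>

struct seg {				/* stand-in for blkif_request_segment */
	uint32_t gref;
	uint8_t first_sect, last_sect;	/* 6 bytes of payload, padded to 8 */
};

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int per_page = page_size / sizeof(struct seg);
	unsigned int segs = 1000;

	printf("segments per page: %u\n", per_page);		/* 512 */
	printf("indirect pages for %u segs: %u\n", segs,
	       (segs + per_page - 1) / per_page);		/* 2 */
	return 0;
}
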
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712afa..000000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 * DEALINGS IN THE SOFTWARE.
19 *
20 * Copyright (C) IBM Corp. 2006
21 */
22
23#ifndef _XEN_XENCOMM_H_
24#define _XEN_XENCOMM_H_
25
26/* A xencomm descriptor is a scatter/gather list containing physical
27 * addresses corresponding to a virtually contiguous memory area. The
28 * hypervisor translates these physical addresses to machine addresses to copy
29 * to and from the virtually contiguous area.
30 */
31
32#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
33#define XENCOMM_INVALID (~0UL)
34
35struct xencomm_desc {
36 uint32_t magic;
37 uint32_t nr_addrs; /* the number of entries in address[] */
38 uint64_t address[0];
39};
40
41#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be112..000000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) IBM Corp. 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jerone Young <jyoung5@us.ibm.com>
20 */
21
22#ifndef _LINUX_XENCOMM_H_
23#define _LINUX_XENCOMM_H_
24
25#include <xen/interface/xencomm.h>
26
27#define XENCOMM_MINI_ADDRS 3
28struct xencomm_mini {
29 struct xencomm_desc _desc;
30 uint64_t address[XENCOMM_MINI_ADDRS];
31};
32
33/* To avoid additionnal virt to phys conversion, an opaque structure is
34 presented. */
35struct xencomm_handle;
36
37extern void xencomm_free(struct xencomm_handle *desc);
38extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
39extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
40 unsigned long bytes, struct xencomm_mini *xc_area);
41
42#if 0
43#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
44 struct xencomm_mini xc_desc ## _base[(n)] \
45 __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
46 struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
47#else
48/*
49 * gcc bug workaround:
50 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
51 * gcc doesn't handle properly stack variable with
52 * __attribute__((__align__(sizeof(struct xencomm_mini))))
53 */
54#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
55 unsigned char xc_desc ## _base[((n) + 1 ) * \
56 sizeof(struct xencomm_mini)]; \
57 struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
58 ((unsigned long)xc_desc ## _base + \
59 (sizeof(struct xencomm_mini) - \
60 ((unsigned long)xc_desc ## _base) % \
61 sizeof(struct xencomm_mini)));
62#endif
63#define xencomm_map_no_alloc(ptr, bytes) \
64 ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
65 __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
66
67/* provided by architecture code: */
68extern unsigned long xencomm_vtop(unsigned long vaddr);
69
70static inline void *xencomm_pa(void *ptr)
71{
72 return (void *)xencomm_vtop((unsigned long)ptr);
73}
74
75#define xen_guest_handle(hnd) ((hnd).p)
76
77#endif /* _LINUX_XENCOMM_H_ */
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 383d638340b8..5bb8bfe67149 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
22 return which; 22 return which;
23} 23}
24 24
25static int proc_mq_dointvec(ctl_table *table, int write,
26 void __user *buffer, size_t *lenp, loff_t *ppos)
27{
28 struct ctl_table mq_table;
29 memcpy(&mq_table, table, sizeof(mq_table));
30 mq_table.data = get_mq(table);
31
32 return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
33}
34
25static int proc_mq_dointvec_minmax(ctl_table *table, int write, 35static int proc_mq_dointvec_minmax(ctl_table *table, int write,
26 void __user *buffer, size_t *lenp, loff_t *ppos) 36 void __user *buffer, size_t *lenp, loff_t *ppos)
27{ 37{
@@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
33 lenp, ppos); 43 lenp, ppos);
34} 44}
35#else 45#else
46#define proc_mq_dointvec NULL
36#define proc_mq_dointvec_minmax NULL 47#define proc_mq_dointvec_minmax NULL
37#endif 48#endif
38 49
39static int msg_queues_limit_min = MIN_QUEUESMAX;
40static int msg_queues_limit_max = HARD_QUEUESMAX;
41
42static int msg_max_limit_min = MIN_MSGMAX; 50static int msg_max_limit_min = MIN_MSGMAX;
43static int msg_max_limit_max = HARD_MSGMAX; 51static int msg_max_limit_max = HARD_MSGMAX;
44 52
@@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
51 .data = &init_ipc_ns.mq_queues_max, 59 .data = &init_ipc_ns.mq_queues_max,
52 .maxlen = sizeof(int), 60 .maxlen = sizeof(int),
53 .mode = 0644, 61 .mode = 0644,
54 .proc_handler = proc_mq_dointvec_minmax, 62 .proc_handler = proc_mq_dointvec,
55 .extra1 = &msg_queues_limit_min,
56 .extra2 = &msg_queues_limit_max,
57 }, 63 },
58 { 64 {
59 .procname = "msg_max", 65 .procname = "msg_max",
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ccf1f9fd263a..c3b31179122c 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
433 error = -EACCES; 433 error = -EACCES;
434 goto out_unlock; 434 goto out_unlock;
435 } 435 }
436 if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || 436
437 (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 437 if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
438 !capable(CAP_SYS_RESOURCE))) { 438 !capable(CAP_SYS_RESOURCE)) {
439 error = -ENOSPC; 439 error = -ENOSPC;
440 goto out_unlock; 440 goto out_unlock;
441 } 441 }
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba37f72..105f273b6f86 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
886 * per-subsystem and moved to css->id so that lookups are 886 * per-subsystem and moved to css->id so that lookups are
887 * successful until the target css is released. 887 * successful until the target css is released.
888 */ 888 */
889 mutex_lock(&cgroup_mutex);
889 idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 890 idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
891 mutex_unlock(&cgroup_mutex);
890 cgrp->id = -1; 892 cgrp->id = -1;
891 893
892 call_rcu(&cgrp->rcu_head, cgroup_free_rcu); 894 call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1566 mutex_lock(&cgroup_mutex); 1568 mutex_lock(&cgroup_mutex);
1567 mutex_lock(&cgroup_root_mutex); 1569 mutex_lock(&cgroup_root_mutex);
1568 1570
1569 root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, 1571 ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
1570 0, 1, GFP_KERNEL); 1572 if (ret < 0)
1571 if (root_cgrp->id < 0)
1572 goto unlock_drop; 1573 goto unlock_drop;
1574 root_cgrp->id = ret;
1573 1575
1574 /* Check for name clashes with existing mounts */ 1576 /* Check for name clashes with existing mounts */
1575 ret = -EBUSY; 1577 ret = -EBUSY;
@@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
2763 */ 2765 */
2764 update_before = cgroup_serial_nr_next; 2766 update_before = cgroup_serial_nr_next;
2765 2767
2766 mutex_unlock(&cgroup_mutex);
2767
2768 /* add/rm files for all cgroups created before */ 2768 /* add/rm files for all cgroups created before */
2769 rcu_read_lock();
2770 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 2769 css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
2771 struct cgroup *cgrp = css->cgroup; 2770 struct cgroup *cgrp = css->cgroup;
2772 2771
@@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
2775 2774
2776 inode = cgrp->dentry->d_inode; 2775 inode = cgrp->dentry->d_inode;
2777 dget(cgrp->dentry); 2776 dget(cgrp->dentry);
2778 rcu_read_unlock();
2779
2780 dput(prev); 2777 dput(prev);
2781 prev = cgrp->dentry; 2778 prev = cgrp->dentry;
2782 2779
2780 mutex_unlock(&cgroup_mutex);
2783 mutex_lock(&inode->i_mutex); 2781 mutex_lock(&inode->i_mutex);
2784 mutex_lock(&cgroup_mutex); 2782 mutex_lock(&cgroup_mutex);
2785 if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) 2783 if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
2786 ret = cgroup_addrm_files(cgrp, cfts, is_add); 2784 ret = cgroup_addrm_files(cgrp, cfts, is_add);
2787 mutex_unlock(&cgroup_mutex);
2788 mutex_unlock(&inode->i_mutex); 2785 mutex_unlock(&inode->i_mutex);
2789
2790 rcu_read_lock();
2791 if (ret) 2786 if (ret)
2792 break; 2787 break;
2793 } 2788 }
2794 rcu_read_unlock(); 2789 mutex_unlock(&cgroup_mutex);
2795 dput(prev); 2790 dput(prev);
2796 deactivate_super(sb); 2791 deactivate_super(sb);
2797 return ret; 2792 return ret;
@@ -2910,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
2910 * We should check if the process is exiting, otherwise 2905 * We should check if the process is exiting, otherwise
2911 * it will race with cgroup_exit() in that the list 2906 * it will race with cgroup_exit() in that the list
2912 * entry won't be deleted though the process has exited. 2907 * entry won't be deleted though the process has exited.
2908 * Do it while holding siglock so that we don't end up
2909 * racing against cgroup_exit().
2913 */ 2910 */
2911 spin_lock_irq(&p->sighand->siglock);
2914 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) 2912 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
2915 list_add(&p->cg_list, &task_css_set(p)->tasks); 2913 list_add(&p->cg_list, &task_css_set(p)->tasks);
2914 spin_unlock_irq(&p->sighand->siglock);
2915
2916 task_unlock(p); 2916 task_unlock(p);
2917 } while_each_thread(g, p); 2917 } while_each_thread(g, p);
2918 read_unlock(&tasklist_lock); 2918 read_unlock(&tasklist_lock);
@@ -4158,7 +4158,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4158 struct cgroup *cgrp; 4158 struct cgroup *cgrp;
4159 struct cgroup_name *name; 4159 struct cgroup_name *name;
4160 struct cgroupfs_root *root = parent->root; 4160 struct cgroupfs_root *root = parent->root;
4161 int ssid, err = 0; 4161 int ssid, err;
4162 struct cgroup_subsys *ss; 4162 struct cgroup_subsys *ss;
4163 struct super_block *sb = root->sb; 4163 struct super_block *sb = root->sb;
4164 4164
@@ -4168,19 +4168,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4168 return -ENOMEM; 4168 return -ENOMEM;
4169 4169
4170 name = cgroup_alloc_name(dentry); 4170 name = cgroup_alloc_name(dentry);
4171 if (!name) 4171 if (!name) {
4172 err = -ENOMEM;
4172 goto err_free_cgrp; 4173 goto err_free_cgrp;
4174 }
4173 rcu_assign_pointer(cgrp->name, name); 4175 rcu_assign_pointer(cgrp->name, name);
4174 4176
4175 /* 4177 /*
4176 * Temporarily set the pointer to NULL, so idr_find() won't return
4177 * a half-baked cgroup.
4178 */
4179 cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
4180 if (cgrp->id < 0)
4181 goto err_free_name;
4182
4183 /*
4184 * Only live parents can have children. Note that the liveliness 4178 * Only live parents can have children. Note that the liveliness
4185 * check isn't strictly necessary because cgroup_mkdir() and 4179 * check isn't strictly necessary because cgroup_mkdir() and
4186 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it 4180 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4183,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4189 */ 4183 */
4190 if (!cgroup_lock_live_group(parent)) { 4184 if (!cgroup_lock_live_group(parent)) {
4191 err = -ENODEV; 4185 err = -ENODEV;
4192 goto err_free_id; 4186 goto err_free_name;
4187 }
4188
4189 /*
4190 * Temporarily set the pointer to NULL, so idr_find() won't return
4191 * a half-baked cgroup.
4192 */
4193 cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
4194 if (cgrp->id < 0) {
4195 err = -ENOMEM;
4196 goto err_unlock;
4193 } 4197 }
4194 4198
4195 /* Grab a reference on the superblock so the hierarchy doesn't 4199 /* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4225,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4221 */ 4225 */
4222 err = cgroup_create_file(dentry, S_IFDIR | mode, sb); 4226 err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
4223 if (err < 0) 4227 if (err < 0)
4224 goto err_unlock; 4228 goto err_free_id;
4225 lockdep_assert_held(&dentry->d_inode->i_mutex); 4229 lockdep_assert_held(&dentry->d_inode->i_mutex);
4226 4230
4227 cgrp->serial_nr = cgroup_serial_nr_next++; 4231 cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4261,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4257 4261
4258 return 0; 4262 return 0;
4259 4263
4260err_unlock:
4261 mutex_unlock(&cgroup_mutex);
4262 /* Release the reference count that we took on the superblock */
4263 deactivate_super(sb);
4264err_free_id: 4264err_free_id:
4265 idr_remove(&root->cgroup_idr, cgrp->id); 4265 idr_remove(&root->cgroup_idr, cgrp->id);
4266 /* Release the reference count that we took on the superblock */
4267 deactivate_super(sb);
4268err_unlock:
4269 mutex_unlock(&cgroup_mutex);
4266err_free_name: 4270err_free_name:
4267 kfree(rcu_dereference_raw(cgrp->name)); 4271 kfree(rcu_dereference_raw(cgrp->name));
4268err_free_cgrp: 4272err_free_cgrp:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 45e5543e2a1e..fa990061aa6c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7875,14 +7875,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
7875static void __perf_event_exit_context(void *__info) 7875static void __perf_event_exit_context(void *__info)
7876{ 7876{
7877 struct perf_event_context *ctx = __info; 7877 struct perf_event_context *ctx = __info;
7878 struct perf_event *event, *tmp; 7878 struct perf_event *event;
7879 7879
7880 perf_pmu_rotate_stop(ctx->pmu); 7880 perf_pmu_rotate_stop(ctx->pmu);
7881 7881
7882 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) 7882 rcu_read_lock();
7883 __perf_remove_from_context(event); 7883 list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
7884 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7885 __perf_remove_from_context(event); 7884 __perf_remove_from_context(event);
7885 rcu_read_unlock();
7886} 7886}
7887 7887
7888static void perf_event_exit_cpu_context(int cpu) 7888static void perf_event_exit_cpu_context(int cpu)
@@ -7906,11 +7906,11 @@ static void perf_event_exit_cpu(int cpu)
7906{ 7906{
7907 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7907 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7908 7908
7909 perf_event_exit_cpu_context(cpu);
7910
7909 mutex_lock(&swhash->hlist_mutex); 7911 mutex_lock(&swhash->hlist_mutex);
7910 swevent_hlist_release(swhash); 7912 swevent_hlist_release(swhash);
7911 mutex_unlock(&swhash->hlist_mutex); 7913 mutex_unlock(&swhash->hlist_mutex);
7912
7913 perf_event_exit_cpu_context(cpu);
7914} 7914}
7915#else 7915#else
7916static inline void perf_event_exit_cpu(int cpu) { } 7916static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788d71e0..1ef0606797c9 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
73EXPORT_SYMBOL(devm_request_threaded_irq); 73EXPORT_SYMBOL(devm_request_threaded_irq);
74 74
75/** 75/**
76 * devm_request_any_context_irq - allocate an interrupt line for a managed device
77 * @dev: device to request interrupt for
78 * @irq: Interrupt line to allocate
79 * @handler: Function to be called when the IRQ occurs
80 * @thread_fn: function to be called in a threaded interrupt context. NULL
81 * for devices which handle everything in @handler
82 * @irqflags: Interrupt type flags
83 * @devname: An ascii name for the claiming device
84 * @dev_id: A cookie passed back to the handler function
85 *
86 * Except for the extra @dev argument, this function takes the
87 * same arguments and performs the same function as
88 * request_any_context_irq(). IRQs requested with this function will be
89 * automatically freed on driver detach.
90 *
91 * If an IRQ allocated with this function needs to be freed
92 * separately, devm_free_irq() must be used.
93 */
94int devm_request_any_context_irq(struct device *dev, unsigned int irq,
95 irq_handler_t handler, unsigned long irqflags,
96 const char *devname, void *dev_id)
97{
98 struct irq_devres *dr;
99 int rc;
100
101 dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
102 GFP_KERNEL);
103 if (!dr)
104 return -ENOMEM;
105
106 rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
107 if (rc) {
108 devres_free(dr);
109 return rc;
110 }
111
112 dr->irq = irq;
113 dr->dev_id = dev_id;
114 devres_add(dev, dr);
115
116 return 0;
117}
118EXPORT_SYMBOL(devm_request_any_context_irq);
119
120/**
76 * devm_free_irq - free an interrupt 121 * devm_free_irq - free an interrupt
77 * @dev: device to free interrupt for 122 * @dev: device to free interrupt for
78 * @irq: Interrupt line to free 123 * @irq: Interrupt line to free
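
A hypothetical probe-time use of the new devm_request_any_context_irq() helper; "struct mydev", mydev_isr() and the zero trigger flags are made-up for illustration and are not taken from the patch:

/* Illustrative driver sketch for the new managed request helper. */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct mydev {
	void __iomem *regs;
};

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int mydev_probe(struct platform_device *pdev)
{
	struct mydev *priv;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Succeeds whether the parent irqchip needs a nested/threaded
	 * handler or a plain hardirq one; freed automatically on detach. */
	ret = devm_request_any_context_irq(&pdev->dev, irq, mydev_isr, 0,
					   dev_name(&pdev->dev), priv);
	if (ret < 0)
		return ret;

	return 0;
}
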
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
274{ 274{
275 return (irq < NR_IRQS) ? irq_desc + irq : NULL; 275 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
276} 276}
277EXPORT_SYMBOL(irq_to_desc);
277 278
278static void free_desc(unsigned int irq) 279static void free_desc(unsigned int irq)
279{ 280{
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8cab4..aba9c545a0e3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
9#include <linux/kbd_kern.h> 9#include <linux/kbd_kern.h>
10#include <linux/vt.h> 10#include <linux/vt.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h>
12#include "power.h" 13#include "power.h"
13 14
14#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) 15#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1076 next_seq = log_next_seq; 1076 next_seq = log_next_seq;
1077 1077
1078 len = 0; 1078 len = 0;
1079 prev = 0;
1080 while (len >= 0 && seq < next_seq) { 1079 while (len >= 0 && seq < next_seq) {
1081 struct printk_log *msg = log_from_idx(idx); 1080 struct printk_log *msg = log_from_idx(idx);
1082 int textlen; 1081 int textlen;
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2788 next_idx = idx; 2787 next_idx = idx;
2789 2788
2790 l = 0; 2789 l = 0;
2791 prev = 0;
2792 while (seq < dumper->next_seq) { 2790 while (seq < dumper->next_seq) {
2793 struct printk_log *msg = log_from_idx(idx); 2791 struct printk_log *msg = log_from_idx(idx);
2794 2792
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..6edbef296ece 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
1952{ 1952{
1953 1953
1954 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1954 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1955 u64 period = attr->sched_period; 1955 u64 period = attr->sched_period ?: attr->sched_deadline;
1956 u64 runtime = attr->sched_runtime; 1956 u64 runtime = attr->sched_runtime;
1957 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 1957 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
1958 int cpus, err = -1; 1958 int cpus, err = -1;
@@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3661 * @pid: the pid in question. 3661 * @pid: the pid in question.
3662 * @uattr: structure containing the extended parameters. 3662 * @uattr: structure containing the extended parameters.
3663 */ 3663 */
3664SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr) 3664SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3665 unsigned int, flags)
3665{ 3666{
3666 struct sched_attr attr; 3667 struct sched_attr attr;
3667 struct task_struct *p; 3668 struct task_struct *p;
3668 int retval; 3669 int retval;
3669 3670
3670 if (!uattr || pid < 0) 3671 if (!uattr || pid < 0 || flags)
3671 return -EINVAL; 3672 return -EINVAL;
3672 3673
3673 if (sched_copy_attr(uattr, &attr)) 3674 if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
3786 attr->size = usize; 3787 attr->size = usize;
3787 } 3788 }
3788 3789
3789 ret = copy_to_user(uattr, attr, usize); 3790 ret = copy_to_user(uattr, attr, attr->size);
3790 if (ret) 3791 if (ret)
3791 return -EFAULT; 3792 return -EFAULT;
3792 3793
@@ -3804,8 +3805,8 @@ err_size:
3804 * @uattr: structure containing the extended parameters. 3805 * @uattr: structure containing the extended parameters.
3805 * @size: sizeof(attr) for fwd/bwd comp. 3806 * @size: sizeof(attr) for fwd/bwd comp.
3806 */ 3807 */
3807SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3808SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3808 unsigned int, size) 3809 unsigned int, size, unsigned int, flags)
3809{ 3810{
3810 struct sched_attr attr = { 3811 struct sched_attr attr = {
3811 .size = sizeof(struct sched_attr), 3812 .size = sizeof(struct sched_attr),
@@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3814 int retval; 3815 int retval;
3815 3816
3816 if (!uattr || pid < 0 || size > PAGE_SIZE || 3817 if (!uattr || pid < 0 || size > PAGE_SIZE ||
3817 size < SCHED_ATTR_SIZE_VER0) 3818 size < SCHED_ATTR_SIZE_VER0 || flags)
3818 return -EINVAL; 3819 return -EINVAL;
3819 3820
3820 rcu_read_lock(); 3821 rcu_read_lock();
@@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)
7422 u64 period = global_rt_period(); 7423 u64 period = global_rt_period();
7423 u64 new_bw = to_ratio(period, runtime); 7424 u64 new_bw = to_ratio(period, runtime);
7424 int cpu, ret = 0; 7425 int cpu, ret = 0;
7426 unsigned long flags;
7425 7427
7426 /* 7428 /*
7427 * Here we want to check the bandwidth not being set to some 7429 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)
7435 for_each_possible_cpu(cpu) { 7437 for_each_possible_cpu(cpu) {
7436 struct dl_bw *dl_b = dl_bw_of(cpu); 7438 struct dl_bw *dl_b = dl_bw_of(cpu);
7437 7439
7438 raw_spin_lock(&dl_b->lock); 7440 raw_spin_lock_irqsave(&dl_b->lock, flags);
7439 if (new_bw < dl_b->total_bw) 7441 if (new_bw < dl_b->total_bw)
7440 ret = -EBUSY; 7442 ret = -EBUSY;
7441 raw_spin_unlock(&dl_b->lock); 7443 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7442 7444
7443 if (ret) 7445 if (ret)
7444 break; 7446 break;
@@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)
7451{ 7453{
7452 u64 new_bw = -1; 7454 u64 new_bw = -1;
7453 int cpu; 7455 int cpu;
7456 unsigned long flags;
7454 7457
7455 def_dl_bandwidth.dl_period = global_rt_period(); 7458 def_dl_bandwidth.dl_period = global_rt_period();
7456 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 7459 def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)
7464 for_each_possible_cpu(cpu) { 7467 for_each_possible_cpu(cpu) {
7465 struct dl_bw *dl_b = dl_bw_of(cpu); 7468 struct dl_bw *dl_b = dl_bw_of(cpu);
7466 7469
7467 raw_spin_lock(&dl_b->lock); 7470 raw_spin_lock_irqsave(&dl_b->lock, flags);
7468 dl_b->bw = new_bw; 7471 dl_b->bw = new_bw;
7469 raw_spin_unlock(&dl_b->lock); 7472 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7470 } 7473 }
7471} 7474}
7472 7475
@@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)
7475 if (sysctl_sched_rt_period <= 0) 7478 if (sysctl_sched_rt_period <= 0)
7476 return -EINVAL; 7479 return -EINVAL;
7477 7480
7478 if (sysctl_sched_rt_runtime > sysctl_sched_rt_period) 7481 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
7482 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
7479 return -EINVAL; 7483 return -EINVAL;
7480 7484
7481 return 0; 7485 return 0;
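
Both sched_setattr() and sched_getattr() now take a trailing flags word that must be zero. A minimal userspace sketch of the updated ABI follows; the struct shown is the VER0 layout, and the availability of SYS_sched_getattr in the libc headers is an assumption:

/* Userspace sketch of the extended scheduling syscalls. */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = { .size = sizeof(attr) };

	/* final argument is the new flags word; it must be 0 */
	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0))
		perror("sched_getattr");
	else
		printf("policy=%u nice=%d\n", attr.sched_policy, attr.sched_nice);
	return 0;
}
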
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b8838b56d1c 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
70 70
71static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) 71static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
72{ 72{
73 WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID); 73 WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
74 74
75 if (dl_time_before(new_dl, cp->elements[idx].dl)) { 75 if (dl_time_before(new_dl, cp->elements[idx].dl)) {
76 cp->elements[idx].dl = new_dl; 76 cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
117 } 117 }
118 118
119out: 119out:
120 WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1); 120 WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
121 121
122 return best_cpu; 122 return best_cpu;
123} 123}
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
137 int old_idx, new_cpu; 137 int old_idx, new_cpu;
138 unsigned long flags; 138 unsigned long flags;
139 139
140 WARN_ON(cpu > num_present_cpus()); 140 WARN_ON(!cpu_present(cpu));
141 141
142 raw_spin_lock_irqsave(&cp->lock, flags); 142 raw_spin_lock_irqsave(&cp->lock, flags);
143 old_idx = cp->cpu_to_idx[cpu]; 143 old_idx = cp->cpu_to_idx[cpu];
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..15cbc17fbf84 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
121 121
122static void update_dl_migration(struct dl_rq *dl_rq) 122static void update_dl_migration(struct dl_rq *dl_rq)
123{ 123{
124 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { 124 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
125 if (!dl_rq->overloaded) { 125 if (!dl_rq->overloaded) {
126 dl_set_overload(rq_of_dl_rq(dl_rq)); 126 dl_set_overload(rq_of_dl_rq(dl_rq));
127 dl_rq->overloaded = 1; 127 dl_rq->overloaded = 1;
@@ -137,7 +137,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
137 struct task_struct *p = dl_task_of(dl_se); 137 struct task_struct *p = dl_task_of(dl_se);
138 dl_rq = &rq_of_dl_rq(dl_rq)->dl; 138 dl_rq = &rq_of_dl_rq(dl_rq)->dl;
139 139
140 dl_rq->dl_nr_total++;
141 if (p->nr_cpus_allowed > 1) 140 if (p->nr_cpus_allowed > 1)
142 dl_rq->dl_nr_migratory++; 141 dl_rq->dl_nr_migratory++;
143 142
@@ -149,7 +148,6 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
149 struct task_struct *p = dl_task_of(dl_se); 148 struct task_struct *p = dl_task_of(dl_se);
150 dl_rq = &rq_of_dl_rq(dl_rq)->dl; 149 dl_rq = &rq_of_dl_rq(dl_rq)->dl;
151 150
152 dl_rq->dl_nr_total--;
153 if (p->nr_cpus_allowed > 1) 151 if (p->nr_cpus_allowed > 1)
154 dl_rq->dl_nr_migratory--; 152 dl_rq->dl_nr_migratory--;
155 153
@@ -717,6 +715,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
717 715
718 WARN_ON(!dl_prio(prio)); 716 WARN_ON(!dl_prio(prio));
719 dl_rq->dl_nr_running++; 717 dl_rq->dl_nr_running++;
718 inc_nr_running(rq_of_dl_rq(dl_rq));
720 719
721 inc_dl_deadline(dl_rq, deadline); 720 inc_dl_deadline(dl_rq, deadline);
722 inc_dl_migration(dl_se, dl_rq); 721 inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +729,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
730 WARN_ON(!dl_prio(prio)); 729 WARN_ON(!dl_prio(prio));
731 WARN_ON(!dl_rq->dl_nr_running); 730 WARN_ON(!dl_rq->dl_nr_running);
732 dl_rq->dl_nr_running--; 731 dl_rq->dl_nr_running--;
732 dec_nr_running(rq_of_dl_rq(dl_rq));
733 733
734 dec_dl_deadline(dl_rq, dl_se->deadline); 734 dec_dl_deadline(dl_rq, dl_se->deadline);
735 dec_dl_migration(dl_se, dl_rq); 735 dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +836,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
836 836
837 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 837 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
838 enqueue_pushable_dl_task(rq, p); 838 enqueue_pushable_dl_task(rq, p);
839
840 inc_nr_running(rq);
841} 839}
842 840
843static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 841static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +848,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
850{ 848{
851 update_curr_dl(rq); 849 update_curr_dl(rq);
852 __dequeue_task_dl(rq, p, flags); 850 __dequeue_task_dl(rq, p, flags);
853
854 dec_nr_running(rq);
855} 851}
856 852
857/* 853/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2bfcb77..78157099b167 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
1757 start = end; 1757 start = end;
1758 if (pages <= 0) 1758 if (pages <= 0)
1759 goto out; 1759 goto out;
1760
1761 cond_resched();
1760 } while (end != vma->vm_end); 1762 } while (end != vma->vm_end);
1761 } 1763 }
1762 1764
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..f964add50f38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
462 } earliest_dl; 462 } earliest_dl;
463 463
464 unsigned long dl_nr_migratory; 464 unsigned long dl_nr_migratory;
465 unsigned long dl_nr_total;
466 int overloaded; 465 int overloaded;
467 466
468 /* 467 /*
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba456fb..a6a5bf53e86d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
51 * HZ shrinks, so values greater than 8 overflow 32bits when 51 * HZ shrinks, so values greater than 8 overflow 32bits when
52 * HZ=100. 52 * HZ=100.
53 */ 53 */
54#if HZ < 34
55#define JIFFIES_SHIFT 6
56#elif HZ < 67
57#define JIFFIES_SHIFT 7
58#else
54#define JIFFIES_SHIFT 8 59#define JIFFIES_SHIFT 8
60#endif
55 61
56static cycle_t jiffies_read(struct clocksource *cs) 62static cycle_t jiffies_read(struct clocksource *cs)
57{ 63{
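
A quick host-side check (not kernel code) of the bound the new #if ladder enforces: the clocksource mult is roughly (NSEC_PER_SEC / HZ) << JIFFIES_SHIFT and must stay inside 32 bits, so low-HZ configurations need a smaller shift:

/* Host-side arithmetic check of the JIFFIES_SHIFT selection. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	int hz_values[] = { 100, 48, 24 };

	for (int i = 0; i < 3; i++) {
		int hz = hz_values[i];
		int shift = hz < 34 ? 6 : hz < 67 ? 7 : 8;
		uint64_t mult = (nsec_per_sec / hz) << shift;

		printf("HZ=%-3d shift=%d mult=%llu fits32=%s\n", hz, shift,
		       (unsigned long long)mult,
		       mult <= UINT32_MAX ? "yes" : "no");
	}
	return 0;
}
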
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0abb36464281..4d23dc4d8139 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
116void __init sched_clock_register(u64 (*read)(void), int bits, 116void __init sched_clock_register(u64 (*read)(void), int bits,
117 unsigned long rate) 117 unsigned long rate)
118{ 118{
119 u64 res, wrap, new_mask, new_epoch, cyc, ns;
120 u32 new_mult, new_shift;
121 ktime_t new_wrap_kt;
119 unsigned long r; 122 unsigned long r;
120 u64 res, wrap;
121 char r_unit; 123 char r_unit;
122 124
123 if (cd.rate > rate) 125 if (cd.rate > rate)
124 return; 126 return;
125 127
126 WARN_ON(!irqs_disabled()); 128 WARN_ON(!irqs_disabled());
127 read_sched_clock = read;
128 sched_clock_mask = CLOCKSOURCE_MASK(bits);
129 cd.rate = rate;
130 129
131 /* calculate the mult/shift to convert counter ticks to ns. */ 130 /* calculate the mult/shift to convert counter ticks to ns. */
132 clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); 131 clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
132
133 new_mask = CLOCKSOURCE_MASK(bits);
134
135 /* calculate how many ns until we wrap */
136 wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
137 new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
138
139 /* update epoch for new counter and update epoch_ns from old counter*/
140 new_epoch = read();
141 cyc = read_sched_clock();
142 ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
143 cd.mult, cd.shift);
144
145 raw_write_seqcount_begin(&cd.seq);
146 read_sched_clock = read;
147 sched_clock_mask = new_mask;
148 cd.rate = rate;
149 cd.wrap_kt = new_wrap_kt;
150 cd.mult = new_mult;
151 cd.shift = new_shift;
152 cd.epoch_cyc = new_epoch;
153 cd.epoch_ns = ns;
154 raw_write_seqcount_end(&cd.seq);
133 155
134 r = rate; 156 r = rate;
135 if (r >= 4000000) { 157 if (r >= 4000000) {
@@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
141 } else 163 } else
142 r_unit = ' '; 164 r_unit = ' ';
143 165
144 /* calculate how many ns until we wrap */
145 wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
146 cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
147
148 /* calculate the ns resolution of this counter */ 166 /* calculate the ns resolution of this counter */
149 res = cyc_to_ns(1ULL, cd.mult, cd.shift); 167 res = cyc_to_ns(1ULL, new_mult, new_shift);
168
150 pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n", 169 pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
151 bits, r, r_unit, res, wrap); 170 bits, r, r_unit, res, wrap);
152 171
153 update_sched_clock();
154
155 /*
156 * Ensure that sched_clock() starts off at 0ns
157 */
158 cd.epoch_ns = 0;
159
160 /* Enable IRQ time accounting if we have a fast enough sched_clock */ 172 /* Enable IRQ time accounting if we have a fast enough sched_clock */
161 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) 173 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
162 enable_sched_clock_irqtime(); 174 enable_sched_clock_irqtime();
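The reworked sched_clock_register() computes the new mult/shift/mask/epoch values first and only then publishes them inside a single raw_write_seqcount_begin()/end() section, so a concurrent sched_clock() reader never observes a half-updated mix of old and new parameters. Below is a minimal userspace analogue of that "compute first, publish as one unit" structure; it uses a reader-writer lock in place of the kernel's lockless seqcount, purely as an illustrative sketch (build with -pthread):

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

struct clock_data {
	pthread_rwlock_t lock;
	uint64_t epoch_ns;
	uint32_t mult, shift;
};

static struct clock_data cd = { .lock = PTHREAD_RWLOCK_INITIALIZER };

/* Writer: do all the slow computation outside the lock, then publish every
 * field in one short critical section. */
static void clock_register(uint64_t new_epoch_ns, uint32_t new_mult,
			   uint32_t new_shift)
{
	pthread_rwlock_wrlock(&cd.lock);
	cd.epoch_ns = new_epoch_ns;
	cd.mult = new_mult;
	cd.shift = new_shift;
	pthread_rwlock_unlock(&cd.lock);
}

/* Reader: always sees a consistent (epoch_ns, mult, shift) triple. */
static uint64_t clock_read_ns(uint64_t cyc)
{
	uint64_t ns;

	pthread_rwlock_rdlock(&cd.lock);
	ns = cd.epoch_ns + ((cyc * cd.mult) >> cd.shift);
	pthread_rwlock_unlock(&cd.lock);
	return ns;
}

int main(void)
{
	clock_register(0, 1 << 22, 22);	/* 1 ns per cycle, as if 1 GHz */
	printf("%llu ns\n", (unsigned long long)clock_read_ns(1000));
	return 0;
}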
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 43780ab5e279..98977a57ac72 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -756,6 +756,7 @@ out:
756static void tick_broadcast_clear_oneshot(int cpu) 756static void tick_broadcast_clear_oneshot(int cpu)
757{ 757{
758 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); 758 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
759 cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
759} 760}
760 761
761static void tick_broadcast_init_next_event(struct cpumask *mask, 762static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 294b8a271a04..fc4da2d97f9b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2397 write &= RB_WRITE_MASK; 2397 write &= RB_WRITE_MASK;
2398 tail = write - length; 2398 tail = write - length;
2399 2399
2400 /*
2401 * If this is the first commit on the page, then it has the same
2402 * timestamp as the page itself.
2403 */
2404 if (!tail)
2405 delta = 0;
2406
2400 /* See if we shot pass the end of this buffer page */ 2407 /* See if we shot pass the end of this buffer page */
2401 if (unlikely(write > BUF_PAGE_SIZE)) 2408 if (unlikely(write > BUF_PAGE_SIZE))
2402 return rb_move_tail(cpu_buffer, length, tail, 2409 return rb_move_tail(cpu_buffer, length, tail,
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62cf394..dd06439b9c84 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
225 * 225 *
226 * When there is no mapping defined for the user-namespace uid 226 * When there is no mapping defined for the user-namespace uid
227 * pair INVALID_UID is returned. Callers are expected to test 227 * pair INVALID_UID is returned. Callers are expected to test
228 * for and handle handle INVALID_UID being returned. INVALID_UID 228 * for and handle INVALID_UID being returned. INVALID_UID
229 * may be tested for using uid_valid(). 229 * may be tested for using uid_valid().
230 */ 230 */
231kuid_t make_kuid(struct user_namespace *ns, uid_t uid) 231kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3b7473..193e977a10ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)
1851 if (worker->flags & WORKER_IDLE) 1851 if (worker->flags & WORKER_IDLE)
1852 pool->nr_idle--; 1852 pool->nr_idle--;
1853 1853
1854 /*
1855 * Once WORKER_DIE is set, the kworker may destroy itself at any
1856 * point. Pin to ensure the task stays until we're done with it.
1857 */
1858 get_task_struct(worker->task);
1859
1854 list_del_init(&worker->entry); 1860 list_del_init(&worker->entry);
1855 worker->flags |= WORKER_DIE; 1861 worker->flags |= WORKER_DIE;
1856 1862
@@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)
1859 spin_unlock_irq(&pool->lock); 1865 spin_unlock_irq(&pool->lock);
1860 1866
1861 kthread_stop(worker->task); 1867 kthread_stop(worker->task);
1868 put_task_struct(worker->task);
1862 kfree(worker); 1869 kfree(worker);
1863 1870
1864 spin_lock_irq(&pool->lock); 1871 spin_lock_irq(&pool->lock);
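The added get_task_struct()/put_task_struct() pair pins the worker's task across the window where the pool lock is dropped: once WORKER_DIE is set the kworker may release its own last reference at any time, yet kthread_stop() still needs a valid pointer. A generic userspace sketch of the same pin-before-unlock refcounting pattern follows (hypothetical worker struct, not the kernel API; build with -pthread):

#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>

struct worker {
	atomic_int refcount;
};

static struct worker *worker_get(struct worker *w)
{
	atomic_fetch_add(&w->refcount, 1);
	return w;
}

static void worker_put(struct worker *w)
{
	if (atomic_fetch_sub(&w->refcount, 1) == 1) {
		printf("worker freed\n");
		free(w);
	}
}

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pin the worker before dropping the pool lock: once it has been told to
 * die it may drop its own reference at any point, yet the pointer is still
 * needed after the unlock. */
static void destroy_worker(struct worker *w)
{
	pthread_mutex_lock(&pool_lock);
	worker_get(w);			/* our pin */
	/* mark as dying, unlink from the pool's lists ... */
	pthread_mutex_unlock(&pool_lock);

	worker_put(w);			/* stand-in for the worker's own final put */
	/* wait for the thread to exit (kthread_stop() in the kernel) ... */
	worker_put(w);			/* drop our pin; the object is freed only now */
}

int main(void)
{
	struct worker *w = malloc(sizeof(*w));

	atomic_init(&w->refcount, 1);	/* the worker's own reference */
	destroy_worker(w);
	return 0;
}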
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 7be235f1a70b..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
54/* 54/*
55 * Try to steal tags from a remote cpu's percpu freelist. 55 * Try to steal tags from a remote cpu's percpu freelist.
56 * 56 *
57 * We first check how many percpu freelists have tags - we don't steal tags 57 * We first check how many percpu freelists have tags
58 * unless enough percpu freelists have tags on them that it's possible more than
59 * half the total tags could be stuck on remote percpu freelists.
60 * 58 *
61 * Then we iterate through the cpus until we find some tags - we don't attempt 59 * Then we iterate through the cpus until we find some tags - we don't attempt
62 * to find the "best" cpu to steal from, to keep cacheline bouncing to a 60 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
69 struct percpu_ida_cpu *remote; 67 struct percpu_ida_cpu *remote;
70 68
71 for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); 69 for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
72 cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; 70 cpus_have_tags; cpus_have_tags--) {
73 cpus_have_tags--) {
74 cpu = cpumask_next(cpu, &pool->cpus_have_tags); 71 cpu = cpumask_next(cpu, &pool->cpus_have_tags);
75 72
76 if (cpu >= nr_cpu_ids) { 73 if (cpu >= nr_cpu_ids) {
diff --git a/mm/filemap.c b/mm/filemap.c
index d56d3c145b9f..7a13f6ac5421 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2553 if (ret > 0) { 2553 if (ret > 0) {
2554 ssize_t err; 2554 ssize_t err;
2555 2555
2556 err = generic_write_sync(file, pos, ret); 2556 err = generic_write_sync(file, iocb->ki_pos - ret, ret);
2557 if (err < 0 && ret > 0) 2557 if (err < 0)
2558 ret = err; 2558 ret = err;
2559 } 2559 }
2560 return ret; 2560 return ret;
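The corrected call syncs the right byte range: by the time generic_write_sync() runs, iocb->ki_pos has already been advanced past the data just written, so the range to flush starts ret bytes before the current position rather than at it. A trivial standalone illustration of the arithmetic (made-up values):

#include <stdio.h>

int main(void)
{
	long long pos_before = 4096;		/* file position when the write started */
	long long ret = 512;			/* bytes actually written */
	long long pos_after = pos_before + ret;	/* ki_pos after the write */

	/* Sync exactly the bytes just written, not the bytes about to be
	 * written next: [pos_after - ret, pos_after). */
	printf("sync range: [%lld, %lld)\n", pos_after - ret, pos_after);
	return 0;
}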
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..4df39b1bde91 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1166,8 +1166,10 @@ alloc:
1166 } else { 1166 } else {
1167 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 1167 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1168 pmd, orig_pmd, page, haddr); 1168 pmd, orig_pmd, page, haddr);
1169 if (ret & VM_FAULT_OOM) 1169 if (ret & VM_FAULT_OOM) {
1170 split_huge_page(page); 1170 split_huge_page(page);
1171 ret |= VM_FAULT_FALLBACK;
1172 }
1171 put_page(page); 1173 put_page(page);
1172 } 1174 }
1173 count_vm_event(THP_FAULT_FALLBACK); 1175 count_vm_event(THP_FAULT_FALLBACK);
@@ -1179,9 +1181,10 @@ alloc:
1179 if (page) { 1181 if (page) {
1180 split_huge_page(page); 1182 split_huge_page(page);
1181 put_page(page); 1183 put_page(page);
1182 } 1184 } else
1185 split_huge_page_pmd(vma, address, pmd);
1186 ret |= VM_FAULT_FALLBACK;
1183 count_vm_event(THP_FAULT_FALLBACK); 1187 count_vm_event(THP_FAULT_FALLBACK);
1184 ret |= VM_FAULT_OOM;
1185 goto out; 1188 goto out;
1186 } 1189 }
1187 1190
@@ -1545,6 +1548,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1545 entry = pmd_mknonnuma(entry); 1548 entry = pmd_mknonnuma(entry);
1546 entry = pmd_modify(entry, newprot); 1549 entry = pmd_modify(entry, newprot);
1547 ret = HPAGE_PMD_NR; 1550 ret = HPAGE_PMD_NR;
1551 set_pmd_at(mm, addr, pmd, entry);
1548 BUG_ON(pmd_write(entry)); 1552 BUG_ON(pmd_write(entry));
1549 } else { 1553 } else {
1550 struct page *page = pmd_page(*pmd); 1554 struct page *page = pmd_page(*pmd);
@@ -1557,16 +1561,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1557 */ 1561 */
1558 if (!is_huge_zero_page(page) && 1562 if (!is_huge_zero_page(page) &&
1559 !pmd_numa(*pmd)) { 1563 !pmd_numa(*pmd)) {
1560 entry = *pmd; 1564 pmdp_set_numa(mm, addr, pmd);
1561 entry = pmd_mknuma(entry);
1562 ret = HPAGE_PMD_NR; 1565 ret = HPAGE_PMD_NR;
1563 } 1566 }
1564 } 1567 }
1565
1566 /* Set PMD if cleared earlier */
1567 if (ret == HPAGE_PMD_NR)
1568 set_pmd_at(mm, addr, pmd, entry);
1569
1570 spin_unlock(ptl); 1568 spin_unlock(ptl);
1571 } 1569 }
1572 1570
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53385cd4e6f0..ce7a8cc7b404 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1687,7 +1687,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1687 * protects memcg_name and makes sure that parallel ooms do not 1687 * protects memcg_name and makes sure that parallel ooms do not
1688 * interleave 1688 * interleave
1689 */ 1689 */
1690 static DEFINE_SPINLOCK(oom_info_lock); 1690 static DEFINE_MUTEX(oom_info_lock);
1691 struct cgroup *task_cgrp; 1691 struct cgroup *task_cgrp;
1692 struct cgroup *mem_cgrp; 1692 struct cgroup *mem_cgrp;
1693 static char memcg_name[PATH_MAX]; 1693 static char memcg_name[PATH_MAX];
@@ -1698,7 +1698,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1698 if (!p) 1698 if (!p)
1699 return; 1699 return;
1700 1700
1701 spin_lock(&oom_info_lock); 1701 mutex_lock(&oom_info_lock);
1702 rcu_read_lock(); 1702 rcu_read_lock();
1703 1703
1704 mem_cgrp = memcg->css.cgroup; 1704 mem_cgrp = memcg->css.cgroup;
@@ -1767,7 +1767,7 @@ done:
1767 1767
1768 pr_cont("\n"); 1768 pr_cont("\n");
1769 } 1769 }
1770 spin_unlock(&oom_info_lock); 1770 mutex_unlock(&oom_info_lock);
1771} 1771}
1772 1772
1773/* 1773/*
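Switching oom_info_lock from a spinlock to a mutex means tasks racing to print OOM diagnostics sleep instead of busy-waiting while another task finishes its potentially long dump. A minimal userspace analogue of serializing a slow report with a sleeping lock (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* One report at a time: contending threads sleep on the mutex instead of
 * spinning while another thread dumps its diagnostics. */
static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;

static void *print_report(void *who)
{
	pthread_mutex_lock(&report_lock);
	printf("---- report from %s ----\n", (const char *)who);
	/* ... many lines of output ... */
	printf("---- end of report from %s ----\n", (const char *)who);
	pthread_mutex_unlock(&report_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, print_report, "task A");
	pthread_create(&b, NULL, print_report, "task B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}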
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d61487..2f2f34a4e77d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
945 * to it. Similarly, page lock is shifted. 945 * to it. Similarly, page lock is shifted.
946 */ 946 */
947 if (hpage != p) { 947 if (hpage != p) {
948 put_page(hpage); 948 if (!(flags & MF_COUNT_INCREASED)) {
949 get_page(p); 949 put_page(hpage);
950 get_page(p);
951 }
950 lock_page(p); 952 lock_page(p);
951 unlock_page(hpage); 953 unlock_page(hpage);
952 *hpagep = p; 954 *hpagep = p;
diff --git a/mm/memory.c b/mm/memory.c
index be6a0c0d4ae0..22dfa617bddb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3348,6 +3348,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3348 if (ret & VM_FAULT_LOCKED) 3348 if (ret & VM_FAULT_LOCKED)
3349 unlock_page(vmf.page); 3349 unlock_page(vmf.page);
3350 ret = VM_FAULT_HWPOISON; 3350 ret = VM_FAULT_HWPOISON;
3351 page_cache_release(vmf.page);
3351 goto uncharge_out; 3352 goto uncharge_out;
3352 } 3353 }
3353 3354
@@ -3703,7 +3704,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3703 if (unlikely(is_vm_hugetlb_page(vma))) 3704 if (unlikely(is_vm_hugetlb_page(vma)))
3704 return hugetlb_fault(mm, vma, address, flags); 3705 return hugetlb_fault(mm, vma, address, flags);
3705 3706
3706retry:
3707 pgd = pgd_offset(mm, address); 3707 pgd = pgd_offset(mm, address);
3708 pud = pud_alloc(mm, pgd, address); 3708 pud = pud_alloc(mm, pgd, address);
3709 if (!pud) 3709 if (!pud)
@@ -3741,20 +3741,13 @@ retry:
3741 if (dirty && !pmd_write(orig_pmd)) { 3741 if (dirty && !pmd_write(orig_pmd)) {
3742 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, 3742 ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
3743 orig_pmd); 3743 orig_pmd);
3744 /* 3744 if (!(ret & VM_FAULT_FALLBACK))
3745 * If COW results in an oom, the huge pmd will 3745 return ret;
3746 * have been split, so retry the fault on the
3747 * pte for a smaller charge.
3748 */
3749 if (unlikely(ret & VM_FAULT_OOM))
3750 goto retry;
3751 return ret;
3752 } else { 3746 } else {
3753 huge_pmd_set_accessed(mm, vma, address, pmd, 3747 huge_pmd_set_accessed(mm, vma, address, pmd,
3754 orig_pmd, dirty); 3748 orig_pmd, dirty);
3749 return 0;
3755 } 3750 }
3756
3757 return 0;
3758 } 3751 }
3759 } 3752 }
3760 3753
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
58 if (pte_numa(ptent)) 58 if (pte_numa(ptent))
59 ptent = pte_mknonnuma(ptent); 59 ptent = pte_mknonnuma(ptent);
60 ptent = pte_modify(ptent, newprot); 60 ptent = pte_modify(ptent, newprot);
61 /*
62 * Avoid taking write faults for pages we
63 * know to be dirty.
64 */
65 if (dirty_accountable && pte_dirty(ptent))
66 ptent = pte_mkwrite(ptent);
67 ptep_modify_prot_commit(mm, addr, pte, ptent);
61 updated = true; 68 updated = true;
62 } else { 69 } else {
63 struct page *page; 70 struct page *page;
64 71
65 ptent = *pte;
66 page = vm_normal_page(vma, addr, oldpte); 72 page = vm_normal_page(vma, addr, oldpte);
67 if (page && !PageKsm(page)) { 73 if (page && !PageKsm(page)) {
68 if (!pte_numa(oldpte)) { 74 if (!pte_numa(oldpte)) {
69 ptent = pte_mknuma(ptent); 75 ptep_set_numa(mm, addr, pte);
70 set_pte_at(mm, addr, pte, ptent);
71 updated = true; 76 updated = true;
72 } 77 }
73 } 78 }
74 } 79 }
75
76 /*
77 * Avoid taking write faults for pages we know to be
78 * dirty.
79 */
80 if (dirty_accountable && pte_dirty(ptent)) {
81 ptent = pte_mkwrite(ptent);
82 updated = true;
83 }
84
85 if (updated) 80 if (updated)
86 pages++; 81 pages++;
87
88 /* Only !prot_numa always clears the pte */
89 if (!prot_numa)
90 ptep_modify_prot_commit(mm, addr, pte, ptent);
91 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { 82 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
92 swp_entry_t entry = pte_to_swp_entry(oldpte); 83 swp_entry_t entry = pte_to_swp_entry(oldpte);
93 84
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
1004static void add_full(struct kmem_cache *s, 1004static void add_full(struct kmem_cache *s,
1005 struct kmem_cache_node *n, struct page *page) 1005 struct kmem_cache_node *n, struct page *page)
1006{ 1006{
1007 lockdep_assert_held(&n->list_lock);
1008
1009 if (!(s->flags & SLAB_STORE_USER)) 1007 if (!(s->flags & SLAB_STORE_USER))
1010 return; 1008 return;
1011 1009
1010 lockdep_assert_held(&n->list_lock);
1012 list_add(&page->lru, &n->full); 1011 list_add(&page->lru, &n->full);
1013} 1012}
1014 1013
1015static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) 1014static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1016{ 1015{
1017 lockdep_assert_held(&n->list_lock);
1018
1019 if (!(s->flags & SLAB_STORE_USER)) 1016 if (!(s->flags & SLAB_STORE_USER))
1020 return; 1017 return;
1021 1018
1019 lockdep_assert_held(&n->list_lock);
1022 list_del(&page->lru); 1020 list_del(&page->lru);
1023} 1021}
1024 1022
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
1520/* 1518/*
1521 * Management of partially allocated slabs. 1519 * Management of partially allocated slabs.
1522 */ 1520 */
1523static inline void add_partial(struct kmem_cache_node *n, 1521static inline void
1524 struct page *page, int tail) 1522__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1525{ 1523{
1526 lockdep_assert_held(&n->list_lock);
1527
1528 n->nr_partial++; 1524 n->nr_partial++;
1529 if (tail == DEACTIVATE_TO_TAIL) 1525 if (tail == DEACTIVATE_TO_TAIL)
1530 list_add_tail(&page->lru, &n->partial); 1526 list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
1532 list_add(&page->lru, &n->partial); 1528 list_add(&page->lru, &n->partial);
1533} 1529}
1534 1530
1535static inline void remove_partial(struct kmem_cache_node *n, 1531static inline void add_partial(struct kmem_cache_node *n,
1536 struct page *page) 1532 struct page *page, int tail)
1537{ 1533{
1538 lockdep_assert_held(&n->list_lock); 1534 lockdep_assert_held(&n->list_lock);
1535 __add_partial(n, page, tail);
1536}
1539 1537
1538static inline void
1539__remove_partial(struct kmem_cache_node *n, struct page *page)
1540{
1540 list_del(&page->lru); 1541 list_del(&page->lru);
1541 n->nr_partial--; 1542 n->nr_partial--;
1542} 1543}
1543 1544
1545static inline void remove_partial(struct kmem_cache_node *n,
1546 struct page *page)
1547{
1548 lockdep_assert_held(&n->list_lock);
1549 __remove_partial(n, page);
1550}
1551
1544/* 1552/*
1545 * Remove slab from the partial list, freeze it and 1553 * Remove slab from the partial list, freeze it and
1546 * return the pointer to the freelist. 1554 * return the pointer to the freelist.
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
2906 inc_slabs_node(kmem_cache_node, node, page->objects); 2914 inc_slabs_node(kmem_cache_node, node, page->objects);
2907 2915
2908 /* 2916 /*
2909 * the lock is for lockdep's sake, not for any actual 2917 * No locks need to be taken here as it has just been
2910 * race protection 2918 * initialized and there is no concurrent access.
2911 */ 2919 */
2912 spin_lock(&n->list_lock); 2920 __add_partial(n, page, DEACTIVATE_TO_HEAD);
2913 add_partial(n, page, DEACTIVATE_TO_HEAD);
2914 spin_unlock(&n->list_lock);
2915} 2921}
2916 2922
2917static void free_kmem_cache_nodes(struct kmem_cache *s) 2923static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3197 3203
3198 list_for_each_entry_safe(page, h, &n->partial, lru) { 3204 list_for_each_entry_safe(page, h, &n->partial, lru) {
3199 if (!page->inuse) { 3205 if (!page->inuse) {
3200 remove_partial(n, page); 3206 __remove_partial(n, page);
3201 discard_slab(s, page); 3207 discard_slab(s, page);
3202 } else { 3208 } else {
3203 list_slab_objects(s, page, 3209 list_slab_objects(s, page,
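The slub change splits each list helper into a plain __ variant and a wrapper that asserts the lock, so paths with provably no concurrency, such as early_kmem_cache_node_alloc() on a just-created node, can skip taking list_lock purely for lockdep's sake. A small sketch of the wrapper-plus-__variant idiom, with userspace stand-ins (assert() in place of lockdep_assert_held()):

#include <assert.h>
#include <stdio.h>
#include <pthread.h>

struct node {
	pthread_mutex_t list_lock;
	int locked;			/* stand-in for lockdep's lock state */
	int nr_partial;
};

/* Unlocked variant: caller guarantees exclusive access, e.g. the node was
 * just allocated and nothing else can see it yet. */
static void __add_partial(struct node *n)
{
	n->nr_partial++;
	/* ... list_add ... */
}

/* Normal variant: document and check the locking requirement. */
static void add_partial(struct node *n)
{
	assert(n->locked);		/* poor man's lockdep_assert_held() */
	__add_partial(n);
}

int main(void)
{
	struct node n = { .list_lock = PTHREAD_MUTEX_INITIALIZER };

	__add_partial(&n);		/* init path: no lock, no assertion */

	pthread_mutex_lock(&n.list_lock);
	n.locked = 1;
	add_partial(&n);		/* normal path: lock held, assertion passes */
	n.locked = 0;
	pthread_mutex_unlock(&n.list_lock);

	printf("nr_partial = %d\n", n.nr_partial);
	return 0;
}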
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a4541f..d4042e75f7c7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/vmstat.h> 20#include <linux/vmstat.h>
21#include <linux/eventfd.h> 21#include <linux/eventfd.h>
22#include <linux/slab.h>
22#include <linux/swap.h> 23#include <linux/swap.h>
23#include <linux/printk.h> 24#include <linux/printk.h>
24#include <linux/vmpressure.h> 25#include <linux/vmpressure.h>
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2dcb03e..9186550d77a6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@ free_and_return:
204 return ret; 204 return ret;
205} 205}
206 206
207struct p9_fcall *p9_fcall_alloc(int alloc_msize) 207static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
208{ 208{
209 struct p9_fcall *fc; 209 struct p9_fcall *fc;
210 fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS); 210 fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ede73a4..ac2666c1d011 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
340 int count = nr_pages; 340 int count = nr_pages;
341 while (nr_pages) { 341 while (nr_pages) {
342 s = rest_of_page(data); 342 s = rest_of_page(data);
343 pages[index++] = kmap_to_page(data); 343 if (is_vmalloc_addr(data))
344 pages[index++] = vmalloc_to_page(data);
345 else
346 pages[index++] = kmap_to_page(data);
344 data += s; 347 data += s;
345 nr_pages--; 348 nr_pages--;
346 } 349 }
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159bf607f..8323bced8e5b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
241 size = bat_priv->num_ifaces * sizeof(uint8_t); 241 size = bat_priv->num_ifaces * sizeof(uint8_t);
242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); 242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
243 if (!orig_node->bat_iv.bcast_own_sum) 243 if (!orig_node->bat_iv.bcast_own_sum)
244 goto free_bcast_own; 244 goto free_orig_node;
245 245
246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, 246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
247 batadv_choose_orig, orig_node, 247 batadv_choose_orig, orig_node,
248 &orig_node->hash_entry); 248 &orig_node->hash_entry);
249 if (hash_added != 0) 249 if (hash_added != 0)
250 goto free_bcast_own; 250 goto free_orig_node;
251 251
252 return orig_node; 252 return orig_node;
253 253
254free_bcast_own:
255 kfree(orig_node->bat_iv.bcast_own);
256free_orig_node: 254free_orig_node:
255 /* free twice, as batadv_orig_node_new sets refcount to 2 */
256 batadv_orig_node_free_ref(orig_node);
257 batadv_orig_node_free_ref(orig_node); 257 batadv_orig_node_free_ref(orig_node);
258 258
259 return NULL; 259 return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
266 struct batadv_orig_node *orig_neigh) 266 struct batadv_orig_node *orig_neigh)
267{ 267{
268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
269 struct batadv_neigh_node *neigh_node; 269 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
270 270
271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); 271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
272 if (!neigh_node) 272 if (!neigh_node)
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
281 neigh_node->orig_node = orig_neigh; 281 neigh_node->orig_node = orig_neigh;
282 neigh_node->if_incoming = hard_iface; 282 neigh_node->if_incoming = hard_iface;
283 283
284 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
285 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
286 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
287
288 spin_lock_bh(&orig_node->neigh_list_lock); 284 spin_lock_bh(&orig_node->neigh_list_lock);
289 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 285 tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
286 neigh_addr);
287 if (!tmp_neigh_node) {
288 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
289 } else {
290 kfree(neigh_node);
291 batadv_hardif_free_ref(hard_iface);
292 neigh_node = tmp_neigh_node;
293 }
290 spin_unlock_bh(&orig_node->neigh_list_lock); 294 spin_unlock_bh(&orig_node->neigh_list_lock);
291 295
296 if (!tmp_neigh_node)
297 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
298 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
299 neigh_addr, orig_node->orig,
300 hard_iface->net_dev->name);
301
292out: 302out:
293 return neigh_node; 303 return neigh_node;
294} 304}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3641c6..b851cc580853 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
241{ 241{
242 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 242 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
243 const struct batadv_hard_iface *hard_iface; 243 const struct batadv_hard_iface *hard_iface;
244 int min_mtu = ETH_DATA_LEN; 244 int min_mtu = INT_MAX;
245 245
246 rcu_read_lock(); 246 rcu_read_lock();
247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
256 } 256 }
257 rcu_read_unlock(); 257 rcu_read_unlock();
258 258
259 atomic_set(&bat_priv->packet_size_max, min_mtu);
260
261 if (atomic_read(&bat_priv->fragmentation) == 0) 259 if (atomic_read(&bat_priv->fragmentation) == 0)
262 goto out; 260 goto out;
263 261
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
268 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); 266 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
269 min_mtu -= sizeof(struct batadv_frag_packet); 267 min_mtu -= sizeof(struct batadv_frag_packet);
270 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; 268 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
271 atomic_set(&bat_priv->packet_size_max, min_mtu);
272
273 /* with fragmentation enabled we can fragment external packets easily */
274 min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
275 269
276out: 270out:
277 return min_mtu - batadv_max_header_len(); 271 /* report to the other components the maximum amount of bytes that
272 * batman-adv can send over the wire (without considering the payload
273 * overhead). For example, this value is used by TT to compute the
274 * maximum local table table size
275 */
276 atomic_set(&bat_priv->packet_size_max, min_mtu);
277
278 /* the real soft-interface MTU is computed by removing the payload
279 * overhead from the maximum amount of bytes that was just computed.
280 *
281 * However batman-adv does not support MTUs bigger than ETH_DATA_LEN
282 */
283 return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
278} 284}
279 285
280/* adjusts the MTU if a new interface with a smaller MTU appeared. */ 286/* adjusts the MTU if a new interface with a smaller MTU appeared. */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2e3605..853941629dc1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@ out:
458} 458}
459 459
460/** 460/**
461 * batadv_neigh_node_get - retrieve a neighbour from the list
462 * @orig_node: originator which the neighbour belongs to
463 * @hard_iface: the interface where this neighbour is connected to
464 * @addr: the address of the neighbour
465 *
466 * Looks for and possibly returns a neighbour belonging to this originator list
467 * which is connected through the provided hard interface.
468 * Returns NULL if the neighbour is not found.
469 */
470struct batadv_neigh_node *
471batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
472 const struct batadv_hard_iface *hard_iface,
473 const uint8_t *addr)
474{
475 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
476
477 rcu_read_lock();
478 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
479 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
480 continue;
481
482 if (tmp_neigh_node->if_incoming != hard_iface)
483 continue;
484
485 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
486 continue;
487
488 res = tmp_neigh_node;
489 break;
490 }
491 rcu_read_unlock();
492
493 return res;
494}
495
496/**
461 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object 497 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
462 * @rcu: rcu pointer of the orig_ifinfo object 498 * @rcu: rcu pointer of the orig_ifinfo object
463 */ 499 */
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290f63f6..db3a9ed734cb 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
29struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, 29struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
30 const uint8_t *addr); 30 const uint8_t *addr);
31struct batadv_neigh_node * 31struct batadv_neigh_node *
32batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
33 const struct batadv_hard_iface *hard_iface,
34 const uint8_t *addr);
35struct batadv_neigh_node *
32batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, 36batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
33 const uint8_t *neigh_addr, 37 const uint8_t *neigh_addr,
34 struct batadv_orig_node *orig_node); 38 struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c9ecea..a953d5b196a3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
688 int is_old_ttvn; 688 int is_old_ttvn;
689 689
690 /* check if there is enough data before accessing it */ 690 /* check if there is enough data before accessing it */
691 if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) 691 if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
692 return 0; 692 return 0;
693 693
694 /* create a copy of the skb (in case of for re-routing) to modify it. */ 694 /* create a copy of the skb (in case of for re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
918 918
919 if (ret != NET_RX_SUCCESS) 919 if (ret != NET_RX_SUCCESS)
920 ret = batadv_route_unicast_packet(skb, recv_if); 920 ret = batadv_route_unicast_packet(skb, recv_if);
921 else
922 consume_skb(skb);
921 923
922 return ret; 924 return ret;
923} 925}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f00a385..843febd1e519 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
254 struct batadv_orig_node *orig_node, 254 struct batadv_orig_node *orig_node,
255 unsigned short vid) 255 unsigned short vid)
256{ 256{
257 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 257 struct ethhdr *ethhdr;
258 struct batadv_unicast_packet *unicast_packet; 258 struct batadv_unicast_packet *unicast_packet;
259 int ret = NET_XMIT_DROP; 259 int ret = NET_XMIT_DROP, hdr_size;
260 260
261 if (!orig_node) 261 if (!orig_node)
262 goto out; 262 goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
265 case BATADV_UNICAST: 265 case BATADV_UNICAST:
266 if (!batadv_send_skb_prepare_unicast(skb, orig_node)) 266 if (!batadv_send_skb_prepare_unicast(skb, orig_node))
267 goto out; 267 goto out;
268
269 hdr_size = sizeof(*unicast_packet);
268 break; 270 break;
269 case BATADV_UNICAST_4ADDR: 271 case BATADV_UNICAST_4ADDR:
270 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, 272 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
271 orig_node, 273 orig_node,
272 packet_subtype)) 274 packet_subtype))
273 goto out; 275 goto out;
276
277 hdr_size = sizeof(struct batadv_unicast_4addr_packet);
274 break; 278 break;
275 default: 279 default:
276 /* this function supports UNICAST and UNICAST_4ADDR only. It 280 /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
279 goto out; 283 goto out;
280 } 284 }
281 285
286 ethhdr = (struct ethhdr *)(skb->data + hdr_size);
282 unicast_packet = (struct batadv_unicast_packet *)skb->data; 287 unicast_packet = (struct batadv_unicast_packet *)skb->data;
283 288
284 /* inform the destination node that we are still missing a correct route 289 /* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f675a3e..959dde721c46 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1975 struct hlist_head *head; 1975 struct hlist_head *head;
1976 uint32_t i, crc_tmp, crc = 0; 1976 uint32_t i, crc_tmp, crc = 0;
1977 uint8_t flags; 1977 uint8_t flags;
1978 __be16 tmp_vid;
1978 1979
1979 for (i = 0; i < hash->size; i++) { 1980 for (i = 0; i < hash->size; i++) {
1980 head = &hash->table[i]; 1981 head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
2011 orig_node)) 2012 orig_node))
2012 continue; 2013 continue;
2013 2014
2014 crc_tmp = crc32c(0, &tt_common->vid, 2015 /* use network order to read the VID: this ensures that
2015 sizeof(tt_common->vid)); 2016 * every node reads the bytes in the same order.
2017 */
2018 tmp_vid = htons(tt_common->vid);
2019 crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
2016 2020
2017 /* compute the CRC on flags that have to be kept in sync 2021 /* compute the CRC on flags that have to be kept in sync
2018 * among nodes 2022 * among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
2046 struct hlist_head *head; 2050 struct hlist_head *head;
2047 uint32_t i, crc_tmp, crc = 0; 2051 uint32_t i, crc_tmp, crc = 0;
2048 uint8_t flags; 2052 uint8_t flags;
2053 __be16 tmp_vid;
2049 2054
2050 for (i = 0; i < hash->size; i++) { 2055 for (i = 0; i < hash->size; i++) {
2051 head = &hash->table[i]; 2056 head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
2064 if (tt_common->flags & BATADV_TT_CLIENT_NEW) 2069 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
2065 continue; 2070 continue;
2066 2071
2067 crc_tmp = crc32c(0, &tt_common->vid, 2072 /* use network order to read the VID: this ensures that
2068 sizeof(tt_common->vid)); 2073 * every node reads the bytes in the same order.
2074 */
2075 tmp_vid = htons(tt_common->vid);
2076 crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
2069 2077
2070 /* compute the CRC on flags that have to be kept in sync 2078 /* compute the CRC on flags that have to be kept in sync
2071 * among nodes 2079 * among nodes
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
2262{ 2270{
2263 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; 2271 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
2264 struct batadv_orig_node_vlan *vlan; 2272 struct batadv_orig_node_vlan *vlan;
2273 uint32_t crc;
2265 int i; 2274 int i;
2266 2275
2267 /* check if each received CRC matches the locally stored one */ 2276 /* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
2281 if (!vlan) 2290 if (!vlan)
2282 return false; 2291 return false;
2283 2292
2284 if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) 2293 crc = vlan->tt.crc;
2294 batadv_orig_node_vlan_free_ref(vlan);
2295
2296 if (crc != ntohl(tt_vlan_tmp->crc))
2285 return false; 2297 return false;
2286 } 2298 }
2287 2299
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
3218 3230
3219 spin_lock_bh(&orig_node->tt_lock); 3231 spin_lock_bh(&orig_node->tt_lock);
3220 3232
3221 tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
3222 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, 3233 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
3223 ttvn, tt_change); 3234 ttvn, tt_change);
3224 3235
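Hashing the raw in-memory vid makes the CRC depend on the host's byte order, so little- and big-endian nodes would disagree; converting with htons() first means every node feeds identical bytes to crc32c(). A userspace illustration of the difference, with zlib's crc32() standing in for the kernel's crc32c() (assumption: zlib is available; build with -lz):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <zlib.h>

int main(void)
{
	uint16_t vid = 0x0123;

	/* Host-order bytes differ between little- and big-endian machines,
	 * so this CRC is not stable across nodes. */
	unsigned long crc_host = crc32(0L, (const unsigned char *)&vid,
				       sizeof(vid));

	/* Network order is the same everywhere, so the CRC is too. */
	uint16_t vid_be = htons(vid);
	unsigned long crc_net = crc32(0L, (const unsigned char *)&vid_be,
				      sizeof(vid_be));

	printf("host-order crc:    0x%08lx\n", crc_host);
	printf("network-order crc: 0x%08lx\n", crc_net);
	return 0;
}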
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619db896..d9fb93451442 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session)
430 del_timer(&session->timer); 430 del_timer(&session->timer);
431} 431}
432 432
433static void hidp_process_report(struct hidp_session *session,
434 int type, const u8 *data, int len, int intr)
435{
436 if (len > HID_MAX_BUFFER_SIZE)
437 len = HID_MAX_BUFFER_SIZE;
438
439 memcpy(session->input_buf, data, len);
440 hid_input_report(session->hid, type, session->input_buf, len, intr);
441}
442
433static void hidp_process_handshake(struct hidp_session *session, 443static void hidp_process_handshake(struct hidp_session *session,
434 unsigned char param) 444 unsigned char param)
435{ 445{
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
502 hidp_input_report(session, skb); 512 hidp_input_report(session, skb);
503 513
504 if (session->hid) 514 if (session->hid)
505 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); 515 hidp_process_report(session, HID_INPUT_REPORT,
516 skb->data, skb->len, 0);
506 break; 517 break;
507 518
508 case HIDP_DATA_RTYPE_OTHER: 519 case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
584 hidp_input_report(session, skb); 595 hidp_input_report(session, skb);
585 596
586 if (session->hid) { 597 if (session->hid) {
587 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); 598 hidp_process_report(session, HID_INPUT_REPORT,
599 skb->data, skb->len, 1);
588 BT_DBG("report len %d", skb->len); 600 BT_DBG("report len %d", skb->len);
589 } 601 }
590 } else { 602 } else {
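hidp_process_report() copies incoming report data into a fixed per-session buffer and clamps the length first, so the HID core always receives a buffer of known, sufficient size and an oversized report cannot overflow it. A standalone sketch of the clamp-then-copy pattern (hypothetical names; MAX_BUFFER_SIZE stands in for HID_MAX_BUFFER_SIZE):

#include <string.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_BUFFER_SIZE 64

struct session {
	uint8_t input_buf[MAX_BUFFER_SIZE];
};

/* Clamp, then copy: the consumer only ever sees at most MAX_BUFFER_SIZE
 * bytes, and it sees them in a buffer whose size and lifetime we control. */
static size_t process_report(struct session *s, const uint8_t *data, size_t len)
{
	if (len > sizeof(s->input_buf))
		len = sizeof(s->input_buf);
	memcpy(s->input_buf, data, len);
	return len;		/* length actually handed to the consumer */
}

int main(void)
{
	struct session s;
	uint8_t big_report[200] = { 0 };

	printf("copied %zu of %zu bytes\n",
	       process_report(&s, big_report, sizeof(big_report)),
	       sizeof(big_report));
	return 0;
}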
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab5241400cf7..8798492a6e99 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
24#define __HIDP_H 24#define __HIDP_H
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/hid.h>
27#include <linux/kref.h> 28#include <linux/kref.h>
28#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/l2cap.h> 30#include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@ struct hidp_session {
179 180
180 /* Used in hidp_output_raw_report() */ 181 /* Used in hidp_output_raw_report() */
181 int output_report_success; /* boolean */ 182 int output_report_success; /* boolean */
183
184 /* temporary input buffer */
185 u8 input_buf[HID_MAX_BUFFER_SIZE];
182}; 186};
183 187
184/* HIDP init defines */ 188/* HIDP init defines */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a531afb..63f0455c0bc3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
187 187
188 spin_lock_bh(&br->lock); 188 spin_lock_bh(&br->lock);
189 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { 189 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
190 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 190 /* Mac address will be changed in br_stp_change_bridge_id(). */
191 br_fdb_change_mac_address(br, addr->sa_data);
192 br_stp_change_bridge_id(br, addr->sa_data); 191 br_stp_change_bridge_id(br, addr->sa_data);
193 } 192 }
194 spin_unlock_bh(&br->lock); 193 spin_unlock_bh(&br->lock);
@@ -226,6 +225,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
226 br_netpoll_disable(p); 225 br_netpoll_disable(p);
227} 226}
228 227
228static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
229{
230 struct netpoll *np;
231 int err;
232
233 np = kzalloc(sizeof(*p->np), gfp);
234 if (!np)
235 return -ENOMEM;
236
237 err = __netpoll_setup(np, p->dev, gfp);
238 if (err) {
239 kfree(np);
240 return err;
241 }
242
243 p->np = np;
244 return err;
245}
246
247int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
248{
249 if (!p->br->dev->npinfo)
250 return 0;
251
252 return __br_netpoll_enable(p, gfp);
253}
254
229static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, 255static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
230 gfp_t gfp) 256 gfp_t gfp)
231{ 257{
@@ -236,7 +262,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
236 list_for_each_entry(p, &br->port_list, list) { 262 list_for_each_entry(p, &br->port_list, list) {
237 if (!p->dev) 263 if (!p->dev)
238 continue; 264 continue;
239 err = br_netpoll_enable(p, gfp); 265 err = __br_netpoll_enable(p, gfp);
240 if (err) 266 if (err)
241 goto fail; 267 goto fail;
242 } 268 }
@@ -249,28 +275,6 @@ fail:
249 goto out; 275 goto out;
250} 276}
251 277
252int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
253{
254 struct netpoll *np;
255 int err;
256
257 if (!p->br->dev->npinfo)
258 return 0;
259
260 np = kzalloc(sizeof(*p->np), gfp);
261 if (!np)
262 return -ENOMEM;
263
264 err = __netpoll_setup(np, p->dev, gfp);
265 if (err) {
266 kfree(np);
267 return err;
268 }
269
270 p->np = np;
271 return err;
272}
273
274void br_netpoll_disable(struct net_bridge_port *p) 278void br_netpoll_disable(struct net_bridge_port *p)
275{ 279{
276 struct netpoll *np = p->np; 280 struct netpoll *np = p->np;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a933f4..9203d5a1943f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
27#include "br_private.h" 27#include "br_private.h"
28 28
29static struct kmem_cache *br_fdb_cache __read_mostly; 29static struct kmem_cache *br_fdb_cache __read_mostly;
30static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
31 const unsigned char *addr,
32 __u16 vid);
30static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 33static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
31 const unsigned char *addr, u16 vid); 34 const unsigned char *addr, u16 vid);
32static void fdb_notify(struct net_bridge *br, 35static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
89 call_rcu(&f->rcu, fdb_rcu_free); 92 call_rcu(&f->rcu, fdb_rcu_free);
90} 93}
91 94
95/* Delete a local entry if no other port had the same address. */
96static void fdb_delete_local(struct net_bridge *br,
97 const struct net_bridge_port *p,
98 struct net_bridge_fdb_entry *f)
99{
100 const unsigned char *addr = f->addr.addr;
101 u16 vid = f->vlan_id;
102 struct net_bridge_port *op;
103
104 /* Maybe another port has same hw addr? */
105 list_for_each_entry(op, &br->port_list, list) {
106 if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
107 (!vid || nbp_vlan_find(op, vid))) {
108 f->dst = op;
109 f->added_by_user = 0;
110 return;
111 }
112 }
113
114 /* Maybe bridge device has same hw addr? */
115 if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
116 (!vid || br_vlan_find(br, vid))) {
117 f->dst = NULL;
118 f->added_by_user = 0;
119 return;
120 }
121
122 fdb_delete(br, f);
123}
124
125void br_fdb_find_delete_local(struct net_bridge *br,
126 const struct net_bridge_port *p,
127 const unsigned char *addr, u16 vid)
128{
129 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
130 struct net_bridge_fdb_entry *f;
131
132 spin_lock_bh(&br->hash_lock);
133 f = fdb_find(head, addr, vid);
134 if (f && f->is_local && !f->added_by_user && f->dst == p)
135 fdb_delete_local(br, p, f);
136 spin_unlock_bh(&br->hash_lock);
137}
138
92void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) 139void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
93{ 140{
94 struct net_bridge *br = p->br; 141 struct net_bridge *br = p->br;
95 bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false; 142 struct net_port_vlans *pv = nbp_get_vlan_info(p);
143 bool no_vlan = !pv;
96 int i; 144 int i;
145 u16 vid;
97 146
98 spin_lock_bh(&br->hash_lock); 147 spin_lock_bh(&br->hash_lock);
99 148
@@ -104,38 +153,34 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
104 struct net_bridge_fdb_entry *f; 153 struct net_bridge_fdb_entry *f;
105 154
106 f = hlist_entry(h, struct net_bridge_fdb_entry, hlist); 155 f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
107 if (f->dst == p && f->is_local) { 156 if (f->dst == p && f->is_local && !f->added_by_user) {
108 /* maybe another port has same hw addr? */
109 struct net_bridge_port *op;
110 u16 vid = f->vlan_id;
111 list_for_each_entry(op, &br->port_list, list) {
112 if (op != p &&
113 ether_addr_equal(op->dev->dev_addr,
114 f->addr.addr) &&
115 nbp_vlan_find(op, vid)) {
116 f->dst = op;
117 goto insert;
118 }
119 }
120
121 /* delete old one */ 157 /* delete old one */
122 fdb_delete(br, f); 158 fdb_delete_local(br, p, f);
123insert:
124 /* insert new address, may fail if invalid
125 * address or dup.
126 */
127 fdb_insert(br, p, newaddr, vid);
128 159
129 /* if this port has no vlan information 160 /* if this port has no vlan information
130 * configured, we can safely be done at 161 * configured, we can safely be done at
131 * this point. 162 * this point.
132 */ 163 */
133 if (no_vlan) 164 if (no_vlan)
134 goto done; 165 goto insert;
135 } 166 }
136 } 167 }
137 } 168 }
138 169
170insert:
171 /* insert new address, may fail if invalid address or dup. */
172 fdb_insert(br, p, newaddr, 0);
173
174 if (no_vlan)
175 goto done;
176
177 /* Now add entries for every VLAN configured on the port.
178 * This function runs under RTNL so the bitmap will not change
179 * from under us.
180 */
181 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
182 fdb_insert(br, p, newaddr, vid);
183
139done: 184done:
140 spin_unlock_bh(&br->hash_lock); 185 spin_unlock_bh(&br->hash_lock);
141} 186}
@@ -146,10 +191,12 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
146 struct net_port_vlans *pv; 191 struct net_port_vlans *pv;
147 u16 vid = 0; 192 u16 vid = 0;
148 193
194 spin_lock_bh(&br->hash_lock);
195
149 /* If old entry was unassociated with any port, then delete it. */ 196 /* If old entry was unassociated with any port, then delete it. */
150 f = __br_fdb_get(br, br->dev->dev_addr, 0); 197 f = __br_fdb_get(br, br->dev->dev_addr, 0);
151 if (f && f->is_local && !f->dst) 198 if (f && f->is_local && !f->dst)
152 fdb_delete(br, f); 199 fdb_delete_local(br, NULL, f);
153 200
154 fdb_insert(br, NULL, newaddr, 0); 201 fdb_insert(br, NULL, newaddr, 0);
155 202
@@ -159,14 +206,16 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
159 */ 206 */
160 pv = br_get_vlan_info(br); 207 pv = br_get_vlan_info(br);
161 if (!pv) 208 if (!pv)
162 return; 209 goto out;
163 210
164 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) { 211 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 212 f = __br_fdb_get(br, br->dev->dev_addr, vid);
166 if (f && f->is_local && !f->dst) 213 if (f && f->is_local && !f->dst)
167 fdb_delete(br, f); 214 fdb_delete_local(br, NULL, f);
168 fdb_insert(br, NULL, newaddr, vid); 215 fdb_insert(br, NULL, newaddr, vid);
169 } 216 }
217out:
218 spin_unlock_bh(&br->hash_lock);
170} 219}
171 220
172void br_fdb_cleanup(unsigned long _data) 221void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
235 284
236 if (f->is_static && !do_all) 285 if (f->is_static && !do_all)
237 continue; 286 continue;
238 /*
239 * if multiple ports all have the same device address
240 * then when one port is deleted, assign
241 * the local entry to other port
242 */
243 if (f->is_local) {
244 struct net_bridge_port *op;
245 list_for_each_entry(op, &br->port_list, list) {
246 if (op != p &&
247 ether_addr_equal(op->dev->dev_addr,
248 f->addr.addr)) {
249 f->dst = op;
250 goto skip_delete;
251 }
252 }
253 }
254 287
255 fdb_delete(br, f); 288 if (f->is_local)
256 skip_delete: ; 289 fdb_delete_local(br, p, f);
290 else
291 fdb_delete(br, f);
257 } 292 }
258 } 293 }
259 spin_unlock_bh(&br->hash_lock); 294 spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
397 fdb->vlan_id = vid; 432 fdb->vlan_id = vid;
398 fdb->is_local = 0; 433 fdb->is_local = 0;
399 fdb->is_static = 0; 434 fdb->is_static = 0;
435 fdb->added_by_user = 0;
400 fdb->updated = fdb->used = jiffies; 436 fdb->updated = fdb->used = jiffies;
401 hlist_add_head_rcu(&fdb->hlist, head); 437 hlist_add_head_rcu(&fdb->hlist, head);
402 } 438 }
@@ -447,7 +483,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
447} 483}
448 484
449void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 485void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
450 const unsigned char *addr, u16 vid) 486 const unsigned char *addr, u16 vid, bool added_by_user)
451{ 487{
452 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 488 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
453 struct net_bridge_fdb_entry *fdb; 489 struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
473 /* fastpath: update of existing entry */ 509 /* fastpath: update of existing entry */
474 fdb->dst = source; 510 fdb->dst = source;
475 fdb->updated = jiffies; 511 fdb->updated = jiffies;
512 if (unlikely(added_by_user))
513 fdb->added_by_user = 1;
476 } 514 }
477 } else { 515 } else {
478 spin_lock(&br->hash_lock); 516 spin_lock(&br->hash_lock);
479 if (likely(!fdb_find(head, addr, vid))) { 517 if (likely(!fdb_find(head, addr, vid))) {
480 fdb = fdb_create(head, source, addr, vid); 518 fdb = fdb_create(head, source, addr, vid);
481 if (fdb) 519 if (fdb) {
520 if (unlikely(added_by_user))
521 fdb->added_by_user = 1;
482 fdb_notify(br, fdb, RTM_NEWNEIGH); 522 fdb_notify(br, fdb, RTM_NEWNEIGH);
523 }
483 } 524 }
484 /* else we lose race and someone else inserts 525 /* else we lose race and someone else inserts
485 * it first, don't bother updating 526 * it first, don't bother updating
@@ -647,6 +688,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
647 688
648 modified = true; 689 modified = true;
649 } 690 }
691 fdb->added_by_user = 1;
650 692
651 fdb->used = jiffies; 693 fdb->used = jiffies;
652 if (modified) { 694 if (modified) {
@@ -664,7 +706,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
664 706
665 if (ndm->ndm_flags & NTF_USE) { 707 if (ndm->ndm_flags & NTF_USE) {
666 rcu_read_lock(); 708 rcu_read_lock();
667 br_fdb_update(p->br, p, addr, vid); 709 br_fdb_update(p->br, p, addr, vid, true);
668 rcu_read_unlock(); 710 rcu_read_unlock();
669 } else { 711 } else {
670 spin_lock_bh(&p->br->hash_lock); 712 spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@ out:
749 return err; 791 return err;
750} 792}
751 793
752int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, 794static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
753 u16 vlan)
754{ 795{
755 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)]; 796 struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
756 struct net_bridge_fdb_entry *fdb; 797 struct net_bridge_fdb_entry *fdb;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d666ba1..54d207d3a31c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
389 if (br->dev->needed_headroom < dev->needed_headroom) 389 if (br->dev->needed_headroom < dev->needed_headroom)
390 br->dev->needed_headroom = dev->needed_headroom; 390 br->dev->needed_headroom = dev->needed_headroom;
391 391
392 if (br_fdb_insert(br, p, dev->dev_addr, 0))
393 netdev_err(dev, "failed insert local address bridge forwarding table\n");
394
392 spin_lock_bh(&br->lock); 395 spin_lock_bh(&br->lock);
393 changed_addr = br_stp_recalculate_bridge_id(br); 396 changed_addr = br_stp_recalculate_bridge_id(br);
394 397
@@ -404,9 +407,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
404 407
405 dev_set_mtu(br->dev, br_min_mtu(br)); 408 dev_set_mtu(br->dev, br_min_mtu(br));
406 409
407 if (br_fdb_insert(br, p, dev->dev_addr, 0))
408 netdev_err(dev, "failed insert local address bridge forwarding table\n");
409
410 kobject_uevent(&p->kobj, KOBJ_ADD); 410 kobject_uevent(&p->kobj, KOBJ_ADD);
411 411
412 return 0; 412 return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d308d6..28d544627422 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
77 /* insert into forwarding database after filtering to avoid spoofing */ 77 /* insert into forwarding database after filtering to avoid spoofing */
78 br = p->br; 78 br = p->br;
79 if (p->flags & BR_LEARNING) 79 if (p->flags & BR_LEARNING)
80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); 80 br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
81 81
82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && 82 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
83 br_multicast_rcv(br, p, skb, vid)) 83 br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@ static int br_handle_local_finish(struct sk_buff *skb)
148 148
149 br_vlan_get_tag(skb, &vid); 149 br_vlan_get_tag(skb, &vid);
150 if (p->flags & BR_LEARNING) 150 if (p->flags & BR_LEARNING)
151 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid); 151 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
152 return 0; /* process further */ 152 return 0; /* process further */
153} 153}
154 154
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd12333c59b..3ba11bc99b65 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@ struct net_bridge_fdb_entry
104 mac_addr addr; 104 mac_addr addr;
105 unsigned char is_local; 105 unsigned char is_local;
106 unsigned char is_static; 106 unsigned char is_static;
107 unsigned char added_by_user;
107 __u16 vlan_id; 108 __u16 vlan_id;
108}; 109};
109 110
@@ -370,6 +371,9 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
370int br_fdb_init(void); 371int br_fdb_init(void);
371void br_fdb_fini(void); 372void br_fdb_fini(void);
372void br_fdb_flush(struct net_bridge *br); 373void br_fdb_flush(struct net_bridge *br);
374void br_fdb_find_delete_local(struct net_bridge *br,
375 const struct net_bridge_port *p,
376 const unsigned char *addr, u16 vid);
373void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr); 377void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
374void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr); 378void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
375void br_fdb_cleanup(unsigned long arg); 379void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
383int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 387int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
384 const unsigned char *addr, u16 vid); 388 const unsigned char *addr, u16 vid);
385void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, 389void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
386 const unsigned char *addr, u16 vid); 390 const unsigned char *addr, u16 vid, bool added_by_user);
387int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
388 391
389int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 392int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
390 struct net_device *dev, const unsigned char *addr); 393 struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
584int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags); 587int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
585int br_vlan_delete(struct net_bridge *br, u16 vid); 588int br_vlan_delete(struct net_bridge *br, u16 vid);
586void br_vlan_flush(struct net_bridge *br); 589void br_vlan_flush(struct net_bridge *br);
590bool br_vlan_find(struct net_bridge *br, u16 vid);
587int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); 591int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
588int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags); 592int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
589int nbp_vlan_delete(struct net_bridge_port *port, u16 vid); 593int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@ static inline void br_vlan_flush(struct net_bridge *br)
665{ 669{
666} 670}
667 671
672static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
673{
674 return false;
675}
676
668static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) 677static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
669{ 678{
670 return -EOPNOTSUPP; 679 return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3e40de..189ba1e7d851 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
194 194
195 wasroot = br_is_root_bridge(br); 195 wasroot = br_is_root_bridge(br);
196 196
197 br_fdb_change_mac_address(br, addr);
198
197 memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN); 199 memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
198 memcpy(br->bridge_id.addr, addr, ETH_ALEN); 200 memcpy(br->bridge_id.addr, addr, ETH_ALEN);
199 memcpy(br->dev->dev_addr, addr, ETH_ALEN); 201 memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a0151c..8249ca764c79 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
275 if (!pv) 275 if (!pv)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 spin_lock_bh(&br->hash_lock); 278 br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
279 fdb_delete_by_addr(br, br->dev->dev_addr, vid);
280 spin_unlock_bh(&br->hash_lock);
281 279
282 __vlan_del(pv, vid); 280 __vlan_del(pv, vid);
283 return 0; 281 return 0;
@@ -295,6 +293,25 @@ void br_vlan_flush(struct net_bridge *br)
295 __vlan_flush(pv); 293 __vlan_flush(pv);
296} 294}
297 295
296bool br_vlan_find(struct net_bridge *br, u16 vid)
297{
298 struct net_port_vlans *pv;
299 bool found = false;
300
301 rcu_read_lock();
302 pv = rcu_dereference(br->vlan_info);
303
304 if (!pv)
305 goto out;
306
307 if (test_bit(vid, pv->vlan_bitmap))
308 found = true;
309
310out:
311 rcu_read_unlock();
312 return found;
313}
314
298int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) 315int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
299{ 316{
300 if (!rtnl_trylock()) 317 if (!rtnl_trylock())
@@ -359,9 +376,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
359 if (!pv) 376 if (!pv)
360 return -EINVAL; 377 return -EINVAL;
361 378
362 spin_lock_bh(&port->br->hash_lock); 379 br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
363 fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
364 spin_unlock_bh(&port->br->hash_lock);
365 380
366 return __vlan_del(pv, vid); 381 return __vlan_del(pv, vid);
367} 382}
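
[Note] The br_vlan_find() addition above is a plain RCU-side lookup into the per-bridge VLAN bitmap. A minimal caller sketch (the function name below is illustrative, not part of the patch) showing the intended use: validate a VID before acting on it, without taking the bridge hash lock.

static int check_vid_sketch(struct net_bridge *br, u16 vid)
{
	/* br_vlan_find() only takes rcu_read_lock() internally, so it is
	 * usable from contexts that must not acquire br->hash_lock.
	 */
	if (vid && !br_vlan_find(br, vid))
		return -EINVAL;		/* VLAN not configured on this bridge */
	return 0;
}
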
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159435cf..edbca468fa73 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <net/caif/caif_device.h> 23#include <net/caif/caif_device.h>
24#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
25#include <net/caif/caif_dev.h>
25#include <net/caif/cfpkt.h> 26#include <net/caif/cfpkt.h>
26#include <net/caif/cfcnfg.h> 27#include <net/caif/cfcnfg.h>
27#include <net/caif/cfserl.h> 28#include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793d1b3b..a6e115463052 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
15#include <net/caif/caif_layer.h> 15#include <net/caif/caif_layer.h>
16#include <net/caif/cfsrvl.h> 16#include <net/caif/cfsrvl.h>
17#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
18#include <net/caif/caif_dev.h>
18 19
19#define SRVL_CTRL_PKT_SIZE 1 20#define SRVL_CTRL_PKT_SIZE 1
20#define SRVL_FLOW_OFF 0x81 21#define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874a366d..a27f8aad9e99 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
57#include <linux/skbuff.h> 57#include <linux/skbuff.h>
58#include <linux/can.h> 58#include <linux/can.h>
59#include <linux/can/core.h> 59#include <linux/can/core.h>
60#include <linux/can/skb.h>
60#include <linux/ratelimit.h> 61#include <linux/ratelimit.h>
61#include <net/net_namespace.h> 62#include <net/net_namespace.h>
62#include <net/sock.h> 63#include <net/sock.h>
@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
290 return -ENOMEM; 291 return -ENOMEM;
291 } 292 }
292 293
293 newskb->sk = skb->sk; 294 can_skb_set_owner(newskb, skb->sk);
294 newskb->ip_summed = CHECKSUM_UNNECESSARY; 295 newskb->ip_summed = CHECKSUM_UNNECESSARY;
295 newskb->pkt_type = PACKET_BROADCAST; 296 newskb->pkt_type = PACKET_BROADCAST;
296 } 297 }
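
[Note] The can_skb_set_owner() conversions here and in bcm.c below replace the bare skb->sk assignment for self-generated, looped-back frames. A sketch of the intended semantics, assuming the helper in include/linux/can/skb.h (not shown in this diff) pins the socket with a reference and a matching destructor; the real implementation may differ in detail.

static void can_skb_destructor_sketch(struct sk_buff *skb)
{
	sock_put(skb->sk);		/* drop the reference taken below */
}

static void can_skb_set_owner_sketch(struct sk_buff *skb, struct sock *sk)
{
	if (sk) {
		sock_hold(sk);		/* keep the socket alive while the
					 * looped-back frame is in flight */
		skb->destructor = can_skb_destructor_sketch;
		skb->sk = sk;
	}
}
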
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b214c7..dcb75c0e66c1 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
268 268
269 /* send with loopback */ 269 /* send with loopback */
270 skb->dev = dev; 270 skb->dev = dev;
271 skb->sk = op->sk; 271 can_skb_set_owner(skb, op->sk);
272 can_send(skb, 1); 272 can_send(skb, 1);
273 273
274 /* update statistics */ 274 /* update statistics */
@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1223 1223
1224 can_skb_prv(skb)->ifindex = dev->ifindex; 1224 can_skb_prv(skb)->ifindex = dev->ifindex;
1225 skb->dev = dev; 1225 skb->dev = dev;
1226 skb->sk = sk; 1226 can_skb_set_owner(skb, sk);
1227 err = can_send(skb, 1); /* send with loopback */ 1227 err = can_send(skb, 1); /* send with loopback */
1228 dev_put(dev); 1228 dev_put(dev);
1229 1229
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d852324..8be757cca2ec 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -715,6 +715,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
715 715
716 skb->dev = dev; 716 skb->dev = dev;
717 skb->sk = sk; 717 skb->sk = sk;
718 skb->priority = sk->sk_priority;
718 719
719 err = can_send(skb, ro->loopback); 720 err = can_send(skb, ro->loopback);
720 721
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
2420 * 2. No high memory really exists on this machine. 2420 * 2. No high memory really exists on this machine.
2421 */ 2421 */
2422 2422
2423static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2423static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
2424{ 2424{
2425#ifdef CONFIG_HIGHMEM 2425#ifdef CONFIG_HIGHMEM
2426 int i; 2426 int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2495} 2495}
2496 2496
2497static netdev_features_t harmonize_features(struct sk_buff *skb, 2497static netdev_features_t harmonize_features(struct sk_buff *skb,
2498 netdev_features_t features) 2498 const struct net_device *dev,
2499 netdev_features_t features)
2499{ 2500{
2500 if (skb->ip_summed != CHECKSUM_NONE && 2501 if (skb->ip_summed != CHECKSUM_NONE &&
2501 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2502 !can_checksum_protocol(features, skb_network_protocol(skb))) {
2502 features &= ~NETIF_F_ALL_CSUM; 2503 features &= ~NETIF_F_ALL_CSUM;
2503 } else if (illegal_highdma(skb->dev, skb)) { 2504 } else if (illegal_highdma(dev, skb)) {
2504 features &= ~NETIF_F_SG; 2505 features &= ~NETIF_F_SG;
2505 } 2506 }
2506 2507
2507 return features; 2508 return features;
2508} 2509}
2509 2510
2510netdev_features_t netif_skb_features(struct sk_buff *skb) 2511netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2512 const struct net_device *dev)
2511{ 2513{
2512 __be16 protocol = skb->protocol; 2514 __be16 protocol = skb->protocol;
2513 netdev_features_t features = skb->dev->features; 2515 netdev_features_t features = dev->features;
2514 2516
2515 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2517 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
2516 features &= ~NETIF_F_GSO_MASK; 2518 features &= ~NETIF_F_GSO_MASK;
2517 2519
2518 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2519 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2520 protocol = veh->h_vlan_encapsulated_proto; 2522 protocol = veh->h_vlan_encapsulated_proto;
2521 } else if (!vlan_tx_tag_present(skb)) { 2523 } else if (!vlan_tx_tag_present(skb)) {
2522 return harmonize_features(skb, features); 2524 return harmonize_features(skb, dev, features);
2523 } 2525 }
2524 2526
2525 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2527 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2526 NETIF_F_HW_VLAN_STAG_TX); 2528 NETIF_F_HW_VLAN_STAG_TX);
2527 2529
2528 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2530 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2532 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2531 NETIF_F_HW_VLAN_STAG_TX; 2533 NETIF_F_HW_VLAN_STAG_TX;
2532 2534
2533 return harmonize_features(skb, features); 2535 return harmonize_features(skb, dev, features);
2534} 2536}
2535EXPORT_SYMBOL(netif_skb_features); 2537EXPORT_SYMBOL(netif_skb_dev_features);
2536 2538
2537int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2539int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2538 struct netdev_queue *txq) 2540 struct netdev_queue *txq)
@@ -2803,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
2803 * the BH enable code must have IRQs enabled so that it will not deadlock. 2805 * the BH enable code must have IRQs enabled so that it will not deadlock.
2804 * --BLG 2806 * --BLG
2805 */ 2807 */
2806int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) 2808static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2807{ 2809{
2808 struct net_device *dev = skb->dev; 2810 struct net_device *dev = skb->dev;
2809 struct netdev_queue *txq; 2811 struct netdev_queue *txq;
@@ -4637,7 +4639,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4637} 4639}
4638EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); 4640EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4639 4641
4640int netdev_adjacent_sysfs_add(struct net_device *dev, 4642static int netdev_adjacent_sysfs_add(struct net_device *dev,
4641 struct net_device *adj_dev, 4643 struct net_device *adj_dev,
4642 struct list_head *dev_list) 4644 struct list_head *dev_list)
4643{ 4645{
@@ -4647,7 +4649,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
4647 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), 4649 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4648 linkname); 4650 linkname);
4649} 4651}
4650void netdev_adjacent_sysfs_del(struct net_device *dev, 4652static void netdev_adjacent_sysfs_del(struct net_device *dev,
4651 char *name, 4653 char *name,
4652 struct list_head *dev_list) 4654 struct list_head *dev_list)
4653{ 4655{
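
[Note] The netif_skb_features() to netif_skb_dev_features() change threads an explicit device through the feature computation so callers can evaluate offloads against a device other than skb->dev. A hedged sketch of a forwarding-path caller (function name illustrative); the ip_forward.c hunk later in this diff uses exactly this shape.

static struct sk_buff *segment_for_output_sketch(struct sk_buff *skb,
						 const struct net_device *out_dev)
{
	/* Ask what the *output* device can offload, then software-segment
	 * the GSO skb with GSO itself masked out of the feature set.
	 */
	netdev_features_t features = netif_skb_dev_features(skb, out_dev);

	return skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
}
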
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
745 attach_rules(&ops->rules_list, dev); 745 attach_rules(&ops->rules_list, dev);
746 break; 746 break;
747 747
748 case NETDEV_CHANGENAME:
749 list_for_each_entry(ops, &net->rules_ops, list) {
750 detach_rules(&ops->rules_list, dev);
751 attach_rules(&ops->rules_list, dev);
752 }
753 break;
754
748 case NETDEV_UNREGISTER: 755 case NETDEV_UNREGISTER:
749 list_for_each_entry(ops, &net->rules_ops, list) 756 list_for_each_entry(ops, &net->rules_ops, list)
750 detach_rules(&ops->rules_list, dev); 757 detach_rules(&ops->rules_list, dev);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
323 return poff; 323 return poff;
324} 324}
325 325
326static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
327{
328 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
329 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
330 dev->name, queue_index,
331 dev->real_num_tx_queues);
332 return 0;
333 }
334 return queue_index;
335}
336
337static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 326static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
338{ 327{
339#ifdef CONFIG_XPS 328#ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
372#endif 361#endif
373} 362}
374 363
375u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 364static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
376{ 365{
377 struct sock *sk = skb->sk; 366 struct sock *sk = skb->sk;
378 int queue_index = sk_tx_queue_get(sk); 367 int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
392 381
393 return queue_index; 382 return queue_index;
394} 383}
395EXPORT_SYMBOL(__netdev_pick_tx);
396 384
397struct netdev_queue *netdev_pick_tx(struct net_device *dev, 385struct netdev_queue *netdev_pick_tx(struct net_device *dev,
398 struct sk_buff *skb, 386 struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
403 if (dev->real_num_tx_queues != 1) { 391 if (dev->real_num_tx_queues != 1) {
404 const struct net_device_ops *ops = dev->netdev_ops; 392 const struct net_device_ops *ops = dev->netdev_ops;
405 if (ops->ndo_select_queue) 393 if (ops->ndo_select_queue)
406 queue_index = ops->ndo_select_queue(dev, skb, 394 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
407 accel_priv); 395 __netdev_pick_tx);
408 else 396 else
409 queue_index = __netdev_pick_tx(dev, skb); 397 queue_index = __netdev_pick_tx(dev, skb);
410 398
411 if (!accel_priv) 399 if (!accel_priv)
412 queue_index = dev_cap_txqueue(dev, queue_index); 400 queue_index = netdev_cap_txqueue(dev, queue_index);
413 } 401 }
414 402
415 skb_set_queue_mapping(skb, queue_index); 403 skb_set_queue_mapping(skb, queue_index);
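
[Note] With __netdev_pick_tx() now static, drivers reach the core queue-selection policy only through the fallback argument passed to ndo_select_queue(). A sketch of a driver-side implementation, assuming the select_queue_fallback_t callback type through which this change hands __netdev_pick_tx to drivers (the driver and its policy below are illustrative).

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	/* Pin link-control traffic to queue 0, defer everything else to
	 * the core policy (XPS / socket hash) via the fallback.
	 */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	return fallback(dev, skb);
}
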
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..a664f7829a6d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
948{ 948{
949 char *cur=opt, *delim; 949 char *cur=opt, *delim;
950 int ipv6; 950 int ipv6;
951 bool ipversion_set = false;
951 952
952 if (*cur != '@') { 953 if (*cur != '@') {
953 if ((delim = strchr(cur, '@')) == NULL) 954 if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
960 cur++; 961 cur++;
961 962
962 if (*cur != '/') { 963 if (*cur != '/') {
964 ipversion_set = true;
963 if ((delim = strchr(cur, '/')) == NULL) 965 if ((delim = strchr(cur, '/')) == NULL)
964 goto parse_failed; 966 goto parse_failed;
965 *delim = 0; 967 *delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
1002 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); 1004 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
1003 if (ipv6 < 0) 1005 if (ipv6 < 0)
1004 goto parse_failed; 1006 goto parse_failed;
1005 else if (np->ipv6 != (bool)ipv6) 1007 else if (ipversion_set && np->ipv6 != (bool)ipv6)
1006 goto parse_failed; 1008 goto parse_failed;
1007 else 1009 else
1008 np->ipv6 = (bool)ipv6; 1010 np->ipv6 = (bool)ipv6;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..1a0dac2ef9ad 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
374 if (!master_dev) 374 if (!master_dev)
375 return 0; 375 return 0;
376 ops = master_dev->rtnl_link_ops; 376 ops = master_dev->rtnl_link_ops;
377 if (!ops->get_slave_size) 377 if (!ops || !ops->get_slave_size)
378 return 0; 378 return 0;
379 /* IFLA_INFO_SLAVE_DATA + nested data */ 379 /* IFLA_INFO_SLAVE_DATA + nested data */
380 return nla_total_size(sizeof(struct nlattr)) + 380 return nla_total_size(sizeof(struct nlattr)) +
@@ -1963,16 +1963,21 @@ replay:
1963 1963
1964 dev->ifindex = ifm->ifi_index; 1964 dev->ifindex = ifm->ifi_index;
1965 1965
1966 if (ops->newlink) 1966 if (ops->newlink) {
1967 err = ops->newlink(net, dev, tb, data); 1967 err = ops->newlink(net, dev, tb, data);
1968 else 1968 /* Drivers should call free_netdev() in ->destructor
1969 * and unregister it on failure so that device could be
1970 * finally freed in rtnl_unlock.
1971 */
1972 if (err < 0)
1973 goto out;
1974 } else {
1969 err = register_netdevice(dev); 1975 err = register_netdevice(dev);
1970 1976 if (err < 0) {
1971 if (err < 0) { 1977 free_netdev(dev);
1972 free_netdev(dev); 1978 goto out;
1973 goto out; 1979 }
1974 } 1980 }
1975
1976 err = rtnl_configure_link(dev, ifm); 1981 err = rtnl_configure_link(dev, ifm);
1977 if (err < 0) 1982 if (err < 0)
1978 unregister_netdevice(dev); 1983 unregister_netdevice(dev);
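
[Note] The rtnl_newlink() rework stops freeing the device on a failed ->newlink() and instead relies on the driver to arrange the free itself, as the new comment states. A sketch of the driver-side convention this depends on (setup function name illustrative): set dev->destructor so the device is released once the failure path unregisters it.

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* Freed by the core once the device is unregistered; this is what
	 * lets rtnl_newlink() skip its own free_netdev() on ->newlink()
	 * failure.
	 */
	dev->destructor = free_netdev;
}
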
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..5b6a9431b017 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1775 while (order) { 1775 while (order) {
1776 if (npages >= 1 << order) { 1776 if (npages >= 1 << order) {
1777 page = alloc_pages(sk->sk_allocation | 1777 page = alloc_pages(sk->sk_allocation |
1778 __GFP_COMP | __GFP_NOWARN, 1778 __GFP_COMP |
1779 __GFP_NOWARN |
1780 __GFP_NORETRY,
1779 order); 1781 order);
1780 if (page) 1782 if (page)
1781 goto fill_page; 1783 goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
1845 gfp_t gfp = prio; 1847 gfp_t gfp = prio;
1846 1848
1847 if (order) 1849 if (order)
1848 gfp |= __GFP_COMP | __GFP_NOWARN; 1850 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
1849 pfrag->page = alloc_pages(gfp, order); 1851 pfrag->page = alloc_pages(gfp, order);
1850 if (likely(pfrag->page)) { 1852 if (likely(pfrag->page)) {
1851 pfrag->offset = 0; 1853 pfrag->offset = 0;
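
[Note] Both sock.c hunks add __GFP_NORETRY to the high-order attempts so a failed allocation falls back to a smaller order instead of triggering heavy reclaim or the OOM killer. The surrounding loop has roughly this shape (sketch, names illustrative).

static struct page *alloc_frag_pages_sketch(gfp_t gfp, unsigned int max_order)
{
	unsigned int order;
	struct page *page;

	for (order = max_order; order; order--) {
		/* Cheap attempt: no retries, no warning on failure. */
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN |
				   __GFP_NORETRY, order);
		if (page)
			return page;
	}

	/* Order-0 last resort keeps the caller's original flags. */
	return alloc_pages(gfp, 0);
}
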
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81a1f3e..62b5828acde0 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
8#include "tfrc.h" 8#include "tfrc.h"
9 9
10#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 10#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
11static bool tfrc_debug; 11bool tfrc_debug;
12module_param(tfrc_debug, bool, 0644); 12module_param(tfrc_debug, bool, 0644);
13MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); 13MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
14#endif 14#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c76ae0..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
21#include "packet_history.h" 21#include "packet_history.h"
22 22
23#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 23#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
24extern bool tfrc_debug;
24#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) 25#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
25#else 26#else
26#define tfrc_pr_debug(format, a...) 27#define tfrc_pr_debug(format, a...)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcbca832..4c04848953bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@ static struct notifier_block dn_dev_notifier = {
2104 .notifier_call = dn_device_event, 2104 .notifier_call = dn_device_event,
2105}; 2105};
2106 2106
2107extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2108
2109static struct packet_type dn_dix_packet_type __read_mostly = { 2107static struct packet_type dn_dix_packet_type __read_mostly = {
2110 .type = cpu_to_be16(ETH_P_DNA_RT), 2108 .type = cpu_to_be16(ETH_P_DNA_RT),
2111 .func = dn_route_rcv, 2109 .func = dn_route_rcv,
@@ -2353,9 +2351,6 @@ static const struct proto_ops dn_proto_ops = {
2353 .sendpage = sock_no_sendpage, 2351 .sendpage = sock_no_sendpage,
2354}; 2352};
2355 2353
2356void dn_register_sysctl(void);
2357void dn_unregister_sysctl(void);
2358
2359MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); 2354MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2360MODULE_AUTHOR("Linux DECnet Project Team"); 2355MODULE_AUTHOR("Linux DECnet Project Team");
2361MODULE_LICENSE("GPL"); 2356MODULE_LICENSE("GPL");
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0af4d0..8edfea5da572 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@ static int lowpan_header_create(struct sk_buff *skb,
106 unsigned short type, const void *_daddr, 106 unsigned short type, const void *_daddr,
107 const void *_saddr, unsigned int len) 107 const void *_saddr, unsigned int len)
108{ 108{
109 struct ipv6hdr *hdr;
110 const u8 *saddr = _saddr; 109 const u8 *saddr = _saddr;
111 const u8 *daddr = _daddr; 110 const u8 *daddr = _daddr;
112 struct ieee802154_addr sa, da; 111 struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@ static int lowpan_header_create(struct sk_buff *skb,
117 if (type != ETH_P_IPV6) 116 if (type != ETH_P_IPV6)
118 return 0; 117 return 0;
119 118
120 hdr = ipv6_hdr(skb);
121
122 if (!saddr) 119 if (!saddr)
123 saddr = dev->dev_addr; 120 saddr = dev->dev_addr;
124 121
@@ -533,7 +530,27 @@ static struct header_ops lowpan_header_ops = {
533 .create = lowpan_header_create, 530 .create = lowpan_header_create,
534}; 531};
535 532
533static struct lock_class_key lowpan_tx_busylock;
534static struct lock_class_key lowpan_netdev_xmit_lock_key;
535
536static void lowpan_set_lockdep_class_one(struct net_device *dev,
537 struct netdev_queue *txq,
538 void *_unused)
539{
540 lockdep_set_class(&txq->_xmit_lock,
541 &lowpan_netdev_xmit_lock_key);
542}
543
544
545static int lowpan_dev_init(struct net_device *dev)
546{
547 netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
548 dev->qdisc_tx_busylock = &lowpan_tx_busylock;
549 return 0;
550}
551
536static const struct net_device_ops lowpan_netdev_ops = { 552static const struct net_device_ops lowpan_netdev_ops = {
553 .ndo_init = lowpan_dev_init,
537 .ndo_start_xmit = lowpan_xmit, 554 .ndo_start_xmit = lowpan_xmit,
538 .ndo_set_mac_address = lowpan_set_address, 555 .ndo_set_mac_address = lowpan_set_address,
539}; 556};
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3c2c1c..bdbf68bb2e2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@ static size_t inet_nlmsg_size(void)
1443 + nla_total_size(4) /* IFA_LOCAL */ 1443 + nla_total_size(4) /* IFA_LOCAL */
1444 + nla_total_size(4) /* IFA_BROADCAST */ 1444 + nla_total_size(4) /* IFA_BROADCAST */
1445 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ 1445 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1446 + nla_total_size(4); /* IFA_FLAGS */ 1446 + nla_total_size(4) /* IFA_FLAGS */
1447 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
1447} 1448}
1448 1449
1449static inline u32 cstamp_delta(unsigned long cstamp) 1450static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..f3869c186d97 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
39#include <net/route.h> 39#include <net/route.h>
40#include <net/xfrm.h> 40#include <net/xfrm.h>
41 41
42static bool ip_may_fragment(const struct sk_buff *skb)
43{
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
45 !skb->local_df;
46}
47
48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
49{
50 if (skb->len <= mtu || skb->local_df)
51 return false;
52
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
54 return false;
55
56 return true;
57}
58
59static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
60{
61 unsigned int mtu;
62
63 if (skb->local_df || !skb_is_gso(skb))
64 return false;
65
66 mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
67
68 /* if seglen > mtu, do software segmentation for IP fragmentation on
69 * output. DF bit cannot be set since ip_forward would have sent
70 * icmp error.
71 */
72 return skb_gso_network_seglen(skb) > mtu;
73}
74
75/* called if GSO skb needs to be fragmented on forward */
76static int ip_forward_finish_gso(struct sk_buff *skb)
77{
78 struct dst_entry *dst = skb_dst(skb);
79 netdev_features_t features;
80 struct sk_buff *segs;
81 int ret = 0;
82
83 features = netif_skb_dev_features(skb, dst->dev);
84 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
85 if (IS_ERR(segs)) {
86 kfree_skb(skb);
87 return -ENOMEM;
88 }
89
90 consume_skb(skb);
91
92 do {
93 struct sk_buff *nskb = segs->next;
94 int err;
95
96 segs->next = NULL;
97 err = dst_output(segs);
98
99 if (err && ret == 0)
100 ret = err;
101 segs = nskb;
102 } while (segs);
103
104 return ret;
105}
106
42static int ip_forward_finish(struct sk_buff *skb) 107static int ip_forward_finish(struct sk_buff *skb)
43{ 108{
44 struct ip_options *opt = &(IPCB(skb)->opt); 109 struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
49 if (unlikely(opt->optlen)) 114 if (unlikely(opt->optlen))
50 ip_forward_options(skb); 115 ip_forward_options(skb);
51 116
117 if (ip_gso_exceeds_dst_mtu(skb))
118 return ip_forward_finish_gso(skb);
119
52 return dst_output(skb); 120 return dst_output(skb);
53} 121}
54 122
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
91 159
92 IPCB(skb)->flags |= IPSKB_FORWARDED; 160 IPCB(skb)->flags |= IPSKB_FORWARDED;
93 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 161 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
94 if (unlikely(skb->len > mtu && !skb_is_gso(skb) && 162 if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
95 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
96 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); 163 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
97 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 164 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
98 htonl(mtu)); 165 htonl(mtu));
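
[Note] The new helpers compare the MTU against what actually goes on the wire. For a TCP GSO aggregate that is the per-segment size, not skb->len; a simplified sketch of the quantity skb_gso_network_seglen() yields (assumption: TCP GSO only, the real helper also covers UFO).

static unsigned int gso_network_seglen_sketch(const struct sk_buff *skb)
{
	/* L3 header bytes preceding the transport header */
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	/* each emitted segment carries the L3 header, the TCP header and
	 * one gso_size worth of payload */
	return hdr_len + tcp_hdrlen(skb) + skb_shinfo(skb)->gso_size;
}

So a large GSO skb with gso_size 1448 and plain 20-byte IP and TCP headers is forwarded through a 1500-byte MTU without an ICMP error, and only falls back to ip_forward_finish_gso() when the segments themselves would not fit.
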
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..50228be5c17b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -101,28 +101,22 @@ static void tunnel_dst_reset_all(struct ip_tunnel *t)
101 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); 101 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
102} 102}
103 103
104static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t) 104static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
105{ 105{
106 struct dst_entry *dst; 106 struct dst_entry *dst;
107 107
108 rcu_read_lock(); 108 rcu_read_lock();
109 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst); 109 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
110 if (dst) 110 if (dst) {
111 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
112 rcu_read_unlock();
113 tunnel_dst_reset(t);
114 return NULL;
115 }
111 dst_hold(dst); 116 dst_hold(dst);
112 rcu_read_unlock();
113 return dst;
114}
115
116static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
117{
118 struct dst_entry *dst = tunnel_dst_get(t);
119
120 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
121 tunnel_dst_reset(t);
122 return NULL;
123 } 117 }
124 118 rcu_read_unlock();
125 return dst; 119 return (struct rtable *)dst;
126} 120}
127 121
128/* Often modified stats are per cpu, other are shared (netdev->stats) */ 122/* Often modified stats are per cpu, other are shared (netdev->stats) */
@@ -584,7 +578,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
584 struct flowi4 fl4; 578 struct flowi4 fl4;
585 u8 tos, ttl; 579 u8 tos, ttl;
586 __be16 df; 580 __be16 df;
587 struct rtable *rt = NULL; /* Route to the other host */ 581 struct rtable *rt; /* Route to the other host */
588 unsigned int max_headroom; /* The extra header space needed */ 582 unsigned int max_headroom; /* The extra header space needed */
589 __be32 dst; 583 __be32 dst;
590 int err; 584 int err;
@@ -657,8 +651,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
657 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, 651 init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
658 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); 652 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
659 653
660 if (connected) 654 rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
661 rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
662 655
663 if (!rt) { 656 if (!rt) {
664 rt = ip_route_output_key(tunnel->net, &fl4); 657 rt = ip_route_output_key(tunnel->net, &fl4);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138fa523..b3e86ea7b71b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
273 273
274 msleep(1); 274 msleep(1);
275 275
276 if time_before(jiffies, next_msg) 276 if (time_before(jiffies, next_msg))
277 continue; 277 continue;
278 278
279 elapsed = jiffies_to_msecs(jiffies - start); 279 elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910cfa92..a26ce035e3fa 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@ config NFT_CHAIN_NAT_IPV4
61 packet transformations such as the source, destination address and 61 packet transformations such as the source, destination address and
62 source and destination ports. 62 source and destination ports.
63 63
64config NFT_REJECT_IPV4
65 depends on NF_TABLES_IPV4
66 default NFT_REJECT
67 tristate
68
64config NF_TABLES_ARP 69config NF_TABLES_ARP
65 depends on NF_TABLES 70 depends on NF_TABLES
66 tristate "ARP nf_tables support" 71 tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d58420..90b82405331e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
30obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o 30obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
31obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o 31obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
32obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o 32obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
33obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
33obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o 34obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
34 35
35# generic IP tables 36# generic IP tables
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059dd621..574f7ebba0b6 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
229 ret = nf_ct_expect_related(rtcp_exp); 229 ret = nf_ct_expect_related(rtcp_exp);
230 if (ret == 0) 230 if (ret == 0)
231 break; 231 break;
232 else if (ret != -EBUSY) { 232 else if (ret == -EBUSY) {
233 nf_ct_unexpect_related(rtp_exp);
234 continue;
235 } else if (ret < 0) {
233 nf_ct_unexpect_related(rtp_exp); 236 nf_ct_unexpect_related(rtp_exp);
234 nated_port = 0; 237 nated_port = 0;
235 break; 238 break;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..e79718a382f2
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2013 Eric Leblond <eric@regit.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Development of this code funded by Astaro AG (http://www.astaro.com/)
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h>
19#include <net/icmp.h>
20#include <net/netfilter/ipv4/nf_reject.h>
21#include <net/netfilter/nft_reject.h>
22
23void nft_reject_ipv4_eval(const struct nft_expr *expr,
24 struct nft_data data[NFT_REG_MAX + 1],
25 const struct nft_pktinfo *pkt)
26{
27 struct nft_reject *priv = nft_expr_priv(expr);
28
29 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach(pkt->skb, priv->icmp_code);
32 break;
33 case NFT_REJECT_TCP_RST:
34 nf_send_reset(pkt->skb, pkt->ops->hooknum);
35 break;
36 }
37
38 data[NFT_REG_VERDICT].verdict = NF_DROP;
39}
40EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
41
42static struct nft_expr_type nft_reject_ipv4_type;
43static const struct nft_expr_ops nft_reject_ipv4_ops = {
44 .type = &nft_reject_ipv4_type,
45 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
46 .eval = nft_reject_ipv4_eval,
47 .init = nft_reject_init,
48 .dump = nft_reject_dump,
49};
50
51static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
52 .family = NFPROTO_IPV4,
53 .name = "reject",
54 .ops = &nft_reject_ipv4_ops,
55 .policy = nft_reject_policy,
56 .maxattr = NFTA_REJECT_MAX,
57 .owner = THIS_MODULE,
58};
59
60static int __init nft_reject_ipv4_module_init(void)
61{
62 return nft_register_expr(&nft_reject_ipv4_type);
63}
64
65static void __exit nft_reject_ipv4_module_exit(void)
66{
67 nft_unregister_expr(&nft_reject_ipv4_type);
68}
69
70module_init(nft_reject_ipv4_module_init);
71module_exit(nft_reject_ipv4_module_exit);
72
73MODULE_LICENSE("GPL");
74MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
75MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
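
[Note] This module only provides the IPv4 send side; parsing and dumping are delegated to the shared nft_reject core via nft_reject_init() and nft_reject_dump(). A sketch of the shared expression state it relies on, assuming definitions in include/net/netfilter/nft_reject.h that are not part of this hunk.

struct nft_reject_sketch {
	enum nft_reject_types	type;		/* NFT_REJECT_ICMP_UNREACH or
						 * NFT_REJECT_TCP_RST */
	u8			icmp_code;	/* ICMP code for the unreach case */
};
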
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b48921c..4c011ec69ed4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1597,6 +1597,7 @@ static int __mkroute_input(struct sk_buff *skb,
1597 rth->rt_gateway = 0; 1597 rth->rt_gateway = 0;
1598 rth->rt_uses_gateway = 0; 1598 rth->rt_uses_gateway = 0;
1599 INIT_LIST_HEAD(&rth->rt_uncached); 1599 INIT_LIST_HEAD(&rth->rt_uncached);
1600 RT_CACHE_STAT_INC(in_slow_tot);
1600 1601
1601 rth->dst.input = ip_forward; 1602 rth->dst.input = ip_forward;
1602 rth->dst.output = ip_output; 1603 rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1695 fl4.daddr = daddr; 1696 fl4.daddr = daddr;
1696 fl4.saddr = saddr; 1697 fl4.saddr = saddr;
1697 err = fib_lookup(net, &fl4, &res); 1698 err = fib_lookup(net, &fl4, &res);
1698 if (err != 0) 1699 if (err != 0) {
1700 if (!IN_DEV_FORWARD(in_dev))
1701 err = -EHOSTUNREACH;
1699 goto no_route; 1702 goto no_route;
1700 1703 }
1701 RT_CACHE_STAT_INC(in_slow_tot);
1702 1704
1703 if (res.type == RTN_BROADCAST) 1705 if (res.type == RTN_BROADCAST)
1704 goto brd_input; 1706 goto brd_input;
@@ -1712,8 +1714,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1712 goto local_input; 1714 goto local_input;
1713 } 1715 }
1714 1716
1715 if (!IN_DEV_FORWARD(in_dev)) 1717 if (!IN_DEV_FORWARD(in_dev)) {
1718 err = -EHOSTUNREACH;
1716 goto no_route; 1719 goto no_route;
1720 }
1717 if (res.type != RTN_UNICAST) 1721 if (res.type != RTN_UNICAST)
1718 goto martian_destination; 1722 goto martian_destination;
1719 1723
@@ -1768,6 +1772,7 @@ local_input:
1768 rth->rt_gateway = 0; 1772 rth->rt_gateway = 0;
1769 rth->rt_uses_gateway = 0; 1773 rth->rt_uses_gateway = 0;
1770 INIT_LIST_HEAD(&rth->rt_uncached); 1774 INIT_LIST_HEAD(&rth->rt_uncached);
1775 RT_CACHE_STAT_INC(in_slow_tot);
1771 if (res.type == RTN_UNREACHABLE) { 1776 if (res.type == RTN_UNREACHABLE) {
1772 rth->dst.input= ip_error; 1777 rth->dst.input= ip_error;
1773 rth->dst.error= -err; 1778 rth->dst.error= -err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4475b3bb494d..9f3a2db9109e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2229,7 +2229,7 @@ adjudge_to_death:
2229 /* This is a (useful) BSD violating of the RFC. There is a 2229 /* This is a (useful) BSD violating of the RFC. There is a
2230 * problem with TCP as specified in that the other end could 2230 * problem with TCP as specified in that the other end could
2231 * keep a socket open forever with no application left this end. 2231 * keep a socket open forever with no application left this end.
2232 * We use a 3 minute timeout (about the same as BSD) then kill 2232 * We use a 1 minute timeout (about the same as BSD) then kill
2233 * our end. If they send after that then tough - BUT: long enough 2233 * our end. If they send after that then tough - BUT: long enough
2234 * that we won't make the old 4*rto = almost no time - whoops 2234 * that we won't make the old 4*rto = almost no time - whoops
2235 * reset mistake. 2235 * reset mistake.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 65cf90e063d5..227cba79fa6b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -671,6 +671,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
671{ 671{
672 struct tcp_sock *tp = tcp_sk(sk); 672 struct tcp_sock *tp = tcp_sk(sk);
673 long m = mrtt; /* RTT */ 673 long m = mrtt; /* RTT */
674 u32 srtt = tp->srtt;
674 675
675 /* The following amusing code comes from Jacobson's 676 /* The following amusing code comes from Jacobson's
676 * article in SIGCOMM '88. Note that rtt and mdev 677 * article in SIGCOMM '88. Note that rtt and mdev
@@ -688,11 +689,9 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
688 * does not matter how to _calculate_ it. Seems, it was trap 689 * does not matter how to _calculate_ it. Seems, it was trap
689 * that VJ failed to avoid. 8) 690 * that VJ failed to avoid. 8)
690 */ 691 */
691 if (m == 0) 692 if (srtt != 0) {
692 m = 1; 693 m -= (srtt >> 3); /* m is now error in rtt est */
693 if (tp->srtt != 0) { 694 srtt += m; /* rtt = 7/8 rtt + 1/8 new */
694 m -= (tp->srtt >> 3); /* m is now error in rtt est */
695 tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */
696 if (m < 0) { 695 if (m < 0) {
697 m = -m; /* m is now abs(error) */ 696 m = -m; /* m is now abs(error) */
698 m -= (tp->mdev >> 2); /* similar update on mdev */ 697 m -= (tp->mdev >> 2); /* similar update on mdev */
@@ -723,11 +722,12 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
723 } 722 }
724 } else { 723 } else {
725 /* no previous measure. */ 724 /* no previous measure. */
726 tp->srtt = m << 3; /* take the measured time to be rtt */ 725 srtt = m << 3; /* take the measured time to be rtt */
727 tp->mdev = m << 1; /* make sure rto = 3*rtt */ 726 tp->mdev = m << 1; /* make sure rto = 3*rtt */
728 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); 727 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
729 tp->rtt_seq = tp->snd_nxt; 728 tp->rtt_seq = tp->snd_nxt;
730 } 729 }
730 tp->srtt = max(1U, srtt);
731} 731}
732 732
733/* Set the sk_pacing_rate to allow proper sizing of TSO packets. 733/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -746,8 +746,10 @@ static void tcp_update_pacing_rate(struct sock *sk)
746 746
747 rate *= max(tp->snd_cwnd, tp->packets_out); 747 rate *= max(tp->snd_cwnd, tp->packets_out);
748 748
749 /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3), 749 /* Correction for small srtt and scheduling constraints.
750 * be conservative and assume srtt = 1 (125 us instead of 1.25 ms) 750 * For small rtt, consider noise is too high, and use
751 * the minimal value (srtt = 1 -> 125 us for HZ=1000)
752 *
751 * We probably need usec resolution in the future. 753 * We probably need usec resolution in the future.
752 * Note: This also takes care of possible srtt=0 case, 754 * Note: This also takes care of possible srtt=0 case,
753 * when tcp_rtt_estimator() was not yet called. 755 * when tcp_rtt_estimator() was not yet called.
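
[Note] The tcp_rtt_estimator() hunk above keeps srtt in the usual fixed-point form, 8 times the RTT, and replaces the old "if (m == 0) m = 1" special case with a single clamp at the end. A worked example of the update with illustrative numbers:

/* Previous srtt = 800 (i.e. 100 ticks stored as 8*RTT), new sample
 * m = 120 ticks:
 *
 *	m    -= srtt >> 3;	-> m = 120 - 100 = 20
 *	srtt += m;		-> srtt = 820, i.e. 102.5 ticks
 *				   = 7/8 * 100 + 1/8 * 120
 *
 * tp->srtt = max(1U, srtt) then guarantees the stored value never
 * collapses to 0, even if a measured sample of 0 drives it there.
 */
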
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b85eab8..3be16727f058 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
698 if ((1 << sk->sk_state) & 698 if ((1 << sk->sk_state) &
699 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | 699 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
700 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) 700 TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
701 tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); 701 tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
702 0, GFP_ATOMIC);
702} 703}
703/* 704/*
704 * One tasklet per cpu tries to send more skbs. 705 * One tasklet per cpu tries to send more skbs.
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1904 1905
1905 if (atomic_read(&sk->sk_wmem_alloc) > limit) { 1906 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1906 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1907 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1907 break; 1908 /* It is possible TX completion already happened
1909 * before we set TSQ_THROTTLED, so we must
1910 * test again the condition.
1911 * We abuse smp_mb__after_clear_bit() because
1912 * there is no smp_mb__after_set_bit() yet
1913 */
1914 smp_mb__after_clear_bit();
1915 if (atomic_read(&sk->sk_wmem_alloc) > limit)
1916 break;
1908 } 1917 }
1909 1918
1910 limit = mss_now; 1919 limit = mss_now;
@@ -1977,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
1977 /* Schedule a loss probe in 2*RTT for SACK capable connections 1986 /* Schedule a loss probe in 2*RTT for SACK capable connections
1978 * in Open state, that are either limited by cwnd or application. 1987 * in Open state, that are either limited by cwnd or application.
1979 */ 1988 */
1980 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || 1989 if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
1981 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 1990 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
1982 return false; 1991 return false;
1983 1992
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 25f5cee3a08a..88b4023ecfcf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
17static DEFINE_SPINLOCK(udp_offload_lock); 17static DEFINE_SPINLOCK(udp_offload_lock);
18static struct udp_offload_priv __rcu *udp_offload_base __read_mostly; 18static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
19 19
20#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
21
20struct udp_offload_priv { 22struct udp_offload_priv {
21 struct udp_offload *offload; 23 struct udp_offload *offload;
22 struct rcu_head rcu; 24 struct rcu_head rcu;
@@ -100,8 +102,7 @@ out:
100 102
101int udp_add_offload(struct udp_offload *uo) 103int udp_add_offload(struct udp_offload *uo)
102{ 104{
103 struct udp_offload_priv __rcu **head = &udp_offload_base; 105 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
104 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
105 106
106 if (!new_offload) 107 if (!new_offload)
107 return -ENOMEM; 108 return -ENOMEM;
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo)
109 new_offload->offload = uo; 110 new_offload->offload = uo;
110 111
111 spin_lock(&udp_offload_lock); 112 spin_lock(&udp_offload_lock);
112 rcu_assign_pointer(new_offload->next, rcu_dereference(*head)); 113 new_offload->next = udp_offload_base;
113 rcu_assign_pointer(*head, new_offload); 114 rcu_assign_pointer(udp_offload_base, new_offload);
114 spin_unlock(&udp_offload_lock); 115 spin_unlock(&udp_offload_lock);
115 116
116 return 0; 117 return 0;
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo)
130 131
131 spin_lock(&udp_offload_lock); 132 spin_lock(&udp_offload_lock);
132 133
133 uo_priv = rcu_dereference(*head); 134 uo_priv = udp_deref_protected(*head);
134 for (; uo_priv != NULL; 135 for (; uo_priv != NULL;
135 uo_priv = rcu_dereference(*head)) { 136 uo_priv = udp_deref_protected(*head)) {
136
137 if (uo_priv->offload == uo) { 137 if (uo_priv->offload == uo) {
138 rcu_assign_pointer(*head, rcu_dereference(uo_priv->next)); 138 rcu_assign_pointer(*head,
139 udp_deref_protected(uo_priv->next));
139 goto unlock; 140 goto unlock;
140 } 141 }
141 head = &uo_priv->next; 142 head = &uo_priv->next;
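
[Note] udp_deref_protected() wraps rcu_dereference_protected() with a lockdep check on udp_offload_lock: list writers must hold the spinlock, while the receive path keeps using rcu_dereference() under rcu_read_lock(). A sketch of another writer-side walk using the same convention (the helper below is illustrative, not part of the patch).

static bool udp_offload_registered_sketch(const struct udp_offload *uo)
{
	struct udp_offload_priv *p;
	bool found = false;

	spin_lock(&udp_offload_lock);
	for (p = udp_deref_protected(udp_offload_base); p;
	     p = udp_deref_protected(p->next))
		if (p->offload == uo)
			found = true;
	spin_unlock(&udp_offload_lock);

	return found;
}
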
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad235690684c..fdbfeca36d63 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev)
2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
2785 addrconf_add_linklocal(idev, &addr); 2785 addrconf_add_linklocal(idev, &addr);
2786 else
2787 addrconf_prefix_route(&addr, 64, dev, 0, 0);
2786} 2788}
2787#endif 2789#endif
2788 2790
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f81f59686f21..f2610e157660 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -414,7 +414,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
414 addr_type = ipv6_addr_type(&hdr->daddr); 414 addr_type = ipv6_addr_type(&hdr->daddr);
415 415
416 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) || 416 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
417 ipv6_anycast_destination(skb)) 417 ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
418 saddr = &hdr->daddr; 418 saddr = &hdr->daddr;
419 419
420 /* 420 /*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..070a2fae2375 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
342 return mtu; 342 return mtu;
343} 343}
344 344
345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
346{
347 if (skb->len <= mtu || skb->local_df)
348 return false;
349
350 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
351 return true;
352
353 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
354 return false;
355
356 return true;
357}
358
345int ip6_forward(struct sk_buff *skb) 359int ip6_forward(struct sk_buff *skb)
346{ 360{
347 struct dst_entry *dst = skb_dst(skb); 361 struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
466 if (mtu < IPV6_MIN_MTU) 480 if (mtu < IPV6_MIN_MTU)
467 mtu = IPV6_MIN_MTU; 481 mtu = IPV6_MIN_MTU;
468 482
469 if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || 483 if (ip6_pkt_too_big(skb, mtu)) {
470 (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
471 /* Again, force OUTPUT device used as source address */ 484 /* Again, force OUTPUT device used as source address */
472 skb->dev = dst->dev; 485 skb->dev = dst->dev;
473 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 486 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 35750df744dc..4bff1f297e39 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -50,6 +50,11 @@ config NFT_CHAIN_NAT_IPV6
50 packet transformations such as the source, destination address and 50 packet transformations such as the source, destination address and
51 source and destination ports. 51 source and destination ports.
52 52
53config NFT_REJECT_IPV6
54 depends on NF_TABLES_IPV6
55 default NFT_REJECT
56 tristate
57
53config IP6_NF_IPTABLES 58config IP6_NF_IPTABLES
54 tristate "IP6 tables support (required for filtering)" 59 tristate "IP6 tables support (required for filtering)"
55 depends on INET && IPV6 60 depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d1b4928f34f7..70d3dd66f2cd 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
27obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o 27obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
28obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o 28obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
29obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o 29obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
30obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
30 31
31# matches 32# matches
32obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o 33obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
new file mode 100644
index 000000000000..0bc19fa87821
--- /dev/null
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2013 Eric Leblond <eric@regit.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Development of this code funded by Astaro AG (http://www.astaro.com/)
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/netlink.h>
16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h>
19#include <net/netfilter/nft_reject.h>
20#include <net/netfilter/ipv6/nf_reject.h>
21
22void nft_reject_ipv6_eval(const struct nft_expr *expr,
23 struct nft_data data[NFT_REG_MAX + 1],
24 const struct nft_pktinfo *pkt)
25{
26 struct nft_reject *priv = nft_expr_priv(expr);
27 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
28
29 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach6(net, pkt->skb, priv->icmp_code,
32 pkt->ops->hooknum);
33 break;
34 case NFT_REJECT_TCP_RST:
35 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
36 break;
37 }
38
39 data[NFT_REG_VERDICT].verdict = NF_DROP;
40}
41EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
42
43static struct nft_expr_type nft_reject_ipv6_type;
44static const struct nft_expr_ops nft_reject_ipv6_ops = {
45 .type = &nft_reject_ipv6_type,
46 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
47 .eval = nft_reject_ipv6_eval,
48 .init = nft_reject_init,
49 .dump = nft_reject_dump,
50};
51
52static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
53 .family = NFPROTO_IPV6,
54 .name = "reject",
55 .ops = &nft_reject_ipv6_ops,
56 .policy = nft_reject_policy,
57 .maxattr = NFTA_REJECT_MAX,
58 .owner = THIS_MODULE,
59};
60
61static int __init nft_reject_ipv6_module_init(void)
62{
63 return nft_register_expr(&nft_reject_ipv6_type);
64}
65
66static void __exit nft_reject_ipv6_module_exit(void)
67{
68 nft_unregister_expr(&nft_reject_ipv6_type);
69}
70
71module_init(nft_reject_ipv6_module_init);
72module_exit(nft_reject_ipv6_module_exit);
73
74MODULE_LICENSE("GPL");
75MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
76MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 994e28bfb32e..00b2a6d1c009 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -52,18 +52,12 @@
52#include <net/p8022.h> 52#include <net/p8022.h>
53#include <net/psnap.h> 53#include <net/psnap.h>
54#include <net/sock.h> 54#include <net/sock.h>
55#include <net/datalink.h>
55#include <net/tcp_states.h> 56#include <net/tcp_states.h>
57#include <net/net_namespace.h>
56 58
57#include <asm/uaccess.h> 59#include <asm/uaccess.h>
58 60
59#ifdef CONFIG_SYSCTL
60extern void ipx_register_sysctl(void);
61extern void ipx_unregister_sysctl(void);
62#else
63#define ipx_register_sysctl()
64#define ipx_unregister_sysctl()
65#endif
66
67/* Configuration Variables */ 61/* Configuration Variables */
68static unsigned char ipxcfg_max_hops = 16; 62static unsigned char ipxcfg_max_hops = 16;
69static char ipxcfg_auto_select_primary; 63static char ipxcfg_auto_select_primary;
@@ -84,15 +78,6 @@ DEFINE_SPINLOCK(ipx_interfaces_lock);
84struct ipx_interface *ipx_primary_net; 78struct ipx_interface *ipx_primary_net;
85struct ipx_interface *ipx_internal_net; 79struct ipx_interface *ipx_internal_net;
86 80
87extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
88 unsigned char *node);
89extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
90extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
91 struct iovec *iov, size_t len, int noblock);
92extern int ipxrtr_route_skb(struct sk_buff *skb);
93extern struct ipx_route *ipxrtr_lookup(__be32 net);
94extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
95
96struct ipx_interface *ipx_interfaces_head(void) 81struct ipx_interface *ipx_interfaces_head(void)
97{ 82{
98 struct ipx_interface *rc = NULL; 83 struct ipx_interface *rc = NULL;
@@ -1986,9 +1971,6 @@ static struct notifier_block ipx_dev_notifier = {
1986 .notifier_call = ipxitf_device_event, 1971 .notifier_call = ipxitf_device_event,
1987}; 1972};
1988 1973
1989extern struct datalink_proto *make_EII_client(void);
1990extern void destroy_EII_client(struct datalink_proto *);
1991
1992static const unsigned char ipx_8022_type = 0xE0; 1974static const unsigned char ipx_8022_type = 0xE0;
1993static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; 1975static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
1994static const char ipx_EII_err_msg[] __initconst = 1976static const char ipx_EII_err_msg[] __initconst =
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 30f4519b092f..c1f03185c5e1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -20,15 +20,11 @@ DEFINE_RWLOCK(ipx_routes_lock);
20 20
21extern struct ipx_interface *ipx_internal_net; 21extern struct ipx_interface *ipx_internal_net;
22 22
23extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
24extern struct ipx_interface *ipxitf_find_using_net(__be32 net); 23extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
25extern int ipxitf_demux_socket(struct ipx_interface *intrfc, 24extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
26 struct sk_buff *skb, int copy); 25 struct sk_buff *skb, int copy);
27extern int ipxitf_demux_socket(struct ipx_interface *intrfc, 26extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
28 struct sk_buff *skb, int copy); 27 struct sk_buff *skb, int copy);
29extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
30 char *node);
31extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
32 28
33struct ipx_route *ipxrtr_lookup(__be32 net) 29struct ipx_route *ipxrtr_lookup(__be32 net)
34{ 30{
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f9ae9b85d4c1..453e974287d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1021,8 +1021,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
1021 IEEE80211_P2P_OPPPS_ENABLE_BIT; 1021 IEEE80211_P2P_OPPPS_ENABLE_BIT;
1022 1022
1023 err = ieee80211_assign_beacon(sdata, &params->beacon); 1023 err = ieee80211_assign_beacon(sdata, &params->beacon);
1024 if (err < 0) 1024 if (err < 0) {
1025 ieee80211_vif_release_channel(sdata);
1025 return err; 1026 return err;
1027 }
1026 changed |= err; 1028 changed |= err;
1027 1029
1028 err = drv_start_ap(sdata->local, sdata); 1030 err = drv_start_ap(sdata->local, sdata);
@@ -1032,6 +1034,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
1032 if (old) 1034 if (old)
1033 kfree_rcu(old, rcu_head); 1035 kfree_rcu(old, rcu_head);
1034 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 1036 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
1037 ieee80211_vif_release_channel(sdata);
1035 return err; 1038 return err;
1036 } 1039 }
1037 1040
@@ -1090,8 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1090 kfree(sdata->u.ap.next_beacon); 1093 kfree(sdata->u.ap.next_beacon);
1091 sdata->u.ap.next_beacon = NULL; 1094 sdata->u.ap.next_beacon = NULL;
1092 1095
1093 cancel_work_sync(&sdata->u.ap.request_smps_work);
1094
1095 /* turn off carrier for this interface and dependent VLANs */ 1096 /* turn off carrier for this interface and dependent VLANs */
1096 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 1097 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
1097 netif_carrier_off(vlan->dev); 1098 netif_carrier_off(vlan->dev);
@@ -1103,6 +1104,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1103 kfree_rcu(old_beacon, rcu_head); 1104 kfree_rcu(old_beacon, rcu_head);
1104 if (old_probe_resp) 1105 if (old_probe_resp)
1105 kfree_rcu(old_probe_resp, rcu_head); 1106 kfree_rcu(old_probe_resp, rcu_head);
1107 sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
1106 1108
1107 __sta_info_flush(sdata, true); 1109 __sta_info_flush(sdata, true);
1108 ieee80211_free_keys(sdata, true); 1110 ieee80211_free_keys(sdata, true);
@@ -2638,6 +2640,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2638 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); 2640 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
2639 INIT_LIST_HEAD(&roc->dependents); 2641 INIT_LIST_HEAD(&roc->dependents);
2640 2642
2643 /*
2644 * cookie is either the roc cookie (for normal roc)
2645 * or the SKB (for mgmt TX)
2646 */
2647 if (!txskb) {
2648 /* local->mtx protects this */
2649 local->roc_cookie_counter++;
2650 roc->cookie = local->roc_cookie_counter;
2651 /* wow, you wrapped 64 bits ... more likely a bug */
2652 if (WARN_ON(roc->cookie == 0)) {
2653 roc->cookie = 1;
2654 local->roc_cookie_counter++;
2655 }
2656 *cookie = roc->cookie;
2657 } else {
2658 *cookie = (unsigned long)txskb;
2659 }
2660
2641 /* if there's one pending or we're scanning, queue this one */ 2661 /* if there's one pending or we're scanning, queue this one */
2642 if (!list_empty(&local->roc_list) || 2662 if (!list_empty(&local->roc_list) ||
2643 local->scanning || local->radar_detect_enabled) 2663 local->scanning || local->radar_detect_enabled)
@@ -2772,24 +2792,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2772 if (!queued) 2792 if (!queued)
2773 list_add_tail(&roc->list, &local->roc_list); 2793 list_add_tail(&roc->list, &local->roc_list);
2774 2794
2775 /*
2776 * cookie is either the roc cookie (for normal roc)
2777 * or the SKB (for mgmt TX)
2778 */
2779 if (!txskb) {
2780 /* local->mtx protects this */
2781 local->roc_cookie_counter++;
2782 roc->cookie = local->roc_cookie_counter;
2783 /* wow, you wrapped 64 bits ... more likely a bug */
2784 if (WARN_ON(roc->cookie == 0)) {
2785 roc->cookie = 1;
2786 local->roc_cookie_counter++;
2787 }
2788 *cookie = roc->cookie;
2789 } else {
2790 *cookie = (unsigned long)txskb;
2791 }
2792
2793 return 0; 2795 return 0;
2794} 2796}
2795 2797
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index fab7b91923e0..70dd013de836 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,7 +466,9 @@ void ieee80211_request_smps_ap_work(struct work_struct *work)
466 u.ap.request_smps_work); 466 u.ap.request_smps_work);
467 467
468 sdata_lock(sdata); 468 sdata_lock(sdata);
469 __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode); 469 if (sdata_dereference(sdata->u.ap.beacon, sdata))
470 __ieee80211_request_smps_ap(sdata,
471 sdata->u.ap.driver_smps_mode);
470 sdata_unlock(sdata); 472 sdata_unlock(sdata);
471} 473}
472 474
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 771080ec7212..2796a198728f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -695,12 +695,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
695 struct cfg80211_bss *cbss; 695 struct cfg80211_bss *cbss;
696 struct beacon_data *presp; 696 struct beacon_data *presp;
697 struct sta_info *sta; 697 struct sta_info *sta;
698 int active_ibss;
699 u16 capability; 698 u16 capability;
700 699
701 active_ibss = ieee80211_sta_active_ibss(sdata); 700 if (!is_zero_ether_addr(ifibss->bssid)) {
702
703 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
704 capability = WLAN_CAPABILITY_IBSS; 701 capability = WLAN_CAPABILITY_IBSS;
705 702
706 if (ifibss->privacy) 703 if (ifibss->privacy)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3dfd20a453ab..ce1c44370610 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,20 +418,24 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
418 return ret; 418 return ret;
419 } 419 }
420 420
421 mutex_lock(&local->iflist_mtx);
422 rcu_assign_pointer(local->monitor_sdata, sdata);
423 mutex_unlock(&local->iflist_mtx);
424
421 mutex_lock(&local->mtx); 425 mutex_lock(&local->mtx);
422 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, 426 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
423 IEEE80211_CHANCTX_EXCLUSIVE); 427 IEEE80211_CHANCTX_EXCLUSIVE);
424 mutex_unlock(&local->mtx); 428 mutex_unlock(&local->mtx);
425 if (ret) { 429 if (ret) {
430 mutex_lock(&local->iflist_mtx);
431 rcu_assign_pointer(local->monitor_sdata, NULL);
432 mutex_unlock(&local->iflist_mtx);
433 synchronize_net();
426 drv_remove_interface(local, sdata); 434 drv_remove_interface(local, sdata);
427 kfree(sdata); 435 kfree(sdata);
428 return ret; 436 return ret;
429 } 437 }
430 438
431 mutex_lock(&local->iflist_mtx);
432 rcu_assign_pointer(local->monitor_sdata, sdata);
433 mutex_unlock(&local->iflist_mtx);
434
435 return 0; 439 return 0;
436} 440}
437 441
@@ -770,12 +774,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
770 774
771 ieee80211_roc_purge(local, sdata); 775 ieee80211_roc_purge(local, sdata);
772 776
773 if (sdata->vif.type == NL80211_IFTYPE_STATION) 777 switch (sdata->vif.type) {
778 case NL80211_IFTYPE_STATION:
774 ieee80211_mgd_stop(sdata); 779 ieee80211_mgd_stop(sdata);
775 780 break;
776 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 781 case NL80211_IFTYPE_ADHOC:
777 ieee80211_ibss_stop(sdata); 782 ieee80211_ibss_stop(sdata);
778 783 break;
784 case NL80211_IFTYPE_AP:
785 cancel_work_sync(&sdata->u.ap.request_smps_work);
786 break;
787 default:
788 break;
789 }
779 790
780 /* 791 /*
781 * Remove all stations associated with this interface. 792 * Remove all stations associated with this interface.
@@ -1046,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev)
1046 1057
1047static u16 ieee80211_netdev_select_queue(struct net_device *dev, 1058static u16 ieee80211_netdev_select_queue(struct net_device *dev,
1048 struct sk_buff *skb, 1059 struct sk_buff *skb,
1049 void *accel_priv) 1060 void *accel_priv,
1061 select_queue_fallback_t fallback)
1050{ 1062{
1051 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1063 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
1052} 1064}
@@ -1064,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
1064 1076
1065static u16 ieee80211_monitor_select_queue(struct net_device *dev, 1077static u16 ieee80211_monitor_select_queue(struct net_device *dev,
1066 struct sk_buff *skb, 1078 struct sk_buff *skb,
1067 void *accel_priv) 1079 void *accel_priv,
1080 select_queue_fallback_t fallback)
1068{ 1081{
1069 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1082 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1070 struct ieee80211_local *local = sdata->local; 1083 struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27c990bf2320..97a02d3f7d87 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -878,7 +878,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
878 } 878 }
879 879
880 /* adjust first fragment's length */ 880 /* adjust first fragment's length */
881 skb->len = hdrlen + per_fragm; 881 skb_trim(skb, hdrlen + per_fragm);
882 return 0; 882 return 0;
883} 883}
884 884
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c37467562fd0..e9410d17619d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -513,7 +513,6 @@ config NFT_QUEUE
513 513
514config NFT_REJECT 514config NFT_REJECT
515 depends on NF_TABLES 515 depends on NF_TABLES
516 depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
517 default m if NETFILTER_ADVANCED=n 516 default m if NETFILTER_ADVANCED=n
518 tristate "Netfilter nf_tables reject support" 517 tristate "Netfilter nf_tables reject support"
519 help 518 help
@@ -521,6 +520,11 @@ config NFT_REJECT
521 explicitly deny and notify via TCP reset/ICMP informational errors 520 explicitly deny and notify via TCP reset/ICMP informational errors
522 unallowed traffic. 521 unallowed traffic.
523 522
523config NFT_REJECT_INET
524 depends on NF_TABLES_INET
525 default NFT_REJECT
526 tristate
527
524config NFT_COMPAT 528config NFT_COMPAT
525 depends on NF_TABLES 529 depends on NF_TABLES
526 depends on NETFILTER_XTABLES 530 depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ee9c4de5f8ed..bffdad774da7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
79obj-$(CONFIG_NFT_NAT) += nft_nat.o 79obj-$(CONFIG_NFT_NAT) += nft_nat.o
80obj-$(CONFIG_NFT_QUEUE) += nft_queue.o 80obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
81obj-$(CONFIG_NFT_REJECT) += nft_reject.o 81obj-$(CONFIG_NFT_REJECT) += nft_reject.o
82obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
82obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o 83obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
83obj-$(CONFIG_NFT_HASH) += nft_hash.o 84obj-$(CONFIG_NFT_HASH) += nft_hash.o
84obj-$(CONFIG_NFT_COUNTER) += nft_counter.o 85obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 59a1a85bcb3e..a8eb0a89326a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
871 cp->protocol = p->protocol; 871 cp->protocol = p->protocol;
872 ip_vs_addr_set(p->af, &cp->caddr, p->caddr); 872 ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
873 cp->cport = p->cport; 873 cp->cport = p->cport;
874 ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr); 874 /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
875 cp->vport = p->vport;
876 /* proto should only be IPPROTO_IP if d_addr is a fwmark */
877 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, 875 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
878 &cp->daddr, daddr); 876 &cp->vaddr, p->vaddr);
877 cp->vport = p->vport;
878 ip_vs_addr_set(p->af, &cp->daddr, daddr);
879 cp->dport = dport; 879 cp->dport = dport;
880 cp->flags = flags; 880 cp->flags = flags;
881 cp->fwmark = fwmark; 881 cp->fwmark = fwmark;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8824ed0ccc9c..356bef519fe5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,6 +312,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
312 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0); 312 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
313} 313}
314 314
315static inline bool
316nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
317 const struct nf_conntrack_tuple *tuple,
318 u16 zone)
319{
320 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
321
322 /* A conntrack can be recreated with the equal tuple,
323 * so we need to check that the conntrack is confirmed
324 */
325 return nf_ct_tuple_equal(tuple, &h->tuple) &&
326 nf_ct_zone(ct) == zone &&
327 nf_ct_is_confirmed(ct);
328}
329
315/* 330/*
316 * Warning : 331 * Warning :
317 * - Caller must take a reference on returned object 332 * - Caller must take a reference on returned object
@@ -333,8 +348,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
333 local_bh_disable(); 348 local_bh_disable();
334begin: 349begin:
335 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { 350 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
336 if (nf_ct_tuple_equal(tuple, &h->tuple) && 351 if (nf_ct_key_equal(h, tuple, zone)) {
337 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
338 NF_CT_STAT_INC(net, found); 352 NF_CT_STAT_INC(net, found);
339 local_bh_enable(); 353 local_bh_enable();
340 return h; 354 return h;
@@ -372,8 +386,7 @@ begin:
372 !atomic_inc_not_zero(&ct->ct_general.use))) 386 !atomic_inc_not_zero(&ct->ct_general.use)))
373 h = NULL; 387 h = NULL;
374 else { 388 else {
375 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) || 389 if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
376 nf_ct_zone(ct) != zone)) {
377 nf_ct_put(ct); 390 nf_ct_put(ct);
378 goto begin; 391 goto begin;
379 } 392 }
@@ -435,7 +448,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
435 goto out; 448 goto out;
436 449
437 add_timer(&ct->timeout); 450 add_timer(&ct->timeout);
438 nf_conntrack_get(&ct->ct_general); 451 smp_wmb();
452 /* The caller holds a reference to this object */
453 atomic_set(&ct->ct_general.use, 2);
439 __nf_conntrack_hash_insert(ct, hash, repl_hash); 454 __nf_conntrack_hash_insert(ct, hash, repl_hash);
440 NF_CT_STAT_INC(net, insert); 455 NF_CT_STAT_INC(net, insert);
441 spin_unlock_bh(&nf_conntrack_lock); 456 spin_unlock_bh(&nf_conntrack_lock);
@@ -449,6 +464,21 @@ out:
449} 464}
450EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 465EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
451 466
467/* deletion from this larval template list happens via nf_ct_put() */
468void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
469{
470 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
471 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
472 nf_conntrack_get(&tmpl->ct_general);
473
474 spin_lock_bh(&nf_conntrack_lock);
475 /* Overload tuple linked list to put us in template list. */
476 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
477 &net->ct.tmpl);
478 spin_unlock_bh(&nf_conntrack_lock);
479}
480EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
481
452/* Confirm a connection given skb; places it in hash table */ 482/* Confirm a connection given skb; places it in hash table */
453int 483int
454__nf_conntrack_confirm(struct sk_buff *skb) 484__nf_conntrack_confirm(struct sk_buff *skb)
@@ -720,11 +750,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
720 nf_ct_zone->id = zone; 750 nf_ct_zone->id = zone;
721 } 751 }
722#endif 752#endif
723 /* 753 /* Because we use RCU lookups, we set ct_general.use to zero before
724 * changes to lookup keys must be done before setting refcnt to 1 754 * this is inserted in any list.
725 */ 755 */
726 smp_wmb(); 756 atomic_set(&ct->ct_general.use, 0);
727 atomic_set(&ct->ct_general.use, 1);
728 return ct; 757 return ct;
729 758
730#ifdef CONFIG_NF_CONNTRACK_ZONES 759#ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -748,6 +777,11 @@ void nf_conntrack_free(struct nf_conn *ct)
748{ 777{
749 struct net *net = nf_ct_net(ct); 778 struct net *net = nf_ct_net(ct);
750 779
780 /* A freed object has refcnt == 0, that's
781 * the golden rule for SLAB_DESTROY_BY_RCU
782 */
783 NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
784
751 nf_ct_ext_destroy(ct); 785 nf_ct_ext_destroy(ct);
752 nf_ct_ext_free(ct); 786 nf_ct_ext_free(ct);
753 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 787 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -843,6 +877,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
843 NF_CT_STAT_INC(net, new); 877 NF_CT_STAT_INC(net, new);
844 } 878 }
845 879
880 /* Now it is inserted into the unconfirmed list, bump refcount */
881 nf_conntrack_get(&ct->ct_general);
882
846 /* Overload tuple linked list to put us in unconfirmed list. */ 883 /* Overload tuple linked list to put us in unconfirmed list. */
847 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, 884 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
848 &net->ct.unconfirmed); 885 &net->ct.unconfirmed);
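
(Illustrative note, not part of the patch.) The comments added above spell out the SLAB_DESTROY_BY_RCU rules the new refcounting follows: a freed conntrack keeps refcount 0, and an RCU lookup may only use an entry once atomic_inc_not_zero() has succeeded and the key still matches afterwards, because the slab can hand the memory to a new object at any moment. A minimal sketch of that lookup pattern, with hypothetical obj, obj_key_equal() and obj_put() names and the nulls-marker re-check omitted for brevity:

struct obj *obj_lookup(struct hlist_nulls_head *head, const struct key *key)
{
	struct hlist_nulls_node *n;
	struct obj *o;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(o, n, head, node) {
		if (!obj_key_equal(o, key))
			continue;
		/* Freed objects keep refcount == 0, so only proceed if we
		 * can still take a reference ...
		 */
		if (!atomic_inc_not_zero(&o->refcnt))
			continue;
		/* ... and re-check the key, since the object may have been
		 * recycled between the match and the increment.
		 */
		if (!obj_key_equal(o, key)) {
			obj_put(o);
			goto begin;
		}
		rcu_read_unlock();
		return o;
	}
	rcu_read_unlock();
	return NULL;
}
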
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 9858e3e51a3a..52e20c9a46a5 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -363,9 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
363 goto err2; 363 goto err2;
364 if (!nfct_synproxy_ext_add(ct)) 364 if (!nfct_synproxy_ext_add(ct))
365 goto err2; 365 goto err2;
366 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
367 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
368 366
367 nf_conntrack_tmpl_insert(net, ct);
369 snet->tmpl = ct; 368 snet->tmpl = ct;
370 369
371 snet->stats = alloc_percpu(struct synproxy_stats); 370 snet->stats = alloc_percpu(struct synproxy_stats);
@@ -390,7 +389,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
390{ 389{
391 struct synproxy_net *snet = synproxy_pernet(net); 390 struct synproxy_net *snet = synproxy_pernet(net);
392 391
393 nf_conntrack_free(snet->tmpl); 392 nf_ct_put(snet->tmpl);
394 synproxy_proc_exit(net); 393 synproxy_proc_exit(net);
395 free_percpu(snet->stats); 394 free_percpu(snet->stats);
396} 395}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 117bbaaddde6..adce01e8bb57 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1008,10 +1008,8 @@ notify:
1008 return 0; 1008 return 0;
1009} 1009}
1010 1010
1011static void nf_tables_rcu_chain_destroy(struct rcu_head *head) 1011static void nf_tables_chain_destroy(struct nft_chain *chain)
1012{ 1012{
1013 struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
1014
1015 BUG_ON(chain->use > 0); 1013 BUG_ON(chain->use > 0);
1016 1014
1017 if (chain->flags & NFT_BASE_CHAIN) { 1015 if (chain->flags & NFT_BASE_CHAIN) {
@@ -1045,7 +1043,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1045 if (IS_ERR(chain)) 1043 if (IS_ERR(chain))
1046 return PTR_ERR(chain); 1044 return PTR_ERR(chain);
1047 1045
1048 if (!list_empty(&chain->rules)) 1046 if (!list_empty(&chain->rules) || chain->use > 0)
1049 return -EBUSY; 1047 return -EBUSY;
1050 1048
1051 list_del(&chain->list); 1049 list_del(&chain->list);
@@ -1059,7 +1057,9 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1059 family); 1057 family);
1060 1058
1061 /* Make sure all rule references are gone before this is released */ 1059 /* Make sure all rule references are gone before this is released */
1062 call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy); 1060 synchronize_rcu();
1061
1062 nf_tables_chain_destroy(chain);
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
@@ -1114,35 +1114,45 @@ void nft_unregister_expr(struct nft_expr_type *type)
1114} 1114}
1115EXPORT_SYMBOL_GPL(nft_unregister_expr); 1115EXPORT_SYMBOL_GPL(nft_unregister_expr);
1116 1116
1117static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla) 1117static const struct nft_expr_type *__nft_expr_type_get(u8 family,
1118 struct nlattr *nla)
1118{ 1119{
1119 const struct nft_expr_type *type; 1120 const struct nft_expr_type *type;
1120 1121
1121 list_for_each_entry(type, &nf_tables_expressions, list) { 1122 list_for_each_entry(type, &nf_tables_expressions, list) {
1122 if (!nla_strcmp(nla, type->name)) 1123 if (!nla_strcmp(nla, type->name) &&
1124 (!type->family || type->family == family))
1123 return type; 1125 return type;
1124 } 1126 }
1125 return NULL; 1127 return NULL;
1126} 1128}
1127 1129
1128static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla) 1130static const struct nft_expr_type *nft_expr_type_get(u8 family,
1131 struct nlattr *nla)
1129{ 1132{
1130 const struct nft_expr_type *type; 1133 const struct nft_expr_type *type;
1131 1134
1132 if (nla == NULL) 1135 if (nla == NULL)
1133 return ERR_PTR(-EINVAL); 1136 return ERR_PTR(-EINVAL);
1134 1137
1135 type = __nft_expr_type_get(nla); 1138 type = __nft_expr_type_get(family, nla);
1136 if (type != NULL && try_module_get(type->owner)) 1139 if (type != NULL && try_module_get(type->owner))
1137 return type; 1140 return type;
1138 1141
1139#ifdef CONFIG_MODULES 1142#ifdef CONFIG_MODULES
1140 if (type == NULL) { 1143 if (type == NULL) {
1141 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1144 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1145 request_module("nft-expr-%u-%.*s", family,
1146 nla_len(nla), (char *)nla_data(nla));
1147 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1148 if (__nft_expr_type_get(family, nla))
1149 return ERR_PTR(-EAGAIN);
1150
1151 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1142 request_module("nft-expr-%.*s", 1152 request_module("nft-expr-%.*s",
1143 nla_len(nla), (char *)nla_data(nla)); 1153 nla_len(nla), (char *)nla_data(nla));
1144 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1154 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1145 if (__nft_expr_type_get(nla)) 1155 if (__nft_expr_type_get(family, nla))
1146 return ERR_PTR(-EAGAIN); 1156 return ERR_PTR(-EAGAIN);
1147 } 1157 }
1148#endif 1158#endif
@@ -1193,7 +1203,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
1193 if (err < 0) 1203 if (err < 0)
1194 return err; 1204 return err;
1195 1205
1196 type = nft_expr_type_get(tb[NFTA_EXPR_NAME]); 1206 type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
1197 if (IS_ERR(type)) 1207 if (IS_ERR(type))
1198 return PTR_ERR(type); 1208 return PTR_ERR(type);
1199 1209
@@ -1521,9 +1531,8 @@ err:
1521 return err; 1531 return err;
1522} 1532}
1523 1533
1524static void nf_tables_rcu_rule_destroy(struct rcu_head *head) 1534static void nf_tables_rule_destroy(struct nft_rule *rule)
1525{ 1535{
1526 struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
1527 struct nft_expr *expr; 1536 struct nft_expr *expr;
1528 1537
1529 /* 1538 /*
@@ -1538,11 +1547,6 @@ static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
1538 kfree(rule); 1547 kfree(rule);
1539} 1548}
1540 1549
1541static void nf_tables_rule_destroy(struct nft_rule *rule)
1542{
1543 call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
1544}
1545
1546#define NFT_RULE_MAXEXPRS 128 1550#define NFT_RULE_MAXEXPRS 128
1547 1551
1548static struct nft_expr_info *info; 1552static struct nft_expr_info *info;
@@ -1809,9 +1813,6 @@ static int nf_tables_commit(struct sk_buff *skb)
1809 synchronize_rcu(); 1813 synchronize_rcu();
1810 1814
1811 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { 1815 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1812 /* Delete this rule from the dirty list */
1813 list_del(&rupd->list);
1814
1815 /* This rule was inactive in the past and just became active. 1816 /* This rule was inactive in the past and just became active.
1816 * Clear the next bit of the genmask since its meaning has 1817 * Clear the next bit of the genmask since its meaning has
1817 * changed, now it is the future. 1818 * changed, now it is the future.
@@ -1822,6 +1823,7 @@ static int nf_tables_commit(struct sk_buff *skb)
1822 rupd->chain, rupd->rule, 1823 rupd->chain, rupd->rule,
1823 NFT_MSG_NEWRULE, 0, 1824 NFT_MSG_NEWRULE, 0,
1824 rupd->family); 1825 rupd->family);
1826 list_del(&rupd->list);
1825 kfree(rupd); 1827 kfree(rupd);
1826 continue; 1828 continue;
1827 } 1829 }
@@ -1831,7 +1833,15 @@ static int nf_tables_commit(struct sk_buff *skb)
1831 nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain, 1833 nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
1832 rupd->rule, NFT_MSG_DELRULE, 0, 1834 rupd->rule, NFT_MSG_DELRULE, 0,
1833 rupd->family); 1835 rupd->family);
1836 }
1837
1838 /* Make sure we don't see any packet traversing old rules */
1839 synchronize_rcu();
1840
1841 /* Now we can safely release unused old rules */
1842 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1834 nf_tables_rule_destroy(rupd->rule); 1843 nf_tables_rule_destroy(rupd->rule);
1844 list_del(&rupd->list);
1835 kfree(rupd); 1845 kfree(rupd);
1836 } 1846 }
1837 1847
@@ -1844,20 +1854,26 @@ static int nf_tables_abort(struct sk_buff *skb)
1844 struct nft_rule_trans *rupd, *tmp; 1854 struct nft_rule_trans *rupd, *tmp;
1845 1855
1846 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) { 1856 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1847 /* Delete all rules from the dirty list */
1848 list_del(&rupd->list);
1849
1850 if (!nft_rule_is_active_next(net, rupd->rule)) { 1857 if (!nft_rule_is_active_next(net, rupd->rule)) {
1851 nft_rule_clear(net, rupd->rule); 1858 nft_rule_clear(net, rupd->rule);
1859 list_del(&rupd->list);
1852 kfree(rupd); 1860 kfree(rupd);
1853 continue; 1861 continue;
1854 } 1862 }
1855 1863
1856 /* This rule is inactive, get rid of it */ 1864 /* This rule is inactive, get rid of it */
1857 list_del_rcu(&rupd->rule->list); 1865 list_del_rcu(&rupd->rule->list);
1866 }
1867
1868 /* Make sure we don't see any packet accessing aborted rules */
1869 synchronize_rcu();
1870
1871 list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
1858 nf_tables_rule_destroy(rupd->rule); 1872 nf_tables_rule_destroy(rupd->rule);
1873 list_del(&rupd->list);
1859 kfree(rupd); 1874 kfree(rupd);
1860 } 1875 }
1876
1861 return 0; 1877 return 0;
1862} 1878}
1863 1879
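
(Illustrative note, not part of the patch.) Commit and abort now share the same two-pass shape: first walk the transaction list to re-activate or unlink rules, then wait for one RCU grace period covering the whole batch, and only then destroy the rules nothing can reach any more. A rough sketch of that second pass, with rule_trans, rule and rule_free() as hypothetical stand-ins for the nft types:

struct rule_trans {			/* stand-in for struct nft_rule_trans */
	struct list_head list;
	struct rule *rule;		/* already unlinked with list_del_rcu() */
};

static void destroy_batch(struct list_head *commit_list)
{
	struct rule_trans *t, *tmp;

	/* One grace period covers the whole batch: after this, no packet
	 * can still be traversing any of the unlinked rules.
	 */
	synchronize_rcu();

	/* The rules are unreachable now, release them for real. */
	list_for_each_entry_safe(t, tmp, commit_list, list) {
		rule_free(t->rule);	/* stand-in for nf_tables_rule_destroy() */
		list_del(&t->list);
		kfree(t);
	}
}
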
@@ -1943,6 +1959,9 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
1943 } 1959 }
1944 1960
1945 if (nla[NFTA_SET_TABLE] != NULL) { 1961 if (nla[NFTA_SET_TABLE] != NULL) {
1962 if (afi == NULL)
1963 return -EAFNOSUPPORT;
1964
1946 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); 1965 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
1947 if (IS_ERR(table)) 1966 if (IS_ERR(table))
1948 return PTR_ERR(table); 1967 return PTR_ERR(table);
@@ -1989,13 +2008,13 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
1989 2008
1990 if (!sscanf(i->name, name, &tmp)) 2009 if (!sscanf(i->name, name, &tmp))
1991 continue; 2010 continue;
1992 if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE) 2011 if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
1993 continue; 2012 continue;
1994 2013
1995 set_bit(tmp, inuse); 2014 set_bit(tmp, inuse);
1996 } 2015 }
1997 2016
1998 n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE); 2017 n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
1999 free_page((unsigned long)inuse); 2018 free_page((unsigned long)inuse);
2000 } 2019 }
2001 2020
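
(Illustrative arithmetic, not part of the patch: the scratch bitmap here is a single page, so with a 4 KiB PAGE_SIZE it holds 4096 * BITS_PER_BYTE = 32768 bits. Treating it as BITS_PER_LONG * PAGE_SIZE bits, 262144 on a 64-bit build, would require a 32 KiB bitmap and let set_bit() write well past the one allocated page, which is what the corrected bound, together with the '>=' check, prevents.)
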
@@ -2428,6 +2447,8 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2428 struct nft_ctx ctx; 2447 struct nft_ctx ctx;
2429 int err; 2448 int err;
2430 2449
2450 if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
2451 return -EAFNOSUPPORT;
2431 if (nla[NFTA_SET_TABLE] == NULL) 2452 if (nla[NFTA_SET_TABLE] == NULL)
2432 return -EINVAL; 2453 return -EINVAL;
2433 2454
@@ -2435,9 +2456,6 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2435 if (err < 0) 2456 if (err < 0)
2436 return err; 2457 return err;
2437 2458
2438 if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
2439 return -EAFNOSUPPORT;
2440
2441 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2459 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
2442 if (IS_ERR(set)) 2460 if (IS_ERR(set))
2443 return PTR_ERR(set); 2461 return PTR_ERR(set);
@@ -2723,6 +2741,9 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
2723 if (nla[NFTA_SET_ELEM_DATA] == NULL && 2741 if (nla[NFTA_SET_ELEM_DATA] == NULL &&
2724 !(elem.flags & NFT_SET_ELEM_INTERVAL_END)) 2742 !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
2725 return -EINVAL; 2743 return -EINVAL;
2744 if (nla[NFTA_SET_ELEM_DATA] != NULL &&
2745 elem.flags & NFT_SET_ELEM_INTERVAL_END)
2746 return -EINVAL;
2726 } else { 2747 } else {
2727 if (nla[NFTA_SET_ELEM_DATA] != NULL) 2748 if (nla[NFTA_SET_ELEM_DATA] != NULL)
2728 return -EINVAL; 2749 return -EINVAL;
@@ -2977,6 +2998,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
2977 const struct nft_set_iter *iter, 2998 const struct nft_set_iter *iter,
2978 const struct nft_set_elem *elem) 2999 const struct nft_set_elem *elem)
2979{ 3000{
3001 if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
3002 return 0;
3003
2980 switch (elem->data.verdict) { 3004 switch (elem->data.verdict) {
2981 case NFT_JUMP: 3005 case NFT_JUMP:
2982 case NFT_GOTO: 3006 case NFT_GOTO:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 0d879fcb8763..90998a6ff8b9 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -103,9 +103,9 @@ static struct nf_loginfo trace_loginfo = {
103 }, 103 },
104}; 104};
105 105
106static inline void nft_trace_packet(const struct nft_pktinfo *pkt, 106static void nft_trace_packet(const struct nft_pktinfo *pkt,
107 const struct nft_chain *chain, 107 const struct nft_chain *chain,
108 int rulenum, enum nft_trace type) 108 int rulenum, enum nft_trace type)
109{ 109{
110 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 110 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
111 111
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 917052e20602..46e275403838 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -226,6 +226,7 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
226 if (tb[NFTA_CT_DIRECTION] != NULL) 226 if (tb[NFTA_CT_DIRECTION] != NULL)
227 return -EINVAL; 227 return -EINVAL;
228 break; 228 break;
229 case NFT_CT_L3PROTOCOL:
229 case NFT_CT_PROTOCOL: 230 case NFT_CT_PROTOCOL:
230 case NFT_CT_SRC: 231 case NFT_CT_SRC:
231 case NFT_CT_DST: 232 case NFT_CT_DST:
@@ -311,8 +312,19 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
311 goto nla_put_failure; 312 goto nla_put_failure;
312 if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key))) 313 if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
313 goto nla_put_failure; 314 goto nla_put_failure;
314 if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir)) 315
315 goto nla_put_failure; 316 switch (priv->key) {
317 case NFT_CT_PROTOCOL:
318 case NFT_CT_SRC:
319 case NFT_CT_DST:
320 case NFT_CT_PROTO_SRC:
321 case NFT_CT_PROTO_DST:
322 if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
323 goto nla_put_failure;
324 default:
325 break;
326 }
327
316 return 0; 328 return 0;
317 329
318nla_put_failure: 330nla_put_failure:
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5af790123ad8..26c5154e05f3 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -23,7 +23,6 @@ static const char *nft_log_null_prefix = "";
23struct nft_log { 23struct nft_log {
24 struct nf_loginfo loginfo; 24 struct nf_loginfo loginfo;
25 char *prefix; 25 char *prefix;
26 int family;
27}; 26};
28 27
29static void nft_log_eval(const struct nft_expr *expr, 28static void nft_log_eval(const struct nft_expr *expr,
@@ -33,7 +32,7 @@ static void nft_log_eval(const struct nft_expr *expr,
33 const struct nft_log *priv = nft_expr_priv(expr); 32 const struct nft_log *priv = nft_expr_priv(expr);
34 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); 33 struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
35 34
36 nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in, 35 nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
37 pkt->out, &priv->loginfo, "%s", priv->prefix); 36 pkt->out, &priv->loginfo, "%s", priv->prefix);
38} 37}
39 38
@@ -52,8 +51,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
52 struct nf_loginfo *li = &priv->loginfo; 51 struct nf_loginfo *li = &priv->loginfo;
53 const struct nlattr *nla; 52 const struct nlattr *nla;
54 53
55 priv->family = ctx->afi->family;
56
57 nla = tb[NFTA_LOG_PREFIX]; 54 nla = tb[NFTA_LOG_PREFIX];
58 if (nla != NULL) { 55 if (nla != NULL) {
59 priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL); 56 priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 8a6116b75b5a..bb4ef4cccb6e 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -16,6 +16,7 @@
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h> 17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h> 18#include <net/netfilter/nf_tables.h>
19#include <net/netfilter/nf_tables_core.h>
19 20
20struct nft_lookup { 21struct nft_lookup {
21 struct nft_set *set; 22 struct nft_set *set;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index cbea473d69e9..e8ae2f6bf232 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -25,7 +25,6 @@ struct nft_queue {
25 u16 queuenum; 25 u16 queuenum;
26 u16 queues_total; 26 u16 queues_total;
27 u16 flags; 27 u16 flags;
28 u8 family;
29}; 28};
30 29
31static void nft_queue_eval(const struct nft_expr *expr, 30static void nft_queue_eval(const struct nft_expr *expr,
@@ -43,7 +42,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
43 queue = priv->queuenum + cpu % priv->queues_total; 42 queue = priv->queuenum + cpu % priv->queues_total;
44 } else { 43 } else {
45 queue = nfqueue_hash(pkt->skb, queue, 44 queue = nfqueue_hash(pkt->skb, queue,
46 priv->queues_total, priv->family, 45 priv->queues_total, pkt->ops->pf,
47 jhash_initval); 46 jhash_initval);
48 } 47 }
49 } 48 }
@@ -71,7 +70,6 @@ static int nft_queue_init(const struct nft_ctx *ctx,
71 return -EINVAL; 70 return -EINVAL;
72 71
73 init_hashrandom(&jhash_initval); 72 init_hashrandom(&jhash_initval);
74 priv->family = ctx->afi->family;
75 priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); 73 priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
76 74
77 if (tb[NFTA_QUEUE_TOTAL] != NULL) 75 if (tb[NFTA_QUEUE_TOTAL] != NULL)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
69 struct nft_rbtree_elem *rbe) 69 struct nft_rbtree_elem *rbe)
70{ 70{
71 nft_data_uninit(&rbe->key, NFT_DATA_VALUE); 71 nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
72 if (set->flags & NFT_SET_MAP) 72 if (set->flags & NFT_SET_MAP &&
73 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
73 nft_data_uninit(rbe->data, set->dtype); 74 nft_data_uninit(rbe->data, set->dtype);
75
74 kfree(rbe); 76 kfree(rbe);
75} 77}
76 78
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
108 int err; 110 int err;
109 111
110 size = sizeof(*rbe); 112 size = sizeof(*rbe);
111 if (set->flags & NFT_SET_MAP) 113 if (set->flags & NFT_SET_MAP &&
114 !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
112 size += sizeof(rbe->data[0]); 115 size += sizeof(rbe->data[0]);
113 116
114 rbe = kzalloc(size, GFP_KERNEL); 117 rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
117 120
118 rbe->flags = elem->flags; 121 rbe->flags = elem->flags;
119 nft_data_copy(&rbe->key, &elem->key); 122 nft_data_copy(&rbe->key, &elem->key);
120 if (set->flags & NFT_SET_MAP) 123 if (set->flags & NFT_SET_MAP &&
124 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
121 nft_data_copy(rbe->data, &elem->data); 125 nft_data_copy(rbe->data, &elem->data);
122 126
123 err = __nft_rbtree_insert(set, rbe); 127 err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
153 parent = parent->rb_right; 157 parent = parent->rb_right;
154 else { 158 else {
155 elem->cookie = rbe; 159 elem->cookie = rbe;
156 if (set->flags & NFT_SET_MAP) 160 if (set->flags & NFT_SET_MAP &&
161 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
157 nft_data_copy(&elem->data, rbe->data); 162 nft_data_copy(&elem->data, rbe->data);
158 elem->flags = rbe->flags; 163 elem->flags = rbe->flags;
159 return 0; 164 return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
177 182
178 rbe = rb_entry(node, struct nft_rbtree_elem, node); 183 rbe = rb_entry(node, struct nft_rbtree_elem, node);
179 nft_data_copy(&elem.key, &rbe->key); 184 nft_data_copy(&elem.key, &rbe->key);
180 if (set->flags & NFT_SET_MAP) 185 if (set->flags & NFT_SET_MAP &&
186 !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
181 nft_data_copy(&elem.data, rbe->data); 187 nft_data_copy(&elem.data, rbe->data);
182 elem.flags = rbe->flags; 188 elem.flags = rbe->flags;
183 189
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 5e204711d704..f3448c296446 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -16,65 +16,23 @@
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h> 17#include <linux/netfilter/nf_tables.h>
18#include <net/netfilter/nf_tables.h> 18#include <net/netfilter/nf_tables.h>
19#include <net/icmp.h> 19#include <net/netfilter/nft_reject.h>
20#include <net/netfilter/ipv4/nf_reject.h>
21 20
22#if IS_ENABLED(CONFIG_NF_TABLES_IPV6) 21const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
23#include <net/netfilter/ipv6/nf_reject.h>
24#endif
25
26struct nft_reject {
27 enum nft_reject_types type:8;
28 u8 icmp_code;
29 u8 family;
30};
31
32static void nft_reject_eval(const struct nft_expr *expr,
33 struct nft_data data[NFT_REG_MAX + 1],
34 const struct nft_pktinfo *pkt)
35{
36 struct nft_reject *priv = nft_expr_priv(expr);
37#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
38 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
39#endif
40 switch (priv->type) {
41 case NFT_REJECT_ICMP_UNREACH:
42 if (priv->family == NFPROTO_IPV4)
43 nf_send_unreach(pkt->skb, priv->icmp_code);
44#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
45 else if (priv->family == NFPROTO_IPV6)
46 nf_send_unreach6(net, pkt->skb, priv->icmp_code,
47 pkt->ops->hooknum);
48#endif
49 break;
50 case NFT_REJECT_TCP_RST:
51 if (priv->family == NFPROTO_IPV4)
52 nf_send_reset(pkt->skb, pkt->ops->hooknum);
53#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
54 else if (priv->family == NFPROTO_IPV6)
55 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
56#endif
57 break;
58 }
59
60 data[NFT_REG_VERDICT].verdict = NF_DROP;
61}
62
63static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
64 [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, 22 [NFTA_REJECT_TYPE] = { .type = NLA_U32 },
65 [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 }, 23 [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
66}; 24};
25EXPORT_SYMBOL_GPL(nft_reject_policy);
67 26
68static int nft_reject_init(const struct nft_ctx *ctx, 27int nft_reject_init(const struct nft_ctx *ctx,
69 const struct nft_expr *expr, 28 const struct nft_expr *expr,
70 const struct nlattr * const tb[]) 29 const struct nlattr * const tb[])
71{ 30{
72 struct nft_reject *priv = nft_expr_priv(expr); 31 struct nft_reject *priv = nft_expr_priv(expr);
73 32
74 if (tb[NFTA_REJECT_TYPE] == NULL) 33 if (tb[NFTA_REJECT_TYPE] == NULL)
75 return -EINVAL; 34 return -EINVAL;
76 35
77 priv->family = ctx->afi->family;
78 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); 36 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
79 switch (priv->type) { 37 switch (priv->type) {
80 case NFT_REJECT_ICMP_UNREACH: 38 case NFT_REJECT_ICMP_UNREACH:
@@ -89,8 +47,9 @@ static int nft_reject_init(const struct nft_ctx *ctx,
89 47
90 return 0; 48 return 0;
91} 49}
50EXPORT_SYMBOL_GPL(nft_reject_init);
92 51
93static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) 52int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
94{ 53{
95 const struct nft_reject *priv = nft_expr_priv(expr); 54 const struct nft_reject *priv = nft_expr_priv(expr);
96 55
@@ -109,37 +68,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
109nla_put_failure: 68nla_put_failure:
110 return -1; 69 return -1;
111} 70}
112 71EXPORT_SYMBOL_GPL(nft_reject_dump);
113static struct nft_expr_type nft_reject_type;
114static const struct nft_expr_ops nft_reject_ops = {
115 .type = &nft_reject_type,
116 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
117 .eval = nft_reject_eval,
118 .init = nft_reject_init,
119 .dump = nft_reject_dump,
120};
121
122static struct nft_expr_type nft_reject_type __read_mostly = {
123 .name = "reject",
124 .ops = &nft_reject_ops,
125 .policy = nft_reject_policy,
126 .maxattr = NFTA_REJECT_MAX,
127 .owner = THIS_MODULE,
128};
129
130static int __init nft_reject_module_init(void)
131{
132 return nft_register_expr(&nft_reject_type);
133}
134
135static void __exit nft_reject_module_exit(void)
136{
137 nft_unregister_expr(&nft_reject_type);
138}
139
140module_init(nft_reject_module_init);
141module_exit(nft_reject_module_exit);
142 72
143MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
144MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 74MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
145MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
new file mode 100644
index 000000000000..8a310f239c93
--- /dev/null
+++ b/net/netfilter/nft_reject_inet.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netlink.h>
13#include <linux/netfilter.h>
14#include <linux/netfilter/nf_tables.h>
15#include <net/netfilter/nf_tables.h>
16#include <net/netfilter/nft_reject.h>
17
18static void nft_reject_inet_eval(const struct nft_expr *expr,
19 struct nft_data data[NFT_REG_MAX + 1],
20 const struct nft_pktinfo *pkt)
21{
22 switch (pkt->ops->pf) {
23 case NFPROTO_IPV4:
24 nft_reject_ipv4_eval(expr, data, pkt);
25 case NFPROTO_IPV6:
26 nft_reject_ipv6_eval(expr, data, pkt);
27 }
28}
29
30static struct nft_expr_type nft_reject_inet_type;
31static const struct nft_expr_ops nft_reject_inet_ops = {
32 .type = &nft_reject_inet_type,
33 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
34 .eval = nft_reject_inet_eval,
35 .init = nft_reject_init,
36 .dump = nft_reject_dump,
37};
38
39static struct nft_expr_type nft_reject_inet_type __read_mostly = {
40 .family = NFPROTO_INET,
41 .name = "reject",
42 .ops = &nft_reject_inet_ops,
43 .policy = nft_reject_policy,
44 .maxattr = NFTA_REJECT_MAX,
45 .owner = THIS_MODULE,
46};
47
48static int __init nft_reject_inet_module_init(void)
49{
50 return nft_register_expr(&nft_reject_inet_type);
51}
52
53static void __exit nft_reject_inet_module_exit(void)
54{
55 nft_unregister_expr(&nft_reject_inet_type);
56}
57
58module_init(nft_reject_inet_module_init);
59module_exit(nft_reject_inet_module_exit);
60
61MODULE_LICENSE("GPL");
62MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
63MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5929be622c5c..75747aecdebe 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -228,12 +228,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
228 goto err3; 228 goto err3;
229 } 229 }
230 230
231 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 231 nf_conntrack_tmpl_insert(par->net, ct);
232 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
233
234 /* Overload tuple linked list to put us in template list. */
235 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
236 &par->net->ct.tmpl);
237out: 232out:
238 info->ct = ct; 233 info->ct = ct;
239 return 0; 234 return 0;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index df4692826ead..e9a48baf8551 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
55 55
56#include "datapath.h" 56#include "datapath.h"
57#include "flow.h" 57#include "flow.h"
58#include "flow_table.h"
58#include "flow_netlink.h" 59#include "flow_netlink.h"
59#include "vport-internal_dev.h" 60#include "vport-internal_dev.h"
60#include "vport-netdev.h" 61#include "vport-netdev.h"
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
160{ 161{
161 struct datapath *dp = container_of(rcu, struct datapath, rcu); 162 struct datapath *dp = container_of(rcu, struct datapath, rcu);
162 163
163 ovs_flow_tbl_destroy(&dp->table);
164 free_percpu(dp->stats_percpu); 164 free_percpu(dp->stats_percpu);
165 release_net(ovs_dp_get_net(dp)); 165 release_net(ovs_dp_get_net(dp));
166 kfree(dp->ports); 166 kfree(dp->ports);
@@ -466,6 +466,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
466 466
467 skb_zerocopy(user_skb, skb, skb->len, hlen); 467 skb_zerocopy(user_skb, skb, skb->len, hlen);
468 468
469 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
470 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
471 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
472
473 if (plen > 0)
474 memset(skb_put(user_skb, plen), 0, plen);
475 }
476
469 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; 477 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
470 478
471 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); 479 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
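
(Worked example, not part of the patch: the padding above keeps the packet attribute's payload aligned to NLA_ALIGNTO, which is 4 bytes. If user_skb->len is 61 at this point, NLA_ALIGN(61) is 64, so plen is 3 and three zero bytes are appended before nlmsg_len is finalised; for an already aligned length plen is 0 and nothing is added.)
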
@@ -852,11 +860,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
852 goto err_unlock_ovs; 860 goto err_unlock_ovs;
853 861
854 /* The unmasked key has to be the same for flow updates. */ 862 /* The unmasked key has to be the same for flow updates. */
855 error = -EINVAL; 863 if (!ovs_flow_cmp_unmasked_key(flow, &match))
856 if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
857 OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
858 goto err_unlock_ovs; 864 goto err_unlock_ovs;
859 }
860 865
861 /* Update actions. */ 866 /* Update actions. */
862 old_acts = ovsl_dereference(flow->sf_acts); 867 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1079,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void)
1079 msgsize += nla_total_size(IFNAMSIZ); 1084 msgsize += nla_total_size(IFNAMSIZ);
1080 msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); 1085 msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1081 msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); 1086 msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1087 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1082 1088
1083 return msgsize; 1089 return msgsize;
1084} 1090}
@@ -1279,7 +1285,7 @@ err_destroy_ports_array:
1279err_destroy_percpu: 1285err_destroy_percpu:
1280 free_percpu(dp->stats_percpu); 1286 free_percpu(dp->stats_percpu);
1281err_destroy_table: 1287err_destroy_table:
1282 ovs_flow_tbl_destroy(&dp->table); 1288 ovs_flow_tbl_destroy(&dp->table, false);
1283err_free_dp: 1289err_free_dp:
1284 release_net(ovs_dp_get_net(dp)); 1290 release_net(ovs_dp_get_net(dp));
1285 kfree(dp); 1291 kfree(dp);
@@ -1306,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp)
1306 list_del_rcu(&dp->list_node); 1312 list_del_rcu(&dp->list_node);
1307 1313
1308 /* OVSP_LOCAL is datapath internal port. We need to make sure that 1314 /* OVSP_LOCAL is datapath internal port. We need to make sure that
1309 * all port in datapath are destroyed first before freeing datapath. 1315 * all ports in datapath are destroyed first before freeing datapath.
1310 */ 1316 */
1311 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); 1317 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1312 1318
1319 /* RCU destroy the flow table */
1320 ovs_flow_tbl_destroy(&dp->table, true);
1321
1313 call_rcu(&dp->rcu, destroy_dp_rcu); 1322 call_rcu(&dp->rcu, destroy_dp_rcu);
1314} 1323}
1315 1324
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe3c889..3c268b3d71c3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
153 flow_free(flow); 153 flow_free(flow);
154} 154}
155 155
156static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
157{
158 if (!mask)
159 return;
160
161 BUG_ON(!mask->ref_count);
162 mask->ref_count--;
163
164 if (!mask->ref_count) {
165 list_del_rcu(&mask->list);
166 if (deferred)
167 kfree_rcu(mask, rcu);
168 else
169 kfree(mask);
170 }
171}
172
173void ovs_flow_free(struct sw_flow *flow, bool deferred) 156void ovs_flow_free(struct sw_flow *flow, bool deferred)
174{ 157{
175 if (!flow) 158 if (!flow)
176 return; 159 return;
177 160
178 flow_mask_del_ref(flow->mask, deferred); 161 if (flow->mask) {
162 struct sw_flow_mask *mask = flow->mask;
163
164 /* ovs-lock is required to protect mask-refcount and
165 * mask list.
166 */
167 ASSERT_OVSL();
168 BUG_ON(!mask->ref_count);
169 mask->ref_count--;
170
171 if (!mask->ref_count) {
172 list_del_rcu(&mask->list);
173 if (deferred)
174 kfree_rcu(mask, rcu);
175 else
176 kfree(mask);
177 }
178 }
179 179
180 if (deferred) 180 if (deferred)
181 call_rcu(&flow->rcu, rcu_free_flow_callback); 181 call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +188,9 @@ static void free_buckets(struct flex_array *buckets)
188 flex_array_free(buckets); 188 flex_array_free(buckets);
189} 189}
190 190
191
191static void __table_instance_destroy(struct table_instance *ti) 192static void __table_instance_destroy(struct table_instance *ti)
192{ 193{
193 int i;
194
195 if (ti->keep_flows)
196 goto skip_flows;
197
198 for (i = 0; i < ti->n_buckets; i++) {
199 struct sw_flow *flow;
200 struct hlist_head *head = flex_array_get(ti->buckets, i);
201 struct hlist_node *n;
202 int ver = ti->node_ver;
203
204 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
205 hlist_del(&flow->hash_node[ver]);
206 ovs_flow_free(flow, false);
207 }
208 }
209
210skip_flows:
211 free_buckets(ti->buckets); 194 free_buckets(ti->buckets);
212 kfree(ti); 195 kfree(ti);
213} 196}
@@ -258,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
258 241
259static void table_instance_destroy(struct table_instance *ti, bool deferred) 242static void table_instance_destroy(struct table_instance *ti, bool deferred)
260{ 243{
244 int i;
245
261 if (!ti) 246 if (!ti)
262 return; 247 return;
263 248
249 if (ti->keep_flows)
250 goto skip_flows;
251
252 for (i = 0; i < ti->n_buckets; i++) {
253 struct sw_flow *flow;
254 struct hlist_head *head = flex_array_get(ti->buckets, i);
255 struct hlist_node *n;
256 int ver = ti->node_ver;
257
258 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
259 hlist_del_rcu(&flow->hash_node[ver]);
260 ovs_flow_free(flow, deferred);
261 }
262 }
263
264skip_flows:
264 if (deferred) 265 if (deferred)
265 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); 266 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
266 else 267 else
267 __table_instance_destroy(ti); 268 __table_instance_destroy(ti);
268} 269}
269 270
270void ovs_flow_tbl_destroy(struct flow_table *table) 271void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
271{ 272{
272 struct table_instance *ti = ovsl_dereference(table->ti); 273 struct table_instance *ti = ovsl_dereference(table->ti);
273 274
274 table_instance_destroy(ti, false); 275 table_instance_destroy(ti, deferred);
275} 276}
276 277
277struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, 278struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void)
504 505
505 mask = kmalloc(sizeof(*mask), GFP_KERNEL); 506 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
506 if (mask) 507 if (mask)
507 mask->ref_count = 0; 508 mask->ref_count = 1;
508 509
509 return mask; 510 return mask;
510} 511}
511 512
512static void mask_add_ref(struct sw_flow_mask *mask)
513{
514 mask->ref_count++;
515}
516
517static bool mask_equal(const struct sw_flow_mask *a, 513static bool mask_equal(const struct sw_flow_mask *a,
518 const struct sw_flow_mask *b) 514 const struct sw_flow_mask *b)
519{ 515{
@@ -554,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
554 mask->key = new->key; 550 mask->key = new->key;
555 mask->range = new->range; 551 mask->range = new->range;
556 list_add_rcu(&mask->list, &tbl->mask_list); 552 list_add_rcu(&mask->list, &tbl->mask_list);
553 } else {
554 BUG_ON(!mask->ref_count);
555 mask->ref_count++;
557 } 556 }
558 557
559 mask_add_ref(mask);
560 flow->mask = mask; 558 flow->mask = mask;
561 return 0; 559 return 0;
562} 560}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34c0fd8..baaeb101924d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
60 60
61int ovs_flow_tbl_init(struct flow_table *); 61int ovs_flow_tbl_init(struct flow_table *);
62int ovs_flow_tbl_count(struct flow_table *table); 62int ovs_flow_tbl_count(struct flow_table *table);
63void ovs_flow_tbl_destroy(struct flow_table *table); 63void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
64int ovs_flow_tbl_flush(struct flow_table *flow_table); 64int ovs_flow_tbl_flush(struct flow_table *flow_table);
65 65
66int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, 66int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37506c5..48a6a93db296 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
308 return po->xmit == packet_direct_xmit; 308 return po->xmit == packet_direct_xmit;
309} 309}
310 310
311static u16 packet_pick_tx_queue(struct net_device *dev) 311static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
312{ 312{
313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
314} 314}
315 315
316static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
317{
318 const struct net_device_ops *ops = dev->netdev_ops;
319 u16 queue_index;
320
321 if (ops->ndo_select_queue) {
322 queue_index = ops->ndo_select_queue(dev, skb, NULL,
323 __packet_pick_tx_queue);
324 queue_index = netdev_cap_txqueue(dev, queue_index);
325 } else {
326 queue_index = __packet_pick_tx_queue(dev, skb);
327 }
328
329 skb_set_queue_mapping(skb, queue_index);
330}
331
316/* register_prot_hook must be invoked with the po->bind_lock held, 332/* register_prot_hook must be invoked with the po->bind_lock held,
317 * or from a context in which asynchronous accesses to the packet 333 * or from a context in which asynchronous accesses to the packet
318 * socket is not possible (packet_create()). 334 * socket is not possible (packet_create()).
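
(Illustrative note, not part of the patch.) packet_pick_tx_queue() above shows the caller side of the reworked ndo_select_queue(): the device's hook now also receives a select_queue_fallback_t it can defer to, and the chosen index is clamped with netdev_cap_txqueue(). A minimal sketch of the driver side, assuming a hypothetical driver that pins control-priority traffic to queue 0 and defers everything else to the core:

static u16 example_ndo_select_queue(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv,
				    select_queue_fallback_t fallback)
{
	/* keep control-priority traffic on a dedicated queue ... */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	/* ... and let the core's hashing pick for everything else */
	return fallback(dev, skb);
}
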
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2285 } 2301 }
2286 } 2302 }
2287 2303
2288 skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2304 packet_pick_tx_queue(dev, skb);
2305
2289 skb->destructor = tpacket_destruct_skb; 2306 skb->destructor = tpacket_destruct_skb;
2290 __packet_set_status(po, ph, TP_STATUS_SENDING); 2307 __packet_set_status(po, ph, TP_STATUS_SENDING);
2291 packet_inc_pending(&po->tx_ring); 2308 packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2499 skb->dev = dev; 2516 skb->dev = dev;
2500 skb->priority = sk->sk_priority; 2517 skb->priority = sk->sk_priority;
2501 skb->mark = sk->sk_mark; 2518 skb->mark = sk->sk_mark;
2502 skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2519
2520 packet_pick_tx_queue(dev, skb);
2503 2521
2504 if (po->has_vnet_hdr) { 2522 if (po->has_vnet_hdr) {
2505 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 2523 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3786 */ 3804 */
3787 if (!tx_ring) 3805 if (!tx_ring)
3788 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); 3806 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3789 break; 3807 break;
3790 default: 3808 default:
3791 break; 3809 break;
3792 } 3810 }
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d0200a59..fefeeb73f15f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
15 * 15 *
16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> 16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
17 * University of Oslo, Norway. 17 * University of Oslo, Norway.
18 *
19 * References:
20 * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
21 * IEEE Conference on High Performance Switching and Routing 2013 :
22 * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
18 */ 23 */
19 24
20#include <linux/module.h> 25#include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
36 psched_time_t target; /* user specified target delay in pschedtime */ 41 psched_time_t target; /* user specified target delay in pschedtime */
37 u32 tupdate; /* timer frequency (in jiffies) */ 42 u32 tupdate; /* timer frequency (in jiffies) */
38 u32 limit; /* number of packets that can be enqueued */ 43 u32 limit; /* number of packets that can be enqueued */
39 u32 alpha; /* alpha and beta are between -4 and 4 */ 44 u32 alpha; /* alpha and beta are between 0 and 32 */
40 u32 beta; /* and are used for shift relative to 1 */ 45 u32 beta; /* and are used for shift relative to 1 */
41 bool ecn; /* true if ecn is enabled */ 46 bool ecn; /* true if ecn is enabled */
42 bool bytemode; /* to scale drop early prob based on pkt size */ 47 bool bytemode; /* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
326 if (qdelay == 0 && qlen != 0) 331 if (qdelay == 0 && qlen != 0)
327 update_prob = false; 332 update_prob = false;
328 333
329 /* Add ranges for alpha and beta, more aggressive for high dropping 334 /* In the algorithm, alpha and beta are between 0 and 2 with typical
330 * mode and gentle steps for light dropping mode 335 * value for alpha as 0.125. In this implementation, we use values 0-32
331 * In light dropping mode, take gentle steps; in medium dropping mode, 336 * passed from user space to represent this. Also, alpha and beta have
332 * take medium steps; in high dropping mode, take big steps. 337 * unit of HZ and need to be scaled before they can used to update
338 * probability. alpha/beta are updated locally below by 1) scaling them
339 * appropriately 2) scaling down by 16 to come to 0-2 range.
340 * Please see paper for details.
341 *
342 * We scale alpha and beta differently depending on whether we are in
343 * light, medium or high dropping mode.
333 */ 344 */
334 if (q->vars.prob < MAX_PROB / 100) { 345 if (q->vars.prob < MAX_PROB / 100) {
335 alpha = 346 alpha =
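
(Worked example derived from the comment above, illustrative only and not part of the patch: dividing the netlink value by 16 recovers the algorithm's alpha/beta, so a configured alpha of 2 corresponds to the typical 0.125 and the maximum of 32 corresponds to 2.0. The additional scaling the comment mentions then accounts for the HZ-based units before the value is applied to the drop probability, with the per-mode scaling keeping the adjustment gentle in light dropping mode and more aggressive in high dropping mode.)
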
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..f558433537b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1367 return false; 1367 return false;
1368} 1368}
1369 1369
1370/* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1370/* Update asoc's rwnd for the approximated state in the buffer,
1371void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) 1371 * and check whether SACK needs to be sent.
1372 */
1373void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
1372{ 1374{
1375 int rx_count;
1373 struct sctp_chunk *sack; 1376 struct sctp_chunk *sack;
1374 struct timer_list *timer; 1377 struct timer_list *timer;
1375 1378
1376 if (asoc->rwnd_over) { 1379 if (asoc->ep->rcvbuf_policy)
1377 if (asoc->rwnd_over >= len) { 1380 rx_count = atomic_read(&asoc->rmem_alloc);
1378 asoc->rwnd_over -= len; 1381 else
1379 } else { 1382 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1380 asoc->rwnd += (len - asoc->rwnd_over);
1381 asoc->rwnd_over = 0;
1382 }
1383 } else {
1384 asoc->rwnd += len;
1385 }
1386 1383
1387 /* If we had window pressure, start recovering it 1384 if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
1388 * once our rwnd had reached the accumulated pressure 1385 asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
1389 * threshold. The idea is to recover slowly, but up 1386 else
1390 * to the initial advertised window. 1387 asoc->rwnd = 0;
1391 */
1392 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1393 int change = min(asoc->pathmtu, asoc->rwnd_press);
1394 asoc->rwnd += change;
1395 asoc->rwnd_press -= change;
1396 }
1397 1388
1398 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", 1389 pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
1399 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1390 __func__, asoc, asoc->rwnd, rx_count,
1400 asoc->a_rwnd); 1391 asoc->base.sk->sk_rcvbuf);
1401 1392
1402 /* Send a window update SACK if the rwnd has increased by at least the 1393 /* Send a window update SACK if the rwnd has increased by at least the
1403 * minimum of the association's PMTU and half of the receive buffer. 1394 * minimum of the association's PMTU and half of the receive buffer.
1404 * The algorithm used is similar to the one described in 1395 * The algorithm used is similar to the one described in
1405 * Section 4.2.3.3 of RFC 1122. 1396 * Section 4.2.3.3 of RFC 1122.
1406 */ 1397 */
1407 if (sctp_peer_needs_update(asoc)) { 1398 if (update_peer && sctp_peer_needs_update(asoc)) {
1408 asoc->a_rwnd = asoc->rwnd; 1399 asoc->a_rwnd = asoc->rwnd;
1409 1400
1410 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " 1401 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1426 } 1417 }
1427} 1418}
1428 1419
1429/* Decrease asoc's rwnd by len. */
1430void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1431{
1432 int rx_count;
1433 int over = 0;
1434
1435 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1436 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1437 "asoc->rwnd_over:%u!\n", __func__, asoc,
1438 asoc->rwnd, asoc->rwnd_over);
1439
1440 if (asoc->ep->rcvbuf_policy)
1441 rx_count = atomic_read(&asoc->rmem_alloc);
1442 else
1443 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1444
1445 /* If we've reached or overflowed our receive buffer, announce
1446 * a 0 rwnd if rwnd would still be positive. Store the
1447 * the potential pressure overflow so that the window can be restored
1448 * back to original value.
1449 */
1450 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1451 over = 1;
1452
1453 if (asoc->rwnd >= len) {
1454 asoc->rwnd -= len;
1455 if (over) {
1456 asoc->rwnd_press += asoc->rwnd;
1457 asoc->rwnd = 0;
1458 }
1459 } else {
1460 asoc->rwnd_over = len - asoc->rwnd;
1461 asoc->rwnd = 0;
1462 }
1463
1464 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1465 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1466 asoc->rwnd_press);
1467}
1468 1420
1469/* Build the bind address list for the association based on info from the 1421/* Build the bind address list for the association based on info from the
1470 * local endpoint and the remote peer. 1422 * local endpoint and the remote peer.
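The new sctp_assoc_rwnd_update() above stops tracking rwnd increments and decrements and instead recomputes the window from the socket buffer state: half of the free receive-buffer space, or zero when the buffer is over-committed. A stand-alone sketch of that computation; function and parameter names are illustrative, not the kernel's:

#include <stdio.h>

/* Recompute an advertised window as half of the free receive-buffer space,
 * clamped at zero, mirroring the calculation visible in the new
 * sctp_assoc_rwnd_update(). sk_rcvbuf and rx_count are plain parameters here.
 */
static unsigned int rwnd_from_rcvbuf(int sk_rcvbuf, int rx_count)
{
	int free_space = sk_rcvbuf - rx_count;

	return free_space > 0 ? (unsigned int)free_space >> 1 : 0;
}

int main(void)
{
	printf("%u\n", rwnd_from_rcvbuf(212992, 50000));	/* room left */
	printf("%u\n", rwnd_from_rcvbuf(212992, 300000));	/* over-committed -> 0 */
	return 0;
}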
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a6a932..2b1738ef9394 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
662 */ 662 */
663 sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); 663 sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
664 664
665 newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
666
665 sk_refcnt_debug_inc(newsk); 667 sk_refcnt_debug_inc(newsk);
666 668
667 if (newsk->sk_prot->init(newsk)) { 669 if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..591b44d3b7de 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6176 * PMTU. In cases, such as loopback, this might be a rather 6176 * PMTU. In cases, such as loopback, this might be a rather
6177 * large spill over. 6177 * large spill over.
6178 */ 6178 */
6179 if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || 6179 if ((!chunk->data_accepted) && (!asoc->rwnd ||
6180 (datalen > asoc->rwnd + asoc->frag_point))) { 6180 (datalen > asoc->rwnd + asoc->frag_point))) {
6181 6181
6182 /* If this is the next TSN, consider reneging to make 6182 /* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
64#include <linux/crypto.h> 64#include <linux/crypto.h>
65#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/file.h> 66#include <linux/file.h>
67#include <linux/compat.h>
67 68
68#include <net/ip.h> 69#include <net/ip.h>
69#include <net/icmp.h> 70#include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
1368/* 1369/*
1369 * New (hopefully final) interface for the API. 1370 * New (hopefully final) interface for the API.
1370 * We use the sctp_getaddrs_old structure so that use-space library 1371 * We use the sctp_getaddrs_old structure so that use-space library
1371 * can avoid any unnecessary allocations. The only defferent part 1372 * can avoid any unnecessary allocations. The only different part
1372 * is that we store the actual length of the address buffer into the 1373 * is that we store the actual length of the address buffer into the
1373 * addrs_num structure member. That way we can re-use the existing 1374 * addrs_num structure member. That way we can re-use the existing
1374 * code. 1375 * code.
1375 */ 1376 */
1377#ifdef CONFIG_COMPAT
1378struct compat_sctp_getaddrs_old {
1379 sctp_assoc_t assoc_id;
1380 s32 addr_num;
1381 compat_uptr_t addrs; /* struct sockaddr * */
1382};
1383#endif
1384
1376static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1385static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1377 char __user *optval, 1386 char __user *optval,
1378 int __user *optlen) 1387 int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1381 sctp_assoc_t assoc_id = 0; 1390 sctp_assoc_t assoc_id = 0;
1382 int err = 0; 1391 int err = 0;
1383 1392
1384 if (len < sizeof(param)) 1393#ifdef CONFIG_COMPAT
1385 return -EINVAL; 1394 if (is_compat_task()) {
1395 struct compat_sctp_getaddrs_old param32;
1386 1396
1387 if (copy_from_user(&param, optval, sizeof(param))) 1397 if (len < sizeof(param32))
1388 return -EFAULT; 1398 return -EINVAL;
1399 if (copy_from_user(&param32, optval, sizeof(param32)))
1400 return -EFAULT;
1389 1401
1390 err = __sctp_setsockopt_connectx(sk, 1402 param.assoc_id = param32.assoc_id;
1391 (struct sockaddr __user *)param.addrs, 1403 param.addr_num = param32.addr_num;
1392 param.addr_num, &assoc_id); 1404 param.addrs = compat_ptr(param32.addrs);
1405 } else
1406#endif
1407 {
1408 if (len < sizeof(param))
1409 return -EINVAL;
1410 if (copy_from_user(&param, optval, sizeof(param)))
1411 return -EFAULT;
1412 }
1393 1413
1414 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1415 param.addrs, param.addr_num,
1416 &assoc_id);
1394 if (err == 0 || err == -EINPROGRESS) { 1417 if (err == 0 || err == -EINPROGRESS) {
1395 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1418 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1396 return -EFAULT; 1419 return -EFAULT;
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
2092 sctp_skb_pull(skb, copied); 2115 sctp_skb_pull(skb, copied);
2093 skb_queue_head(&sk->sk_receive_queue, skb); 2116 skb_queue_head(&sk->sk_receive_queue, skb);
2094 2117
2095 /* When only partial message is copied to the user, increase
2096 * rwnd by that amount. If all the data in the skb is read,
2097 * rwnd is updated when the event is freed.
2098 */
2099 if (!sctp_ulpevent_is_notification(event))
2100 sctp_assoc_rwnd_increase(event->asoc, copied);
2101 goto out; 2118 goto out;
2102 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2119 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2103 (event->msg_flags & MSG_EOR)) 2120 (event->msg_flags & MSG_EOR))
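The CONFIG_COMPAT branch added to sctp_getsockopt_connectx3() above exists because a 32-bit caller lays out sctp_getaddrs_old with a 4-byte pointer, so a 64-bit kernel must copy a separate compat structure and widen the address with compat_ptr(). A userspace sketch of the layout mismatch; the struct names below are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>

/* On a 64-bit build the native structure is larger than the compat one
 * because of its pointer member; this size difference is why the kernel
 * cannot copy_from_user() a single layout for both kinds of callers.
 */
struct native_getaddrs_old {
	int32_t assoc_id;
	int32_t addr_num;
	void *addrs;		/* 8 bytes on 64-bit */
};

struct compat_getaddrs_old {
	int32_t assoc_id;
	int32_t addr_num;
	uint32_t addrs;		/* 32-bit user pointer kept as an integer */
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct native_getaddrs_old),
	       sizeof(struct compat_getaddrs_old));
	return 0;
}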
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e617ab0f..35c8923b5554 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
151 }, 151 },
152 { 152 {
153 .procname = "cookie_hmac_alg", 153 .procname = "cookie_hmac_alg",
154 .data = &init_net.sctp.sctp_hmac_alg,
154 .maxlen = 8, 155 .maxlen = 8,
155 .mode = 0644, 156 .mode = 0644,
156 .proc_handler = proc_sctp_do_hmac_alg, 157 .proc_handler = proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
401 402
402int sctp_sysctl_net_register(struct net *net) 403int sctp_sysctl_net_register(struct net *net)
403{ 404{
404 struct ctl_table *table; 405 struct ctl_table *table = sctp_net_table;
405 int i; 406
407 if (!net_eq(net, &init_net)) {
408 int i;
406 409
407 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 410 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
408 if (!table) 411 if (!table)
409 return -ENOMEM; 412 return -ENOMEM;
410 413
411 for (i = 0; table[i].data; i++) 414 for (i = 0; table[i].data; i++)
412 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 415 table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
416 }
413 417
414 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 418 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
415 return 0; 419 return 0;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..8d198ae03606 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
989 skb = sctp_event2skb(event); 989 skb = sctp_event2skb(event);
990 /* Set the owner and charge rwnd for bytes received. */ 990 /* Set the owner and charge rwnd for bytes received. */
991 sctp_ulpevent_set_owner(event, asoc); 991 sctp_ulpevent_set_owner(event, asoc);
992 sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); 992 sctp_assoc_rwnd_update(asoc, false);
993 993
994 if (!skb->data_len) 994 if (!skb->data_len)
995 return; 995 return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1011{ 1011{
1012 struct sk_buff *skb, *frag; 1012 struct sk_buff *skb, *frag;
1013 unsigned int len; 1013 unsigned int len;
1014 struct sctp_association *asoc;
1014 1015
1015 /* Current stack structures assume that the rcv buffer is 1016 /* Current stack structures assume that the rcv buffer is
1016 * per socket. For UDP style sockets this is not true as 1017 * per socket. For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
1035 } 1036 }
1036 1037
1037done: 1038done:
1038 sctp_assoc_rwnd_increase(event->asoc, len); 1039 asoc = event->asoc;
1040 sctp_association_hold(asoc);
1039 sctp_ulpevent_release_owner(event); 1041 sctp_ulpevent_release_owner(event);
1042 sctp_assoc_rwnd_update(asoc, true);
1043 sctp_association_put(asoc);
1040} 1044}
1041 1045
1042static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) 1046static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6c0513a7f992..36e431ee1c90 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -108,6 +108,7 @@ struct gss_auth {
108static DEFINE_SPINLOCK(pipe_version_lock); 108static DEFINE_SPINLOCK(pipe_version_lock);
109static struct rpc_wait_queue pipe_version_rpc_waitqueue; 109static struct rpc_wait_queue pipe_version_rpc_waitqueue;
110static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); 110static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
111static void gss_put_auth(struct gss_auth *gss_auth);
111 112
112static void gss_free_ctx(struct gss_cl_ctx *); 113static void gss_free_ctx(struct gss_cl_ctx *);
113static const struct rpc_pipe_ops gss_upcall_ops_v0; 114static const struct rpc_pipe_ops gss_upcall_ops_v0;
@@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
320 if (gss_msg->ctx != NULL) 321 if (gss_msg->ctx != NULL)
321 gss_put_ctx(gss_msg->ctx); 322 gss_put_ctx(gss_msg->ctx);
322 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue); 323 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
324 gss_put_auth(gss_msg->auth);
323 kfree(gss_msg); 325 kfree(gss_msg);
324} 326}
325 327
@@ -498,9 +500,12 @@ gss_alloc_msg(struct gss_auth *gss_auth,
498 default: 500 default:
499 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); 501 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
500 if (err) 502 if (err)
501 goto err_free_msg; 503 goto err_put_pipe_version;
502 }; 504 };
505 kref_get(&gss_auth->kref);
503 return gss_msg; 506 return gss_msg;
507err_put_pipe_version:
508 put_pipe_version(gss_auth->net);
504err_free_msg: 509err_free_msg:
505 kfree(gss_msg); 510 kfree(gss_msg);
506err: 511err:
@@ -991,6 +996,8 @@ gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
991 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 996 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
992 if (gss_auth->service == 0) 997 if (gss_auth->service == 0)
993 goto err_put_mech; 998 goto err_put_mech;
999 if (!gssd_running(gss_auth->net))
1000 goto err_put_mech;
994 auth = &gss_auth->rpc_auth; 1001 auth = &gss_auth->rpc_auth;
995 auth->au_cslack = GSS_CRED_SLACK >> 2; 1002 auth->au_cslack = GSS_CRED_SLACK >> 2;
996 auth->au_rslack = GSS_VERF_SLACK >> 2; 1003 auth->au_rslack = GSS_VERF_SLACK >> 2;
@@ -1062,6 +1069,12 @@ gss_free_callback(struct kref *kref)
1062} 1069}
1063 1070
1064static void 1071static void
1072gss_put_auth(struct gss_auth *gss_auth)
1073{
1074 kref_put(&gss_auth->kref, gss_free_callback);
1075}
1076
1077static void
1065gss_destroy(struct rpc_auth *auth) 1078gss_destroy(struct rpc_auth *auth)
1066{ 1079{
1067 struct gss_auth *gss_auth = container_of(auth, 1080 struct gss_auth *gss_auth = container_of(auth,
@@ -1082,7 +1095,7 @@ gss_destroy(struct rpc_auth *auth)
1082 gss_auth->gss_pipe[1] = NULL; 1095 gss_auth->gss_pipe[1] = NULL;
1083 rpcauth_destroy_credcache(auth); 1096 rpcauth_destroy_credcache(auth);
1084 1097
1085 kref_put(&gss_auth->kref, gss_free_callback); 1098 gss_put_auth(gss_auth);
1086} 1099}
1087 1100
1088/* 1101/*
@@ -1253,7 +1266,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
1253 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 1266 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1254 if (ctx) 1267 if (ctx)
1255 gss_put_ctx(ctx); 1268 gss_put_ctx(ctx);
1256 kref_put(&gss_auth->kref, gss_free_callback); 1269 gss_put_auth(gss_auth);
1257} 1270}
1258 1271
1259static void 1272static void
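The auth_gss changes above make every upcall message take its own reference on the gss_auth at allocation time (kref_get() in gss_alloc_msg()) and drop it through the new gss_put_auth() when the message is released, so the auth cannot be freed while a message still points at it. A plain-counter userspace sketch of that ownership rule; the kernel uses kref_get()/kref_put(), and the names below are invented:

#include <stdio.h>
#include <stdlib.h>

struct auth {
	int refcount;
};

static void auth_get(struct auth *a)
{
	a->refcount++;
}

static void auth_put(struct auth *a)
{
	if (--a->refcount == 0) {
		printf("freeing auth\n");
		free(a);
	}
}

int main(void)
{
	struct auth *a = malloc(sizeof(*a));

	if (!a)
		return 1;
	a->refcount = 1;	/* creator's reference */
	auth_get(a);		/* upcall message takes its own reference */
	auth_put(a);		/* creator goes away: auth survives */
	auth_put(a);		/* message released: auth freed only now */
	return 0;
}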
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a29912d5a..e860d4f7ed2a 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
64 free_page((unsigned long)xbufp->head[0].iov_base); 64 free_page((unsigned long)xbufp->head[0].iov_base);
65 xbufp = &req->rq_snd_buf; 65 xbufp = &req->rq_snd_buf;
66 free_page((unsigned long)xbufp->head[0].iov_base); 66 free_page((unsigned long)xbufp->head[0].iov_base);
67 list_del(&req->rq_bc_pa_list);
68 kfree(req); 67 kfree(req);
69} 68}
70 69
@@ -168,8 +167,10 @@ out_free:
168 /* 167 /*
169 * Memory allocation failed, free the temporary list 168 * Memory allocation failed, free the temporary list
170 */ 169 */
171 list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) 170 list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
171 list_del(&req->rq_bc_pa_list);
172 xprt_free_allocation(req); 172 xprt_free_allocation(req);
173 }
173 174
174 dprintk("RPC: setup backchannel transport failed\n"); 175 dprintk("RPC: setup backchannel transport failed\n");
175 return -ENOMEM; 176 return -ENOMEM;
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
198 xprt_dec_alloc_count(xprt, max_reqs); 199 xprt_dec_alloc_count(xprt, max_reqs);
199 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { 200 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
200 dprintk("RPC: req=%p\n", req); 201 dprintk("RPC: req=%p\n", req);
202 list_del(&req->rq_bc_pa_list);
201 xprt_free_allocation(req); 203 xprt_free_allocation(req);
202 if (--max_reqs == 0) 204 if (--max_reqs == 0)
203 break; 205 break;
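The backchannel fix above moves the list_del() out of xprt_free_allocation() and into the loops that own the lists, so an entry is unlinked by its list walker before it is freed. A minimal sketch of that unlink-then-free discipline using a plain singly linked list instead of the kernel's list_head; all names are invented:

#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int id;
};

int main(void)
{
	struct req *head = NULL, *r, *next;
	int i;

	for (i = 0; i < 3; i++) {
		r = malloc(sizeof(*r));
		if (!r)
			return 1;
		r->id = i;
		r->next = head;
		head = r;
	}

	/* Walk safely: remember the successor and unlink before freeing,
	 * the same ordering list_for_each_entry_safe() plus list_del() gives.
	 */
	for (r = head; r; r = next) {
		next = r->next;
		r->next = NULL;		/* "list_del" equivalent */
		printf("freeing req %d\n", r->id);
		free(r);
	}
	head = NULL;
	return 0;
}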
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 80a6640f329b..06c6ff0cb911 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -571,7 +571,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
571 } 571 }
572} 572}
573 573
574int svc_alloc_arg(struct svc_rqst *rqstp) 574static int svc_alloc_arg(struct svc_rqst *rqstp)
575{ 575{
576 struct svc_serv *serv = rqstp->rq_server; 576 struct svc_serv *serv = rqstp->rq_server;
577 struct xdr_buf *arg; 577 struct xdr_buf *arg;
@@ -612,7 +612,7 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
612 return 0; 612 return 0;
613} 613}
614 614
615struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) 615static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
616{ 616{
617 struct svc_xprt *xprt; 617 struct svc_xprt *xprt;
618 struct svc_pool *pool = rqstp->rq_pool; 618 struct svc_pool *pool = rqstp->rq_pool;
@@ -691,7 +691,7 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
691 return xprt; 691 return xprt;
692} 692}
693 693
694void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) 694static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
695{ 695{
696 spin_lock_bh(&serv->sv_lock); 696 spin_lock_bh(&serv->sv_lock);
697 set_bit(XPT_TEMP, &newxpt->xpt_flags); 697 set_bit(XPT_TEMP, &newxpt->xpt_flags);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 817a1e523969..0addefca8e77 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -510,6 +510,7 @@ static int xs_nospace(struct rpc_task *task)
510 struct rpc_rqst *req = task->tk_rqstp; 510 struct rpc_rqst *req = task->tk_rqstp;
511 struct rpc_xprt *xprt = req->rq_xprt; 511 struct rpc_xprt *xprt = req->rq_xprt;
512 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 512 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
513 struct sock *sk = transport->inet;
513 int ret = -EAGAIN; 514 int ret = -EAGAIN;
514 515
515 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", 516 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -527,7 +528,7 @@ static int xs_nospace(struct rpc_task *task)
527 * window size 528 * window size
528 */ 529 */
529 set_bit(SOCK_NOSPACE, &transport->sock->flags); 530 set_bit(SOCK_NOSPACE, &transport->sock->flags);
530 transport->inet->sk_write_pending++; 531 sk->sk_write_pending++;
531 /* ...and wait for more buffer space */ 532 /* ...and wait for more buffer space */
532 xprt_wait_for_buffer_space(task, xs_nospace_callback); 533 xprt_wait_for_buffer_space(task, xs_nospace_callback);
533 } 534 }
@@ -537,6 +538,9 @@ static int xs_nospace(struct rpc_task *task)
537 } 538 }
538 539
539 spin_unlock_bh(&xprt->transport_lock); 540 spin_unlock_bh(&xprt->transport_lock);
541
542 /* Race breaker in case memory is freed before above code is called */
543 sk->sk_write_space(sk);
540 return ret; 544 return ret;
541} 545}
542 546
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1ff477b0450d..5569d96b4da3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer)
192 192
193struct tipc_skb_cb { 193struct tipc_skb_cb {
194 void *handle; 194 void *handle;
195 bool deferred;
195}; 196};
196 197
197#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 198#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d4b5de41b682..da6018beb6eb 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1391,6 +1391,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1391 u32 hdr_size; 1391 u32 hdr_size;
1392 u32 min_hdr_size; 1392 u32 min_hdr_size;
1393 1393
1394 /* If this packet comes from the defer queue, the skb has already
1395 * been validated
1396 */
1397 if (unlikely(TIPC_SKB_CB(buf)->deferred))
1398 return 1;
1399
1394 if (unlikely(buf->len < MIN_H_SIZE)) 1400 if (unlikely(buf->len < MIN_H_SIZE))
1395 return 0; 1401 return 0;
1396 1402
@@ -1703,6 +1709,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1703 &l_ptr->newest_deferred_in, buf)) { 1709 &l_ptr->newest_deferred_in, buf)) {
1704 l_ptr->deferred_inqueue_sz++; 1710 l_ptr->deferred_inqueue_sz++;
1705 l_ptr->stats.deferred_recv++; 1711 l_ptr->stats.deferred_recv++;
1712 TIPC_SKB_CB(buf)->deferred = true;
1706 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1713 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1707 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1714 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1708 } else 1715 } else
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d89dee2259b5..010892b81a06 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -203,8 +203,11 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
203 203
204 rdev->opencount--; 204 rdev->opencount--;
205 205
206 WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && 206 if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
207 !rdev->scan_req->notified); 207 if (WARN_ON(!rdev->scan_req->notified))
208 rdev->scan_req->aborted = true;
209 ___cfg80211_scan_done(rdev, false);
210 }
208} 211}
209 212
210static int cfg80211_rfkill_set_block(void *data, bool blocked) 213static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -440,9 +443,6 @@ int wiphy_register(struct wiphy *wiphy)
440 int i; 443 int i;
441 u16 ifmodes = wiphy->interface_modes; 444 u16 ifmodes = wiphy->interface_modes;
442 445
443 /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
444 wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
445
446 /* 446 /*
447 * There are major locking problems in nl80211/mac80211 for CSA, 447 * There are major locking problems in nl80211/mac80211 for CSA,
448 * disable for all drivers until this has been reworked. 448 * disable for all drivers until this has been reworked.
@@ -859,8 +859,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
859 break; 859 break;
860 case NETDEV_DOWN: 860 case NETDEV_DOWN:
861 cfg80211_update_iface_num(rdev, wdev->iftype, -1); 861 cfg80211_update_iface_num(rdev, wdev->iftype, -1);
862 WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev && 862 if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
863 !rdev->scan_req->notified); 863 if (WARN_ON(!rdev->scan_req->notified))
864 rdev->scan_req->aborted = true;
865 ___cfg80211_scan_done(rdev, false);
866 }
864 867
865 if (WARN_ON(rdev->sched_scan_req && 868 if (WARN_ON(rdev->sched_scan_req &&
866 rdev->sched_scan_req->dev == wdev->netdev)) { 869 rdev->sched_scan_req->dev == wdev->netdev)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 37ec16d7bb1a..f1d193b557b6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -62,6 +62,7 @@ struct cfg80211_registered_device {
62 struct rb_root bss_tree; 62 struct rb_root bss_tree;
63 u32 bss_generation; 63 u32 bss_generation;
64 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 64 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
65 struct sk_buff *scan_msg;
65 struct cfg80211_sched_scan_request *sched_scan_req; 66 struct cfg80211_sched_scan_request *sched_scan_req;
66 unsigned long suspend_at; 67 unsigned long suspend_at;
67 struct work_struct scan_done_wk; 68 struct work_struct scan_done_wk;
@@ -361,7 +362,8 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
361 struct key_params *params, int key_idx, 362 struct key_params *params, int key_idx,
362 bool pairwise, const u8 *mac_addr); 363 bool pairwise, const u8 *mac_addr);
363void __cfg80211_scan_done(struct work_struct *wk); 364void __cfg80211_scan_done(struct work_struct *wk);
364void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev); 365void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
366 bool send_message);
365void __cfg80211_sched_scan_results(struct work_struct *wk); 367void __cfg80211_sched_scan_results(struct work_struct *wk);
366int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, 368int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
367 bool driver_initiated); 369 bool driver_initiated);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7a742594916e..4fe2e6e2bc76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1719,9 +1719,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1719 * We can then retry with the larger buffer. 1719 * We can then retry with the larger buffer.
1720 */ 1720 */
1721 if ((ret == -ENOBUFS || ret == -EMSGSIZE) && 1721 if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
1722 !skb->len && 1722 !skb->len && !state->split &&
1723 cb->min_dump_alloc < 4096) { 1723 cb->min_dump_alloc < 4096) {
1724 cb->min_dump_alloc = 4096; 1724 cb->min_dump_alloc = 4096;
1725 state->split_start = 0;
1725 rtnl_unlock(); 1726 rtnl_unlock();
1726 return 1; 1727 return 1;
1727 } 1728 }
@@ -5244,7 +5245,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
5244 if (!rdev->ops->scan) 5245 if (!rdev->ops->scan)
5245 return -EOPNOTSUPP; 5246 return -EOPNOTSUPP;
5246 5247
5247 if (rdev->scan_req) { 5248 if (rdev->scan_req || rdev->scan_msg) {
5248 err = -EBUSY; 5249 err = -EBUSY;
5249 goto unlock; 5250 goto unlock;
5250 } 5251 }
@@ -10011,40 +10012,31 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10011 NL80211_MCGRP_SCAN, GFP_KERNEL); 10012 NL80211_MCGRP_SCAN, GFP_KERNEL);
10012} 10013}
10013 10014
10014void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 10015struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
10015 struct wireless_dev *wdev) 10016 struct wireless_dev *wdev, bool aborted)
10016{ 10017{
10017 struct sk_buff *msg; 10018 struct sk_buff *msg;
10018 10019
10019 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 10020 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10020 if (!msg) 10021 if (!msg)
10021 return; 10022 return NULL;
10022 10023
10023 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0, 10024 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
10024 NL80211_CMD_NEW_SCAN_RESULTS) < 0) { 10025 aborted ? NL80211_CMD_SCAN_ABORTED :
10026 NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
10025 nlmsg_free(msg); 10027 nlmsg_free(msg);
10026 return; 10028 return NULL;
10027 } 10029 }
10028 10030
10029 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, 10031 return msg;
10030 NL80211_MCGRP_SCAN, GFP_KERNEL);
10031} 10032}
10032 10033
10033void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 10034void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
10034 struct wireless_dev *wdev) 10035 struct sk_buff *msg)
10035{ 10036{
10036 struct sk_buff *msg;
10037
10038 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10039 if (!msg) 10037 if (!msg)
10040 return; 10038 return;
10041 10039
10042 if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
10043 NL80211_CMD_SCAN_ABORTED) < 0) {
10044 nlmsg_free(msg);
10045 return;
10046 }
10047
10048 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, 10040 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
10049 NL80211_MCGRP_SCAN, GFP_KERNEL); 10041 NL80211_MCGRP_SCAN, GFP_KERNEL);
10050} 10042}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b1b231324e10..75799746d845 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -8,10 +8,10 @@ void nl80211_exit(void);
8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); 8void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 9void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
10 struct wireless_dev *wdev); 10 struct wireless_dev *wdev);
11void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, 11struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
12 struct wireless_dev *wdev); 12 struct wireless_dev *wdev, bool aborted);
13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 13void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
14 struct wireless_dev *wdev); 14 struct sk_buff *msg);
15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev, 15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
16 struct net_device *netdev, u32 cmd); 16 struct net_device *netdev, u32 cmd);
17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev, 17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b528e31da2cf..d1ed4aebbbb7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -161,18 +161,25 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
161 dev->bss_generation++; 161 dev->bss_generation++;
162} 162}
163 163
164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev) 164void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
165 bool send_message)
165{ 166{
166 struct cfg80211_scan_request *request; 167 struct cfg80211_scan_request *request;
167 struct wireless_dev *wdev; 168 struct wireless_dev *wdev;
169 struct sk_buff *msg;
168#ifdef CONFIG_CFG80211_WEXT 170#ifdef CONFIG_CFG80211_WEXT
169 union iwreq_data wrqu; 171 union iwreq_data wrqu;
170#endif 172#endif
171 173
172 ASSERT_RTNL(); 174 ASSERT_RTNL();
173 175
174 request = rdev->scan_req; 176 if (rdev->scan_msg) {
177 nl80211_send_scan_result(rdev, rdev->scan_msg);
178 rdev->scan_msg = NULL;
179 return;
180 }
175 181
182 request = rdev->scan_req;
176 if (!request) 183 if (!request)
177 return; 184 return;
178 185
@@ -186,18 +193,16 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
186 if (wdev->netdev) 193 if (wdev->netdev)
187 cfg80211_sme_scan_done(wdev->netdev); 194 cfg80211_sme_scan_done(wdev->netdev);
188 195
189 if (request->aborted) { 196 if (!request->aborted &&
190 nl80211_send_scan_aborted(rdev, wdev); 197 request->flags & NL80211_SCAN_FLAG_FLUSH) {
191 } else { 198 /* flush entries from previous scans */
192 if (request->flags & NL80211_SCAN_FLAG_FLUSH) { 199 spin_lock_bh(&rdev->bss_lock);
193 /* flush entries from previous scans */ 200 __cfg80211_bss_expire(rdev, request->scan_start);
194 spin_lock_bh(&rdev->bss_lock); 201 spin_unlock_bh(&rdev->bss_lock);
195 __cfg80211_bss_expire(rdev, request->scan_start);
196 spin_unlock_bh(&rdev->bss_lock);
197 }
198 nl80211_send_scan_done(rdev, wdev);
199 } 202 }
200 203
204 msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
205
201#ifdef CONFIG_CFG80211_WEXT 206#ifdef CONFIG_CFG80211_WEXT
202 if (wdev->netdev && !request->aborted) { 207 if (wdev->netdev && !request->aborted) {
203 memset(&wrqu, 0, sizeof(wrqu)); 208 memset(&wrqu, 0, sizeof(wrqu));
@@ -211,6 +216,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
211 216
212 rdev->scan_req = NULL; 217 rdev->scan_req = NULL;
213 kfree(request); 218 kfree(request);
219
220 if (!send_message)
221 rdev->scan_msg = msg;
222 else
223 nl80211_send_scan_result(rdev, msg);
214} 224}
215 225
216void __cfg80211_scan_done(struct work_struct *wk) 226void __cfg80211_scan_done(struct work_struct *wk)
@@ -221,7 +231,7 @@ void __cfg80211_scan_done(struct work_struct *wk)
221 scan_done_wk); 231 scan_done_wk);
222 232
223 rtnl_lock(); 233 rtnl_lock();
224 ___cfg80211_scan_done(rdev); 234 ___cfg80211_scan_done(rdev, true);
225 rtnl_unlock(); 235 rtnl_unlock();
226} 236}
227 237
@@ -1079,7 +1089,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
1079 if (IS_ERR(rdev)) 1089 if (IS_ERR(rdev))
1080 return PTR_ERR(rdev); 1090 return PTR_ERR(rdev);
1081 1091
1082 if (rdev->scan_req) { 1092 if (rdev->scan_req || rdev->scan_msg) {
1083 err = -EBUSY; 1093 err = -EBUSY;
1084 goto out; 1094 goto out;
1085 } 1095 }
@@ -1481,7 +1491,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
1481 if (IS_ERR(rdev)) 1491 if (IS_ERR(rdev))
1482 return PTR_ERR(rdev); 1492 return PTR_ERR(rdev);
1483 1493
1484 if (rdev->scan_req) 1494 if (rdev->scan_req || rdev->scan_msg)
1485 return -EAGAIN; 1495 return -EAGAIN;
1486 1496
1487 res = ieee80211_scan_results(rdev, info, extra, data->length); 1497 res = ieee80211_scan_results(rdev, info, extra, data->length);
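The scan changes above build the completion message while the request is still around and either send it at once or park it in rdev->scan_msg for a later ___cfg80211_scan_done() pass, which is why the -EBUSY checks now also look at scan_msg. A userspace sketch of that build-now, send-later pattern with strings standing in for netlink messages; all names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev_state {
	char *scan_msg;		/* parked completion message, if any */
};

static void scan_done(struct dev_state *dev, int aborted, int send_now)
{
	char *msg = strdup(aborted ? "SCAN_ABORTED" : "NEW_SCAN_RESULTS");

	if (!msg)
		return;
	if (send_now) {
		printf("send: %s\n", msg);
		free(msg);
	} else {
		dev->scan_msg = msg;	/* deliver on a later pass */
	}
}

static void flush_parked(struct dev_state *dev)
{
	if (dev->scan_msg) {
		printf("send (deferred): %s\n", dev->scan_msg);
		free(dev->scan_msg);
		dev->scan_msg = NULL;
	}
}

int main(void)
{
	struct dev_state dev = { NULL };

	scan_done(&dev, 0, 0);	/* interface going down: park the result */
	flush_parked(&dev);	/* later completion delivers it */
	return 0;
}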
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a63509118508..f04d4c32e96e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -67,7 +67,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
67 ASSERT_RDEV_LOCK(rdev); 67 ASSERT_RDEV_LOCK(rdev);
68 ASSERT_WDEV_LOCK(wdev); 68 ASSERT_WDEV_LOCK(wdev);
69 69
70 if (rdev->scan_req) 70 if (rdev->scan_req || rdev->scan_msg)
71 return -EBUSY; 71 return -EBUSY;
72 72
73 if (wdev->conn->params.channel) 73 if (wdev->conn->params.channel)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 49392ecbef17..79c059e70860 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -152,6 +152,7 @@ ld_flags = $(LDFLAGS) $(ldflags-y)
152dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ 152dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \ 153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \
154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ 154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
155 -I$(srctree)/drivers/of/testcase-data \
155 -undef -D__DTS__ 156 -undef -D__DTS__
156 157
157# Finds the multi-part object the current object will be linked into 158# Finds the multi-part object the current object will be linked into
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ea2a1e24ade..464dcef79b35 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -471,7 +471,7 @@ sub seed_camelcase_includes {
471 471
472 $camelcase_seeded = 1; 472 $camelcase_seeded = 1;
473 473
474 if (-d ".git") { 474 if (-e ".git") {
475 my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`; 475 my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
476 chomp $git_last_include_commit; 476 chomp $git_last_include_commit;
477 $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit"; 477 $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
@@ -499,7 +499,7 @@ sub seed_camelcase_includes {
499 return; 499 return;
500 } 500 }
501 501
502 if (-d ".git") { 502 if (-e ".git") {
503 $files = `git ls-files "include/*.h"`; 503 $files = `git ls-files "include/*.h"`;
504 @include_files = split('\n', $files); 504 @include_files = split('\n', $files);
505 } 505 }
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 9c3986f4140c..41987885bd31 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@ my %VCS_cmds;
95 95
96my %VCS_cmds_git = ( 96my %VCS_cmds_git = (
97 "execute_cmd" => \&git_execute_cmd, 97 "execute_cmd" => \&git_execute_cmd,
98 "available" => '(which("git") ne "") && (-d ".git")', 98 "available" => '(which("git") ne "") && (-e ".git")',
99 "find_signers_cmd" => 99 "find_signers_cmd" =>
100 "git log --no-color --follow --since=\$email_git_since " . 100 "git log --no-color --follow --since=\$email_git_since " .
101 '--numstat --no-merges ' . 101 '--numstat --no-merges ' .
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 23708636b05c..25e5cb0aaef6 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -210,8 +210,8 @@ static void do_usb_entry(void *symval,
210 range_lo < 0x9 ? "[%X-9" : "[%X", 210 range_lo < 0x9 ? "[%X-9" : "[%X",
211 range_lo); 211 range_lo);
212 sprintf(alias + strlen(alias), 212 sprintf(alias + strlen(alias),
213 range_hi > 0xA ? "a-%X]" : "%X]", 213 range_hi > 0xA ? "A-%X]" : "%X]",
214 range_lo); 214 range_hi);
215 } 215 }
216 } 216 }
217 if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1)) 217 if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
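The file2alias fix above makes the upper half of a bcdDevice digit range print the high digit (and an uppercase 'A') instead of repeating the low one. The formatting can be exercised directly in user space; the range values below are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char alias[32] = "";
	unsigned int range_lo = 0x3, range_hi = 0xC;

	/* Same two sprintf() calls as the corrected do_usb_entry() code. */
	sprintf(alias + strlen(alias),
		range_lo < 0x9 ? "[%X-9" : "[%X", range_lo);
	sprintf(alias + strlen(alias),
		range_hi > 0xA ? "A-%X]" : "%X]", range_hi);

	printf("%s\n", alias);	/* prints "[3-9A-C]" */
	return 0;
}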
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 332ac8a80cf5..2df7b900e259 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
17#include <linux/inet_diag.h> 17#include <linux/inet_diag.h>
18#include <linux/xfrm.h> 18#include <linux/xfrm.h>
19#include <linux/audit.h> 19#include <linux/audit.h>
20#include <linux/sock_diag.h>
20 21
21#include "flask.h" 22#include "flask.h"
22#include "av_permissions.h" 23#include "av_permissions.h"
@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
78{ 79{
79 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 80 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
80 { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 81 { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
82 { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
81}; 83};
82 84
83static struct nlmsg_perm nlmsg_xfrm_perms[] = 85static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c0f498842129..9c5cdc2caaef 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3338,10 +3338,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
3338 if (rc) 3338 if (rc)
3339 return rc; 3339 return rc;
3340 3340
3341 buf[0] = ft->stype; 3341 buf[0] = cpu_to_le32(ft->stype);
3342 buf[1] = ft->ttype; 3342 buf[1] = cpu_to_le32(ft->ttype);
3343 buf[2] = ft->tclass; 3343 buf[2] = cpu_to_le32(ft->tclass);
3344 buf[3] = otype->otype; 3344 buf[3] = cpu_to_le32(otype->otype);
3345 3345
3346 rc = put_entry(buf, sizeof(u32), 4, fp); 3346 rc = put_entry(buf, sizeof(u32), 4, fp);
3347 if (rc) 3347 if (rc)
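The policydb fix above converts each field with cpu_to_le32() before put_entry() so the written policy file has a fixed byte order regardless of the host CPU. A userspace sketch of the same idea, using glibc's htole32() in place of the kernel's cpu_to_le32():

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t buf[4];
	uint32_t stype = 1, ttype = 2, tclass = 3, otype = 4;

	/* Convert to little-endian before writing the record out. */
	buf[0] = htole32(stype);
	buf[1] = htole32(ttype);
	buf[2] = htole32(tclass);
	buf[3] = htole32(otype);

	/* The byte layout of buf is now identical on little- and big-endian
	 * hosts, so the on-disk format does not depend on the writer's CPU.
	 */
	printf("first byte of buf[0]: %u\n", ((unsigned char *)buf)[0]);
	return 0;
}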
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c93c21127f0c..5d0144ee8ed6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
1232 struct context context; 1232 struct context context;
1233 int rc = 0; 1233 int rc = 0;
1234 1234
1235 /* An empty security context is never valid. */
1236 if (!scontext_len)
1237 return -EINVAL;
1238
1235 if (!ss_initialized) { 1239 if (!ss_initialized) {
1236 int i; 1240 int i;
1237 1241
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec4536c8d8d4..dafcf82139e2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card,
932} 932}
933EXPORT_SYMBOL_GPL(snd_hda_bus_new); 933EXPORT_SYMBOL_GPL(snd_hda_bus_new);
934 934
935#ifdef CONFIG_SND_HDA_GENERIC 935#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
936#define is_generic_config(codec) \ 936#define is_generic_config(codec) \
937 (codec->modelname && !strcmp(codec->modelname, "generic")) 937 (codec->modelname && !strcmp(codec->modelname, "generic"))
938#else 938#else
@@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid)
1339/* 1339/*
1340 * Dynamic symbol binding for the codec parsers 1340 * Dynamic symbol binding for the codec parsers
1341 */ 1341 */
1342#ifdef MODULE
1343#define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym))
1344#define unload_parser_addr(addr) symbol_put_addr(addr)
1345#else
1346#define load_parser_sym(sym) (sym)
1347#define unload_parser_addr(addr) do {} while (0)
1348#endif
1349 1342
1350#define load_parser(codec, sym) \ 1343#define load_parser(codec, sym) \
1351 ((codec)->parser = load_parser_sym(sym)) 1344 ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
1352 1345
1353static void unload_parser(struct hda_codec *codec) 1346static void unload_parser(struct hda_codec *codec)
1354{ 1347{
1355 if (codec->parser) { 1348 if (codec->parser)
1356 unload_parser_addr(codec->parser); 1349 symbol_put_addr(codec->parser);
1357 codec->parser = NULL; 1350 codec->parser = NULL;
1358 }
1359} 1351}
1360 1352
1361/* 1353/*
@@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
1570EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets); 1562EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
1571 1563
1572 1564
1573#ifdef CONFIG_SND_HDA_CODEC_HDMI 1565#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
1574/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */ 1566/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
1575static bool is_likely_hdmi_codec(struct hda_codec *codec) 1567static bool is_likely_hdmi_codec(struct hda_codec *codec)
1576{ 1568{
@@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec)
1620 patch = codec->preset->patch; 1612 patch = codec->preset->patch;
1621 if (!patch) { 1613 if (!patch) {
1622 unload_parser(codec); /* to be sure */ 1614 unload_parser(codec); /* to be sure */
1623 if (is_likely_hdmi_codec(codec)) 1615 if (is_likely_hdmi_codec(codec)) {
1616#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
1624 patch = load_parser(codec, snd_hda_parse_hdmi_codec); 1617 patch = load_parser(codec, snd_hda_parse_hdmi_codec);
1625#ifdef CONFIG_SND_HDA_GENERIC 1618#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
1626 if (!patch) 1619 patch = snd_hda_parse_hdmi_codec;
1620#endif
1621 }
1622 if (!patch) {
1623#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
1627 patch = load_parser(codec, snd_hda_parse_generic_codec); 1624 patch = load_parser(codec, snd_hda_parse_generic_codec);
1625#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
1626 patch = snd_hda_parse_generic_codec;
1628#endif 1627#endif
1628 }
1629 if (!patch) { 1629 if (!patch) {
1630 printk(KERN_ERR "hda-codec: No codec parser is available\n"); 1630 printk(KERN_ERR "hda-codec: No codec parser is available\n");
1631 return -ENODEV; 1631 return -ENODEV;
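The hda_codec.c changes above replace plain '#ifdef CONFIG_...' tests with IS_ENABLED()/IS_MODULE()/IS_BUILTIN() because a feature built as a module only defines CONFIG_FOO_MODULE, so a bare ifdef silently misses the =m case. A small userspace illustration of that difference; the CONFIG_EXAMPLE_* names are invented, not real kernel options:

#include <stdio.h>

#define CONFIG_EXAMPLE_HDMI_MODULE 1	/* pretend the codec is built as =m */

int main(void)
{
#ifdef CONFIG_EXAMPLE_HDMI
	printf("plain ifdef: code included\n");
#else
	printf("plain ifdef: code dropped (the =m case is missed)\n");
#endif

#if defined(CONFIG_EXAMPLE_HDMI) || defined(CONFIG_EXAMPLE_HDMI_MODULE)
	printf("IS_ENABLED-style test: code included for both =y and =m\n");
#endif
	return 0;
}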
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8321a97d5c05..d9a09bdd09db 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
3269 mutex_unlock(&codec->control_mutex); 3269 mutex_unlock(&codec->control_mutex);
3270 snd_hda_codec_flush_cache(codec); /* flush the updates */ 3270 snd_hda_codec_flush_cache(codec); /* flush the updates */
3271 if (err >= 0 && spec->cap_sync_hook) 3271 if (err >= 0 && spec->cap_sync_hook)
3272 spec->cap_sync_hook(codec, ucontrol); 3272 spec->cap_sync_hook(codec, kcontrol, ucontrol);
3273 return err; 3273 return err;
3274} 3274}
3275 3275
@@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
3390 return ret; 3390 return ret;
3391 3391
3392 if (spec->cap_sync_hook) 3392 if (spec->cap_sync_hook)
3393 spec->cap_sync_hook(codec, ucontrol); 3393 spec->cap_sync_hook(codec, kcontrol, ucontrol);
3394 3394
3395 return ret; 3395 return ret;
3396} 3396}
@@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
3795 return 0; 3795 return 0;
3796 snd_hda_activate_path(codec, path, true, false); 3796 snd_hda_activate_path(codec, path, true, false);
3797 if (spec->cap_sync_hook) 3797 if (spec->cap_sync_hook)
3798 spec->cap_sync_hook(codec, NULL); 3798 spec->cap_sync_hook(codec, NULL, NULL);
3799 path_power_down_sync(codec, old_path); 3799 path_power_down_sync(codec, old_path);
3800 return 1; 3800 return 1;
3801} 3801}
@@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec)
5270 } 5270 }
5271 5271
5272 if (spec->cap_sync_hook) 5272 if (spec->cap_sync_hook)
5273 spec->cap_sync_hook(codec, NULL); 5273 spec->cap_sync_hook(codec, NULL, NULL);
5274} 5274}
5275 5275
5276/* set right pin controls for digital I/O */ 5276/* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 07f767231c9f..c908afbe4d94 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@ struct hda_gen_spec {
274 void (*init_hook)(struct hda_codec *codec); 274 void (*init_hook)(struct hda_codec *codec);
275 void (*automute_hook)(struct hda_codec *codec); 275 void (*automute_hook)(struct hda_codec *codec);
276 void (*cap_sync_hook)(struct hda_codec *codec, 276 void (*cap_sync_hook)(struct hda_codec *codec,
277 struct snd_kcontrol *kcontrol,
277 struct snd_ctl_elem_value *ucontrol); 278 struct snd_ctl_elem_value *ucontrol);
278 279
279 /* PCM hooks */ 280 /* PCM hooks */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fa2879a21a50..e354ab1ec20f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
198#endif 198#endif
199 199
200#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) 200#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
201#ifdef CONFIG_SND_HDA_CODEC_HDMI 201#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
202#define SUPPORT_VGA_SWITCHEROO 202#define SUPPORT_VGA_SWITCHEROO
203#endif 203#endif
204#endif 204#endif
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 54d14793725a..46ecdbb9053f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -2662,60 +2662,6 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
2662} 2662}
2663 2663
2664/* 2664/*
2665 * PCM stuffs
2666 */
2667static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
2668 u32 stream_tag,
2669 int channel_id, int format)
2670{
2671 unsigned int oldval, newval;
2672
2673 if (!nid)
2674 return;
2675
2676 snd_printdd(
2677 "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
2678 "channel=%d, format=0x%x\n",
2679 nid, stream_tag, channel_id, format);
2680
2681 /* update the format-id if changed */
2682 oldval = snd_hda_codec_read(codec, nid, 0,
2683 AC_VERB_GET_STREAM_FORMAT,
2684 0);
2685 if (oldval != format) {
2686 msleep(20);
2687 snd_hda_codec_write(codec, nid, 0,
2688 AC_VERB_SET_STREAM_FORMAT,
2689 format);
2690 }
2691
2692 oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
2693 newval = (stream_tag << 4) | channel_id;
2694 if (oldval != newval) {
2695 snd_hda_codec_write(codec, nid, 0,
2696 AC_VERB_SET_CHANNEL_STREAMID,
2697 newval);
2698 }
2699}
2700
2701static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
2702{
2703 unsigned int val;
2704
2705 if (!nid)
2706 return;
2707
2708 snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
2709
2710 val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
2711 if (!val)
2712 return;
2713
2714 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
2715 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
2716}
2717
2718/*
2719 * PCM callbacks 2665 * PCM callbacks
2720 */ 2666 */
2721static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, 2667static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
@@ -2726,7 +2672,7 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
2726{ 2672{
2727 struct ca0132_spec *spec = codec->spec; 2673 struct ca0132_spec *spec = codec->spec;
2728 2674
2729 ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 2675 snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
2730 2676
2731 return 0; 2677 return 0;
2732} 2678}
@@ -2745,7 +2691,7 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
2745 if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]) 2691 if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
2746 msleep(50); 2692 msleep(50);
2747 2693
2748 ca0132_cleanup_stream(codec, spec->dacs[0]); 2694 snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
2749 2695
2750 return 0; 2696 return 0;
2751} 2697}
@@ -2822,10 +2768,8 @@ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
2822 unsigned int format, 2768 unsigned int format,
2823 struct snd_pcm_substream *substream) 2769 struct snd_pcm_substream *substream)
2824{ 2770{
2825 struct ca0132_spec *spec = codec->spec; 2771 snd_hda_codec_setup_stream(codec, hinfo->nid,
2826 2772 stream_tag, 0, format);
2827 ca0132_setup_stream(codec, spec->adcs[substream->number],
2828 stream_tag, 0, format);
2829 2773
2830 return 0; 2774 return 0;
2831} 2775}
@@ -2839,7 +2783,7 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
2839 if (spec->dsp_state == DSP_DOWNLOADING) 2783 if (spec->dsp_state == DSP_DOWNLOADING)
2840 return 0; 2784 return 0;
2841 2785
2842 ca0132_cleanup_stream(codec, hinfo->nid); 2786 snd_hda_codec_cleanup_stream(codec, hinfo->nid);
2843 return 0; 2787 return 0;
2844} 2788}
2845 2789
@@ -4742,6 +4686,8 @@ static int patch_ca0132(struct hda_codec *codec)
4742 return err; 4686 return err;
4743 4687
4744 codec->patch_ops = ca0132_patch_ops; 4688 codec->patch_ops = ca0132_patch_ops;
4689 codec->pcm_format_first = 1;
4690 codec->no_sticky_stream = 1;
4745 4691
4746 return 0; 4692 return 0;
4747} 4693}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4e0ec146553d..bcf91bea3317 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
3291} 3291}
3292 3292
3293static void cxt_update_headset_mode_hook(struct hda_codec *codec, 3293static void cxt_update_headset_mode_hook(struct hda_codec *codec,
3294 struct snd_ctl_elem_value *ucontrol) 3294 struct snd_kcontrol *kcontrol,
3295 struct snd_ctl_elem_value *ucontrol)
3295{ 3296{
3296 cxt_update_headset_mode(codec); 3297 cxt_update_headset_mode(codec);
3297} 3298}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d9693ca9546f..6eb903cc6237 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
708} 708}
709 709
710static void alc_inv_dmic_hook(struct hda_codec *codec, 710static void alc_inv_dmic_hook(struct hda_codec *codec,
711 struct snd_ctl_elem_value *ucontrol) 711 struct snd_kcontrol *kcontrol,
712 struct snd_ctl_elem_value *ucontrol)
712{ 713{
713 alc_inv_dmic_sync(codec, false); 714 alc_inv_dmic_sync(codec, false);
714} 715}
@@ -3218,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
3218 3219
3219/* turn on/off mic-mute LED per capture hook */ 3220/* turn on/off mic-mute LED per capture hook */
3220static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec, 3221static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
3221 struct snd_ctl_elem_value *ucontrol) 3222 struct snd_kcontrol *kcontrol,
3223 struct snd_ctl_elem_value *ucontrol)
3222{ 3224{
3223 struct alc_spec *spec = codec->spec; 3225 struct alc_spec *spec = codec->spec;
3224 unsigned int oldval = spec->gpio_led; 3226 unsigned int oldval = spec->gpio_led;
@@ -3528,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
3528} 3530}
3529 3531
3530static void alc_update_headset_mode_hook(struct hda_codec *codec, 3532static void alc_update_headset_mode_hook(struct hda_codec *codec,
3531 struct snd_ctl_elem_value *ucontrol) 3533 struct snd_kcontrol *kcontrol,
3534 struct snd_ctl_elem_value *ucontrol)
3532{ 3535{
3533 alc_update_headset_mode(codec); 3536 alc_update_headset_mode(codec);
3534} 3537}
@@ -4305,7 +4308,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4305 SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4308 SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4306 SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4309 SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4307 SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4310 SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4311 SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4308 SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4312 SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4313 SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4309 SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4314 SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4310 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4315 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
4311 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4316 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4329,6 +4334,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4329 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), 4334 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
4330 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), 4335 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
4331 SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), 4336 SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
4337 SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
4332 SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), 4338 SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
4333 SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), 4339 SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
4334 SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 4340 SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -4434,9 +4440,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
4434 4440
4435 if (spec->codec_variant != ALC269_TYPE_ALC269VB) 4441 if (spec->codec_variant != ALC269_TYPE_ALC269VB)
4436 return; 4442 return;
4437 /* ALC271X doesn't seem to support these COEFs (bko#52181) */
4438 if (!strcmp(codec->chip_name, "ALC271X"))
4439 return;
4440 4443
4441 if ((alc_get_coef0(codec) & 0x00ff) < 0x015) { 4444 if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
4442 alc_write_coef_idx(codec, 0xf, 0x960b); 4445 alc_write_coef_idx(codec, 0xf, 0x960b);
@@ -5106,6 +5109,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6998cf29b9bc..a2f11bf8155c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -83,6 +83,7 @@ enum {
 	STAC_DELL_M6_BOTH,
 	STAC_DELL_EQ,
 	STAC_ALIENWARE_M17X,
+	STAC_92HD89XX_HP_FRONT_JACK,
 	STAC_92HD73XX_MODELS
 };
 
@@ -194,7 +195,7 @@ struct sigmatel_spec {
 	int default_polarity;
 
 	unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
-	bool mic_mute_led_on; /* current mic mute state */
+	unsigned int mic_enabled; /* current mic mute state (bitmask) */
 
 	/* stream */
 	unsigned int stream_delay;
@@ -324,19 +325,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
 
 /* hook for controlling mic-mute LED GPIO */
 static void stac_capture_led_hook(struct hda_codec *codec,
-				  struct snd_ctl_elem_value *ucontrol)
+				  struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
 {
 	struct sigmatel_spec *spec = codec->spec;
-	bool mute;
+	unsigned int mask;
+	bool cur_mute, prev_mute;
 
-	if (!ucontrol)
+	if (!kcontrol || !ucontrol)
 		return;
 
-	mute = !(ucontrol->value.integer.value[0] ||
-		 ucontrol->value.integer.value[1]);
-	if (spec->mic_mute_led_on != mute) {
-		spec->mic_mute_led_on = mute;
-		if (mute)
+	mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	prev_mute = !spec->mic_enabled;
+	if (ucontrol->value.integer.value[0] ||
+	    ucontrol->value.integer.value[1])
+		spec->mic_enabled |= mask;
+	else
+		spec->mic_enabled &= ~mask;
+	cur_mute = !spec->mic_enabled;
+	if (cur_mute != prev_mute) {
+		if (cur_mute)
 			spec->gpio_data |= spec->mic_mute_led_gpio;
 		else
 			spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -1788,6 +1796,12 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
 	{}
 };
 
+static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
+	{ 0x0a, 0x02214030 },
+	{ 0x0b, 0x02A19010 },
+	{}
+};
+
 static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
 				   const struct hda_fixup *fix, int action)
 {
@@ -1906,6 +1920,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
 	[STAC_92HD73XX_NO_JD] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = stac92hd73xx_fixup_no_jd,
+	},
+	[STAC_92HD89XX_HP_FRONT_JACK] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = stac92hd89xx_hp_front_jack_pin_configs,
 	}
 };
 
@@ -1966,6 +1984,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
1966 "Alienware M17x", STAC_ALIENWARE_M17X), 1984 "Alienware M17x", STAC_ALIENWARE_M17X),
1967 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, 1985 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
1968 "Alienware M17x R3", STAC_DELL_EQ), 1986 "Alienware M17x R3", STAC_DELL_EQ),
1987 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
1988 "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
1969 {} /* terminator */ 1989 {} /* terminator */
1970}; 1990};
1971 1991
@@ -4462,7 +4482,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
 	if (spec->mic_mute_led_gpio) {
 		spec->gpio_mask |= spec->mic_mute_led_gpio;
 		spec->gpio_dir |= spec->mic_mute_led_gpio;
-		spec->mic_mute_led_on = true;
+		spec->mic_enabled = 0;
 		spec->gpio_data |= spec->mic_mute_led_gpio;
 
 		spec->gen.cap_sync_hook = stac_capture_led_hook;
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 5799fbc24c28..8fe3b8c18ed4 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
 }
 
 static void update_tpacpi_micmute_led(struct hda_codec *codec,
+				      struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
 {
 	if (!ucontrol || !led_set_func)
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 54f74f8cbb75..4544d8eb1452 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -11,7 +11,7 @@ config SND_BF5XX_I2S
 
 config SND_BF5XX_SOC_SSM2602
 	tristate "SoC SSM2602 Audio Codec Add-On Card support"
-	depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+	depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
 	select SND_BF5XX_SOC_I2S if !BF60x
 	select SND_BF6XX_SOC_I2S if BF60x
 	select SND_SOC_SSM2602
@@ -21,10 +21,9 @@ config SND_BF5XX_SOC_SSM2602
 
 config SND_SOC_BFIN_EVAL_ADAU1701
 	tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards"
-	depends on SND_BF5XX_I2S
+	depends on SND_BF5XX_I2S && I2C
 	select SND_BF5XX_SOC_I2S
 	select SND_SOC_ADAU1701
-	select I2C
 	help
 	  Say Y if you want to add support for the Analog Devices EVAL-ADAU1701MINIZ
 	  board connected to one of the Blackfin evaluation boards like the
@@ -45,7 +44,7 @@ config SND_SOC_BFIN_EVAL_ADAU1373
 
 config SND_SOC_BFIN_EVAL_ADAV80X
 	tristate "Support for the EVAL-ADAV80X boards on Blackfin eval boards"
-	depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+	depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
 	select SND_BF5XX_SOC_I2S
 	select SND_SOC_ADAV80X
 	help
@@ -58,7 +57,7 @@ config SND_SOC_BFIN_EVAL_ADAV80X
 
 config SND_BF5XX_SOC_AD1836
 	tristate "SoC AD1836 Audio support for BF5xx"
-	depends on SND_BF5XX_I2S
+	depends on SND_BF5XX_I2S && SPI_MASTER
 	select SND_BF5XX_SOC_I2S
 	select SND_SOC_AD1836
 	help
@@ -66,7 +65,7 @@ config SND_BF5XX_SOC_AD1836
 
 config SND_BF5XX_SOC_AD193X
 	tristate "SoC AD193X Audio support for Blackfin"
-	depends on SND_BF5XX_I2S
+	depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
 	select SND_BF5XX_SOC_I2S
 	select SND_SOC_AD193X
 	help
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 52b79a487ac7..422812613a28 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1523,8 +1523,15 @@ static int da9055_remove(struct i2c_client *client)
 	return 0;
 }
 
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the CODEC and PMIC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 Ids for CODEC
+ * and PMIC, which must be different to operate together.
+ */
 static const struct i2c_device_id da9055_i2c_id[] = {
-	{ "da9055", 0 },
+	{ "da9055-codec", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
@@ -1532,7 +1539,7 @@ MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
 /* I2C codec control layer */
 static struct i2c_driver da9055_i2c_driver = {
 	.driver = {
-		.name = "da9055",
+		.name = "da9055-codec",
 		.owner = THIS_MODULE,
 	},
 	.probe = da9055_i2c_probe,
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 51f9b3d16b41..9f714ea86613 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
 	case M98090_REG_RECORD_TDM_SLOT:
 	case M98090_REG_SAMPLE_RATE:
 	case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+	case M98090_REG_REVISION_ID:
 		return true;
 	default:
 		return false;
@@ -1769,16 +1770,6 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
-			ret = regcache_sync(max98090->regmap);
-
-			if (ret != 0) {
-				dev_err(codec->dev,
-					"Failed to sync cache: %d\n", ret);
-				return ret;
-			}
-		}
-
 		if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
 			/*
 			 * Set to normal bias level.
@@ -1792,6 +1783,16 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regcache_sync(max98090->regmap);
+			if (ret != 0) {
+				dev_err(codec->dev,
+					"Failed to sync cache: %d\n", ret);
+				return ret;
+			}
+		}
+		break;
+
 	case SND_SOC_BIAS_OFF:
 		/* Set internal pull-up to lowest power mode */
 		snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index a3fb41179636..886924934aa5 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2093,6 +2093,7 @@ MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
 #ifdef CONFIG_ACPI
 static struct acpi_device_id rt5640_acpi_match[] = {
 	{ "INT33CA", 0 },
+	{ "10EC5640", 0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 433d59a0f3ef..2ee23a39622c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1562,7 +1562,6 @@ static int wm8993_remove(struct snd_soc_codec *codec)
 	struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
 
 	wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
 	return 0;
 }
 
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 70ff3772079f..5e3bc3c6801a 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -399,6 +399,7 @@ static struct platform_driver davinci_evm_driver = {
 	.driver = {
 		.name = "davinci_evm",
 		.owner = THIS_MODULE,
+		.pm = &snd_soc_pm_ops,
 		.of_match_table = of_match_ptr(davinci_evm_dt_ids),
 	},
 };
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b7858bfa0295..670afa29e30d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -263,7 +263,9 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 				       unsigned int fmt)
 {
 	struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+	int ret = 0;
 
+	pm_runtime_get_sync(mcasp->dev);
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_B:
 	case SND_SOC_DAIFMT_AC97:
@@ -317,7 +319,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -354,10 +357,12 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
-
-	return 0;
+out:
+	pm_runtime_put_sync(mcasp->dev);
+	return ret;
 }
 
 static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
@@ -448,7 +453,7 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
 	return 0;
 }
 
-static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream,
+static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
 				    int channels)
 {
 	int i;
@@ -524,12 +529,18 @@ static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream,
 	return 0;
 }
 
-static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream)
+static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
 {
 	int i, active_slots;
 	u32 mask = 0;
 	u32 busel = 0;
 
+	if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) {
+		dev_err(mcasp->dev, "tdm slot %d not supported\n",
+			mcasp->tdm_slots);
+		return -EINVAL;
+	}
+
 	active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
 	for (i = 0; i < active_slots; i++)
 		mask |= (1 << i);
@@ -539,35 +550,21 @@ static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream)
 	if (!mcasp->dat_port)
 		busel = TXSEL;
 
-	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		/* bit stream is MSB first with no delay */
-		/* DSP_B mode */
-		mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
-		mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
-
-		if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
-			mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
-				       FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
-		else
-			printk(KERN_ERR "playback tdm slot %d not supported\n",
-				mcasp->tdm_slots);
-	} else {
-		/* bit stream is MSB first with no delay */
-		/* DSP_B mode */
-		mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
-		mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
-
-		if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
-			mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
-				       FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
-		else
-			printk(KERN_ERR "capture tdm slot %d not supported\n",
-				mcasp->tdm_slots);
-	}
+	mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
+	mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
+	mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+		       FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
+
+	mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
+	mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
+	mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
+		       FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
+
+	return 0;
 }
 
 /* S/PDIF */
-static void davinci_hw_dit_param(struct davinci_mcasp *mcasp)
+static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp)
 {
 	/* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
 	   and LSB first */
@@ -589,6 +586,8 @@ static void davinci_hw_dit_param(struct davinci_mcasp *mcasp)
 
 	/* Enable the DIT */
 	mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
+
+	return 0;
 }
 
 static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
@@ -605,13 +604,14 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 	u8 slots = mcasp->tdm_slots;
 	u8 active_serializers;
 	int channels;
+	int ret;
 	struct snd_interval *pcm_channels = hw_param_interval(params,
 			SNDRV_PCM_HW_PARAM_CHANNELS);
 	channels = pcm_channels->min;
 
 	active_serializers = (channels + slots - 1) / slots;
 
-	if (davinci_hw_common_param(mcasp, substream->stream, channels) == -EINVAL)
+	if (mcasp_common_hw_param(mcasp, substream->stream, channels) == -EINVAL)
 		return -EINVAL;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		fifo_level = mcasp->txnumevt * active_serializers;
@@ -619,9 +619,12 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 		fifo_level = mcasp->rxnumevt * active_serializers;
 
 	if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
-		davinci_hw_dit_param(mcasp);
+		ret = mcasp_dit_hw_param(mcasp);
 	else
-		davinci_hw_param(mcasp, substream->stream);
+		ret = mcasp_i2s_hw_param(mcasp, substream->stream);
+
+	if (ret)
+		return ret;
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_U8:
@@ -678,19 +681,9 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		ret = pm_runtime_get_sync(mcasp->dev);
-		if (IS_ERR_VALUE(ret))
-			dev_err(mcasp->dev, "pm_runtime_get_sync() failed\n");
 		davinci_mcasp_start(mcasp, substream->stream);
 		break;
-
 	case SNDRV_PCM_TRIGGER_SUSPEND:
-		davinci_mcasp_stop(mcasp, substream->stream);
-		ret = pm_runtime_put_sync(mcasp->dev);
-		if (IS_ERR_VALUE(ret))
-			dev_err(mcasp->dev, "pm_runtime_put_sync() failed\n");
-		break;
-
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		davinci_mcasp_stop(mcasp, substream->stream);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index d0c72ed261e7..c84026c99134 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -326,7 +326,7 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
 			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
-			   ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(tx_mask));
+			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
 
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
 			   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
@@ -334,7 +334,7 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
 			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
-			   ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(rx_mask));
+			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
 
 	esai_priv->slot_width = slot_width;
 
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 9c9f957fcae1..75e14033e8d8 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -322,7 +322,7 @@
 #define ESAI_xSMB_xS_SHIFT	0
 #define ESAI_xSMB_xS_WIDTH	16
 #define ESAI_xSMB_xS_MASK	(((1 << ESAI_xSMB_xS_WIDTH) - 1) << ESAI_xSMB_xS_SHIFT)
-#define ESAI_xSMB_xS(v)		(((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMA_xS_MASK)
+#define ESAI_xSMB_xS(v)		(((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMB_xS_MASK)
 
 /* Port C Direction Register -- REG_ESAI_PRRC 0xF8 */
 #define ESAI_PRRC_PDC_SHIFT	0
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 79cee782dbbf..a2fd7321b5a9 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -160,7 +160,6 @@ static struct platform_driver imx_mc13783_audio_driver = {
 	.driver = {
 		.name = "imx_mc13783",
 		.owner = THIS_MODULE,
-		.pm = &snd_soc_pm_ops,
 	},
 	.probe = imx_mc13783_probe,
 	.remove = imx_mc13783_remove
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index f2beae78969f..1cb22dd034eb 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -33,8 +33,7 @@ struct imx_sgtl5000_data {
 
 static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct imx_sgtl5000_data *data = container_of(rtd->card,
-					struct imx_sgtl5000_data, card);
+	struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(rtd->card);
 	struct device *dev = rtd->card->dev;
 	int ret;
 
@@ -159,13 +158,15 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
 	data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
 	data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
 
+	platform_set_drvdata(pdev, &data->card);
+	snd_soc_card_set_drvdata(&data->card, data);
+
 	ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
 	if (ret) {
 		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
 		goto fail;
 	}
 
-	platform_set_drvdata(pdev, data);
 	of_node_put(ssi_np);
 	of_node_put(codec_np);
 
@@ -184,7 +185,8 @@ fail:
 
 static int imx_sgtl5000_remove(struct platform_device *pdev)
 {
-	struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(card);
 
 	clk_put(data->codec_clk);
 
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 3fd76bc391de..3a3d17ce6ba4 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -71,7 +71,7 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
 {
 	struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
 	struct imx_priv *priv = &card_priv;
-	struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+	struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
 	struct device *dev = &priv->pdev->dev;
 	unsigned int pll_out;
 	int ret;
@@ -137,7 +137,7 @@ static int imx_wm8962_late_probe(struct snd_soc_card *card)
 {
 	struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
 	struct imx_priv *priv = &card_priv;
-	struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+	struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
 	struct device *dev = &priv->pdev->dev;
 	int ret;
 
@@ -264,13 +264,15 @@ static int imx_wm8962_probe(struct platform_device *pdev)
 	data->card.late_probe = imx_wm8962_late_probe;
 	data->card.set_bias_level = imx_wm8962_set_bias_level;
 
+	platform_set_drvdata(pdev, &data->card);
+	snd_soc_card_set_drvdata(&data->card, data);
+
 	ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
 	if (ret) {
 		dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
 		goto clk_fail;
 	}
 
-	platform_set_drvdata(pdev, data);
 	of_node_put(ssi_np);
 	of_node_put(codec_np);
 
@@ -289,7 +291,8 @@ fail:
 
 static int imx_wm8962_remove(struct platform_device *pdev)
 {
-	struct imx_wm8962_data *data = platform_get_drvdata(pdev);
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
 
 	if (!IS_ERR(data->codec_clk))
 		clk_disable_unprepare(data->codec_clk);
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 454f41cfc828..350757400391 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -59,7 +59,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
 	select SND_SOC_WM8750
 	select SND_S3C2412_SOC_I2S
 	help
-	  Sat Y if you want to add support for SoC audio on the Jive.
+	  Say Y if you want to add support for SoC audio on the Jive.
 
 config SND_SOC_SAMSUNG_SMDK_WM8580
 	tristate "SoC I2S Audio support for WM8580 on SMDK"
@@ -145,11 +145,11 @@ config SND_SOC_SAMSUNG_RX1950_UDA1380
 
 config SND_SOC_SAMSUNG_SMDK_WM9713
 	tristate "SoC AC97 Audio support for SMDK with WM9713"
-	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
 	select SND_SOC_WM9713
 	select SND_SAMSUNG_AC97
 	help
-	  Sat Y if you want to add support for SoC audio on the SMDK.
+	  Say Y if you want to add support for SoC audio on the SMDK.
 
 config SND_SOC_SMARTQ
 	tristate "SoC I2S Audio support for SmartQ board"
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index e0305a148568..9edd68db9f48 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -183,14 +183,16 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;
+
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	drvdata->base = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(drvdata->base))
 		return PTR_ERR(drvdata->base);
 
-	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
-	if (!drvdata)
-		return -ENOMEM;
 	platform_set_drvdata(pdev, drvdata);
 	drvdata->physbase = r->start;
 	if (sizeof(drvdata->physbase) > sizeof(r->start) &&
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 32af6b741ef5..d1d72ff50347 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -328,6 +328,11 @@ static struct usbmix_name_map gamecom780_map[] = {
 	{}
 };
 
+static const struct usbmix_name_map kef_x300a_map[] = {
+	{ 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+	{ 0 }
+};
+
 /*
  * Control map entries
  */
@@ -419,6 +424,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
 		.id = USB_ID(0x200c, 0x1018),
 		.map = ebox44_map,
 	},
+	{
+		.id = USB_ID(0x27ac, 0x1000),
+		.map = kef_x300a_map,
+	},
 	{ 0 } /* terminator */
 };
 
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index be456ce264d0..8ca405cd7c1a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/uaccess.h>
 
 #include <linux/irqchip/arm-gic.h>
 
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3ddf42..00d86427af0f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 	list_add_tail(&dev->list, &kvm->coalesced_zones);
 	mutex_unlock(&kvm->slots_lock);
 
-	return ret;
+	return 0;
 
 out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
-
 	kfree(dev);
 
-	if (dev == NULL)
-		return -ENXIO;
-
-	return 0;
+	return ret;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,